ROOT 6.12/07 Reference Guide
MethodDNN.cxx
1 // @(#)root/tmva $Id$
2 // Author: Peter Speckmayer
3 
4 /**********************************************************************************
5  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis *
6  * Package: TMVA *
7  * Class : MethodDNN *
8  * Web : http://tmva.sourceforge.net *
9  * *
10  * Description: *
11  * A neural network implementation *
12  * *
13  * Authors (alphabetical): *
14  * Simon Pfreundschuh <s.pfreundschuh@gmail.com> - CERN, Switzerland *
15  * Peter Speckmayer <peter.speckmayer@gmx.ch> - CERN, Switzerland *
16  * *
17  * Copyright (c) 2005-2015: *
18  * CERN, Switzerland *
19  * U. of Victoria, Canada *
20  * MPI-K Heidelberg, Germany *
21  * U. of Bonn, Germany *
22  * *
23  * Redistribution and use in source and binary forms, with or without *
24  * modification, are permitted according to the terms listed in LICENSE *
25  * (http://tmva.sourceforge.net/LICENSE) *
26  **********************************************************************************/
27 
28 /*! \class TMVA::MethodDNN
29 \ingroup TMVA
30 Deep Neural Network Implementation.
31 */
32 
33 #include "TMVA/MethodDNN.h"
34 
35 #include "TString.h"
36 #include "TTree.h"
37 #include "TFile.h"
38 #include "TFormula.h"
39 
40 #include "TMVA/ClassifierFactory.h"
41 #include "TMVA/Configurable.h"
42 #include "TMVA/IMethod.h"
43 #include "TMVA/MsgLogger.h"
44 #include "TMVA/MethodBase.h"
45 #include "TMVA/Timer.h"
46 #include "TMVA/Types.h"
47 #include "TMVA/Tools.h"
48 #include "TMVA/Config.h"
49 #include "TMVA/Ranking.h"
50 
51 #include "TMVA/DNN/Net.h"
53 
54 #include "TMVA/NeuralNet.h"
55 #include "TMVA/Monitoring.h"
56 
57 #include <algorithm>
58 #include <iostream>
59 #include <string>
60 #include <iomanip>
61 
62 REGISTER_METHOD(DNN)
63 
 64 ClassImp(TMVA::MethodDNN);
 65 
66 namespace TMVA
67 {
68  using namespace DNN;
69 
70  ////////////////////////////////////////////////////////////////////////////////
71  /// standard constructor
72 
73  TMVA::MethodDNN::MethodDNN(const TString &jobName, const TString &methodTitle, DataSetInfo &theData,
74  const TString &theOption)
75  : MethodBase(jobName, Types::kDNN, methodTitle, theData, theOption), fWeightInitialization(), fOutputFunction(),
76  fLayoutString(), fErrorStrategy(), fTrainingStrategyString(), fWeightInitializationString(),
77  fArchitectureString(), fTrainingSettings(), fResume(false), fSettings()
78  {
79 }
80 
81 ////////////////////////////////////////////////////////////////////////////////
82 /// constructor from a weight file
83 
84 TMVA::MethodDNN::MethodDNN(DataSetInfo& theData,
85  const TString& theWeightFile)
86  : MethodBase( Types::kDNN, theData, theWeightFile),
87  fWeightInitialization(), fOutputFunction(), fLayoutString(), fErrorStrategy(),
88  fTrainingStrategyString(), fWeightInitializationString(), fArchitectureString(),
89  fTrainingSettings(), fResume(false), fSettings()
90 {
91  fWeightInitialization = DNN::EInitialization::kGauss;
92  fOutputFunction = DNN::EOutputFunction::kSigmoid;
93 }
94 
95 ////////////////////////////////////////////////////////////////////////////////
96 /// destructor
97 
 98 TMVA::MethodDNN::~MethodDNN()
 99 {
100  fWeightInitialization = DNN::EInitialization::kGauss;
101  fOutputFunction = DNN::EOutputFunction::kSigmoid;
102 }
103 
104 ////////////////////////////////////////////////////////////////////////////////
 105 /// The DNN can handle classification with two classes, multiclass
 106 /// classification, and regression
107 
 108 Bool_t TMVA::MethodDNN::HasAnalysisType(Types::EAnalysisType type,
 109  UInt_t numberClasses,
110  UInt_t /*numberTargets*/ )
111 {
112  if (type == Types::kClassification && numberClasses == 2 ) return kTRUE;
113  if (type == Types::kMulticlass ) return kTRUE;
114  if (type == Types::kRegression ) return kTRUE;
115 
116  return kFALSE;
117 }
118 
119 ////////////////////////////////////////////////////////////////////////////////
120 /// default initializations
121 
122 void TMVA::MethodDNN::Init() {}
123 
124 ////////////////////////////////////////////////////////////////////////////////
125 /// Options to be set in the option string:
126 ///
127 /// - LearningRate <float> DNN learning rate parameter.
128 /// - DecayRate <float> Decay rate for learning parameter.
129 /// - TestRate <int> Period of validation set error computation.
 130 /// - BatchSize <int> Number of events per batch.
131 ///
132 /// - ValidationSize <string> How many events to use for validation. "0.2"
133 /// or "20%" indicates that a fifth of the
134 /// training data should be used. "100"
135 /// indicates that 100 events should be used.
136 
 137 void TMVA::MethodDNN::DeclareOptions()
 138 {
139 
140  DeclareOptionRef(fLayoutString="SOFTSIGN|(N+100)*2,LINEAR",
141  "Layout",
142  "Layout of the network.");
143 
144  DeclareOptionRef(fValidationSize = "20%", "ValidationSize",
145  "Part of the training data to use for "
146  "validation. Specify as 0.2 or 20% to use a "
147  "fifth of the data set as validation set. "
148  "Specify as 100 to use exactly 100 events. "
149  "(Default: 20%)");
150 
151  DeclareOptionRef(fErrorStrategy="CROSSENTROPY",
152  "ErrorStrategy",
153  "Loss function: Mean squared error (regression)"
154  " or cross entropy (binary classification).");
155  AddPreDefVal(TString("CROSSENTROPY"));
156  AddPreDefVal(TString("SUMOFSQUARES"));
157  AddPreDefVal(TString("MUTUALEXCLUSIVE"));
158 
159  DeclareOptionRef(fWeightInitializationString="XAVIER",
160  "WeightInitialization",
161  "Weight initialization strategy");
162  AddPreDefVal(TString("XAVIER"));
163  AddPreDefVal(TString("XAVIERUNIFORM"));
164 
165  DeclareOptionRef(fArchitectureString = "CPU", "Architecture", "Which architecture to perform the training on.");
166  AddPreDefVal(TString("STANDARD"));
167  AddPreDefVal(TString("CPU"));
168  AddPreDefVal(TString("GPU"));
169  AddPreDefVal(TString("OPENCL"));
170 
 171  DeclareOptionRef(
 172  fTrainingStrategyString = "LearningRate=1e-1,"
173  "Momentum=0.3,"
174  "Repetitions=3,"
175  "ConvergenceSteps=50,"
176  "BatchSize=30,"
177  "TestRepetitions=7,"
178  "WeightDecay=0.0,"
179  "Renormalize=L2,"
180  "DropConfig=0.0,"
181  "DropRepetitions=5|LearningRate=1e-4,"
182  "Momentum=0.3,"
183  "Repetitions=3,"
184  "ConvergenceSteps=50,"
185  "BatchSize=20,"
186  "TestRepetitions=7,"
187  "WeightDecay=0.001,"
188  "Renormalize=L2,"
189  "DropConfig=0.0+0.5+0.5,"
190  "DropRepetitions=5,"
191  "Multithreading=True",
192  "TrainingStrategy",
193  "Defines the training strategies.");
194 }
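// [Editorial note, not part of the original source] The options declared above are
// normally supplied as a single option string when booking the method. A minimal
// sketch, assuming an existing TMVA::Factory* factory and TMVA::DataLoader* dataloader:
//
//    factory->BookMethod(dataloader, TMVA::Types::kDNN, "DNN",
//       "Layout=TANH|(N+30)*2,TANH|(N+30),LINEAR:"
//       "ErrorStrategy=CROSSENTROPY:WeightInitialization=XAVIER:"
//       "Architecture=CPU:ValidationSize=20%:"
//       "TrainingStrategy=LearningRate=1e-1,BatchSize=30|LearningRate=1e-4,BatchSize=20");
//
// Fields are separated by ':' and the training phases inside TrainingStrategy by '|',
// matching the defaults registered in DeclareOptions() above.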
195 
196 ////////////////////////////////////////////////////////////////////////////////
197 /// parse layout specification string and return a vector, each entry
 198 /// containing the number of neurons and the activation function for the corresponding layer
199 
 200 auto TMVA::MethodDNN::ParseLayoutString(TString layoutString)
 201  -> LayoutVector_t
202 {
203  LayoutVector_t layout;
204  const TString layerDelimiter(",");
205  const TString subDelimiter("|");
206 
207  const size_t inputSize = GetNvar();
208 
209  TObjArray* layerStrings = layoutString.Tokenize(layerDelimiter);
210  TIter nextLayer (layerStrings);
211  TObjString* layerString = (TObjString*)nextLayer ();
212 
213  for (; layerString != nullptr; layerString = (TObjString*) nextLayer()) {
214  int numNodes = 0;
215  EActivationFunction activationFunction = EActivationFunction::kTanh;
216 
217  TObjArray* subStrings = layerString->GetString().Tokenize(subDelimiter);
218  TIter nextToken (subStrings);
219  TObjString* token = (TObjString *) nextToken();
220  int idxToken = 0;
221  for (; token != nullptr; token = (TObjString *) nextToken()) {
222  switch (idxToken)
223  {
224  case 0:
225  {
226  TString strActFnc (token->GetString ());
227  if (strActFnc == "RELU") {
228  activationFunction = DNN::EActivationFunction::kRelu;
229  } else if (strActFnc == "TANH") {
230  activationFunction = DNN::EActivationFunction::kTanh;
231  } else if (strActFnc == "SYMMRELU") {
232  activationFunction = DNN::EActivationFunction::kSymmRelu;
233  } else if (strActFnc == "SOFTSIGN") {
234  activationFunction = DNN::EActivationFunction::kSoftSign;
235  } else if (strActFnc == "SIGMOID") {
236  activationFunction = DNN::EActivationFunction::kSigmoid;
237  } else if (strActFnc == "LINEAR") {
238  activationFunction = DNN::EActivationFunction::kIdentity;
239  } else if (strActFnc == "GAUSS") {
240  activationFunction = DNN::EActivationFunction::kGauss;
241  }
242  }
243  break;
244  case 1: // number of nodes
245  {
246  TString strNumNodes (token->GetString ());
247  TString strN ("x");
248  strNumNodes.ReplaceAll ("N", strN);
249  strNumNodes.ReplaceAll ("n", strN);
250  TFormula fml ("tmp",strNumNodes);
251  numNodes = fml.Eval (inputSize);
252  }
253  break;
254  }
255  ++idxToken;
256  }
257  layout.push_back(std::make_pair(numNodes, activationFunction));
258  }
259  return layout;
260 }
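// [Editorial note, not part of the original source] A worked example of the parsing
// above, assuming a data set with four input variables (GetNvar() == 4). The default
// layout string "SOFTSIGN|(N+100)*2,LINEAR" is split on ',' into two layer specs:
//
//    "SOFTSIGN|(N+100)*2"  ->  activation kSoftSign, nodes (4+100)*2 = 208
//    "LINEAR"              ->  activation kIdentity, nodes 0 (no count given)
//
// so the returned LayoutVector_t is {(208, kSoftSign), (0, kIdentity)}. The last entry
// describes the output layer; its node count is not used, since ProcessOptions() adds
// the output layer with a width given by the number of targets or classes.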
261 
262 ////////////////////////////////////////////////////////////////////////////////
263 /// parse key value pairs in blocks -> return vector of blocks with map of key value pairs
264 
 265 auto TMVA::MethodDNN::ParseKeyValueString(TString parseString,
 266  TString blockDelim,
267  TString tokenDelim)
268  -> KeyValueVector_t
269 {
270  KeyValueVector_t blockKeyValues;
271  const TString keyValueDelim ("=");
272 
273  TObjArray* blockStrings = parseString.Tokenize (blockDelim);
274  TIter nextBlock (blockStrings);
275  TObjString* blockString = (TObjString *) nextBlock();
276 
277  for (; blockString != nullptr; blockString = (TObjString *) nextBlock())
278  {
279  blockKeyValues.push_back (std::map<TString,TString>());
280  std::map<TString,TString>& currentBlock = blockKeyValues.back ();
281 
282  TObjArray* subStrings = blockString->GetString ().Tokenize (tokenDelim);
283  TIter nextToken (subStrings);
284  TObjString* token = (TObjString*)nextToken ();
285 
286  for (; token != nullptr; token = (TObjString *)nextToken())
287  {
288  TString strKeyValue (token->GetString ());
289  int delimPos = strKeyValue.First (keyValueDelim.Data ());
290  if (delimPos <= 0)
291  continue;
292 
293  TString strKey = TString (strKeyValue (0, delimPos));
294  strKey.ToUpper();
295  TString strValue = TString (strKeyValue (delimPos+1, strKeyValue.Length ()));
296 
297  strKey.Strip (TString::kBoth, ' ');
298  strValue.Strip (TString::kBoth, ' ');
299 
300  currentBlock.insert (std::make_pair (strKey, strValue));
301  }
302  }
303  return blockKeyValues;
304 }
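// [Editorial note, not part of the original source] A short example of the behaviour
// above:
//
//    ParseKeyValueString("LearningRate=1e-1,Momentum=0.3|LearningRate=1e-4,Momentum=0.0",
//                        "|", ",");
//
// returns a KeyValueVector_t with two maps, one per training phase:
//    { {"LEARNINGRATE","1e-1"}, {"MOMENTUM","0.3"} }
//    { {"LEARNINGRATE","1e-4"}, {"MOMENTUM","0.0"} }
// Keys are upper-cased here so that the fetchValue() helpers below can look them up
// independently of the spelling used in the option string.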
305 
306 ////////////////////////////////////////////////////////////////////////////////
307 
308 TString fetchValue (const std::map<TString, TString>& keyValueMap, TString key)
309 {
310  key.ToUpper ();
311  std::map<TString, TString>::const_iterator it = keyValueMap.find (key);
312  if (it == keyValueMap.end()) {
313  return TString ("");
314  }
315  return it->second;
316 }
317 
318 ////////////////////////////////////////////////////////////////////////////////
319 
320 template <typename T>
321 T fetchValue(const std::map<TString,TString>& keyValueMap,
322  TString key,
323  T defaultValue);
324 
325 ////////////////////////////////////////////////////////////////////////////////
326 
327 template <>
328 int fetchValue(const std::map<TString,TString>& keyValueMap,
329  TString key,
330  int defaultValue)
331 {
332  TString value (fetchValue (keyValueMap, key));
333  if (value == "") {
334  return defaultValue;
335  }
336  return value.Atoi ();
337 }
338 
339 ////////////////////////////////////////////////////////////////////////////////
340 
341 template <>
342 double fetchValue (const std::map<TString,TString>& keyValueMap,
343  TString key, double defaultValue)
344 {
345  TString value (fetchValue (keyValueMap, key));
346  if (value == "") {
347  return defaultValue;
348  }
349  return value.Atof ();
350 }
351 
352 ////////////////////////////////////////////////////////////////////////////////
353 
354 template <>
355 TString fetchValue (const std::map<TString,TString>& keyValueMap,
356  TString key, TString defaultValue)
357 {
358  TString value (fetchValue (keyValueMap, key));
359  if (value == "") {
360  return defaultValue;
361  }
362  return value;
363 }
364 
365 ////////////////////////////////////////////////////////////////////////////////
366 
367 template <>
368 bool fetchValue (const std::map<TString,TString>& keyValueMap,
369  TString key, bool defaultValue)
370 {
371  TString value (fetchValue (keyValueMap, key));
372  if (value == "") {
373  return defaultValue;
374  }
375  value.ToUpper ();
376  if (value == "TRUE" || value == "T" || value == "1") {
377  return true;
378  }
379  return false;
380 }
381 
382 ////////////////////////////////////////////////////////////////////////////////
383 
384 template <>
385 std::vector<double> fetchValue(const std::map<TString, TString> & keyValueMap,
386  TString key,
387  std::vector<double> defaultValue)
388 {
389  TString parseString (fetchValue (keyValueMap, key));
390  if (parseString == "") {
391  return defaultValue;
392  }
393  parseString.ToUpper ();
394  std::vector<double> values;
395 
396  const TString tokenDelim ("+");
397  TObjArray* tokenStrings = parseString.Tokenize (tokenDelim);
398  TIter nextToken (tokenStrings);
399  TObjString* tokenString = (TObjString*)nextToken ();
400  for (; tokenString != NULL; tokenString = (TObjString*)nextToken ()) {
401  std::stringstream sstr;
402  double currentValue;
403  sstr << tokenString->GetString ().Data ();
404  sstr >> currentValue;
405  values.push_back (currentValue);
406  }
407  return values;
408 }
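// [Editorial note, not part of the original source] Given one block parsed from the
// default training strategy, e.g. a map holding {"BATCHSIZE","20"} and
// {"DROPCONFIG","0.0+0.5+0.5"}, the specialisations above behave as follows:
//
//    fetchValue(block, "BatchSize", 30);                        // -> 20   (parsed with Atoi)
//    fetchValue(block, "LearningRate", 1e-5);                   // -> 1e-5 (key absent, default)
//    fetchValue(block, "DropConfig", std::vector<Double_t>());  // -> {0.0, 0.5, 0.5} ('+'-separated)
//
// Keys are matched after upper-casing, so "BatchSize" and "BATCHSIZE" are equivalent.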
409 
410 ////////////////////////////////////////////////////////////////////////////////
411 
 412 void TMVA::MethodDNN::ProcessOptions()
 413 {
 414  if (IgnoreEventsWithNegWeightsInTraining()) {
 415  Log() << kINFO
416  << "Will ignore negative events in training!"
417  << Endl;
418  }
419 
420  if (fArchitectureString == "STANDARD") {
421  Log() << kERROR << "The STANDARD architecture has been deprecated. "
 422  "Please use Architecture=CPU or Architecture=GPU. "
423  "See the TMVA Users' Guide for instructions if you "
424  "encounter problems."
425  << Endl;
426  Log() << kFATAL << "The STANDARD architecture has been deprecated. "
 427  "Please use Architecture=CPU or Architecture=GPU. "
428  "See the TMVA Users' Guide for instructions if you "
429  "encounter problems."
430  << Endl;
431  }
432 
433  if (fArchitectureString == "OPENCL") {
434  Log() << kERROR << "The OPENCL architecture has not been implemented yet. "
 435  "Please use Architecture=CPU or Architecture=GPU for the "
436  "time being. See the TMVA Users' Guide for instructions "
437  "if you encounter problems."
438  << Endl;
439  Log() << kFATAL << "The OPENCL architecture has not been implemented yet. "
 440  "Please use Architecture=CPU or Architecture=GPU for the "
441  "time being. See the TMVA Users' Guide for instructions "
442  "if you encounter problems."
443  << Endl;
444  }
445 
446  if (fArchitectureString == "GPU") {
447 #ifndef DNNCUDA // Included only if DNNCUDA flag is _not_ set.
448  Log() << kERROR << "CUDA backend not enabled. Please make sure "
449  "you have CUDA installed and it was successfully "
450  "detected by CMAKE."
451  << Endl;
452  Log() << kFATAL << "CUDA backend not enabled. Please make sure "
453  "you have CUDA installed and it was successfully "
454  "detected by CMAKE."
455  << Endl;
456 #endif // DNNCUDA
457  }
458 
459  if (fArchitectureString == "CPU") {
460 #ifndef DNNCPU // Included only if DNNCPU flag is _not_ set.
461  Log() << kERROR << "Multi-core CPU backend not enabled. Please make sure "
462  "you have a BLAS implementation and it was successfully "
 463  "detected by CMake, and that the imt CMake flag is set."
464  << Endl;
465  Log() << kFATAL << "Multi-core CPU backend not enabled. Please make sure "
466  "you have a BLAS implementation and it was successfully "
 467  "detected by CMake, and that the imt CMake flag is set."
468  << Endl;
469 #endif // DNNCPU
470  }
471 
472  //
473  // Set network structure.
474  //
475 
476  fLayout = TMVA::MethodDNN::ParseLayoutString (fLayoutString);
477  size_t inputSize = GetNVariables ();
478  size_t outputSize = 1;
479  if (fAnalysisType == Types::kRegression && GetNTargets() != 0) {
480  outputSize = GetNTargets();
481  } else if (fAnalysisType == Types::kMulticlass && DataInfo().GetNClasses() >= 2) {
482  outputSize = DataInfo().GetNClasses();
483  }
484 
485  fNet.SetBatchSize(1);
486  fNet.SetInputWidth(inputSize);
487 
488  auto itLayout = std::begin (fLayout);
489  auto itLayoutEnd = std::end (fLayout)-1;
490  for ( ; itLayout != itLayoutEnd; ++itLayout) {
491  fNet.AddLayer((*itLayout).first, (*itLayout).second);
492  }
493  fNet.AddLayer(outputSize, EActivationFunction::kIdentity);
494 
495  //
496  // Loss function and output.
497  //
498 
499  fOutputFunction = EOutputFunction::kSigmoid;
 500  if (fAnalysisType == Types::kClassification)
 501  {
502  if (fErrorStrategy == "SUMOFSQUARES") {
503  fNet.SetLossFunction(ELossFunction::kMeanSquaredError);
504  }
505  if (fErrorStrategy == "CROSSENTROPY") {
506  fNet.SetLossFunction(ELossFunction::kCrossEntropy);
507  }
508  fOutputFunction = EOutputFunction::kSigmoid;
509  } else if (fAnalysisType == Types::kRegression) {
510  if (fErrorStrategy != "SUMOFSQUARES") {
511  Log () << kWARNING << "For regression only SUMOFSQUARES is a valid "
512  << " neural net error function. Setting error function to "
513  << " SUMOFSQUARES now." << Endl;
514  }
515  fNet.SetLossFunction(ELossFunction::kMeanSquaredError);
516  fOutputFunction = EOutputFunction::kIdentity;
517  } else if (fAnalysisType == Types::kMulticlass) {
518  if (fErrorStrategy == "SUMOFSQUARES") {
519  fNet.SetLossFunction(ELossFunction::kMeanSquaredError);
520  }
521  if (fErrorStrategy == "CROSSENTROPY") {
522  fNet.SetLossFunction(ELossFunction::kCrossEntropy);
523  }
524  if (fErrorStrategy == "MUTUALEXCLUSIVE") {
525  fNet.SetLossFunction(ELossFunction::kSoftmaxCrossEntropy);
526  }
527  fOutputFunction = EOutputFunction::kSoftmax;
528  }
529 
530  //
531  // Initialization
532  //
533 
534  if (fWeightInitializationString == "XAVIER") {
535  fWeightInitialization = DNN::EInitialization::kGauss;
536  }
537  else if (fWeightInitializationString == "XAVIERUNIFORM") {
538  fWeightInitialization = DNN::EInitialization::kUniform;
539  }
540  else {
541  fWeightInitialization = DNN::EInitialization::kGauss;
542  }
543 
544  //
545  // Training settings.
546  //
547 
548  // Force validation of the ValidationSize option
549  GetNumValidationSamples();
550 
551  KeyValueVector_t strategyKeyValues = ParseKeyValueString(fTrainingStrategyString,
552  TString ("|"),
553  TString (","));
554  for (auto& block : strategyKeyValues) {
555  TTrainingSettings settings;
556 
557  settings.convergenceSteps = fetchValue(block, "ConvergenceSteps", 100);
558  settings.batchSize = fetchValue(block, "BatchSize", 30);
559  settings.testInterval = fetchValue(block, "TestRepetitions", 7);
560  settings.weightDecay = fetchValue(block, "WeightDecay", 0.0);
561  settings.learningRate = fetchValue(block, "LearningRate", 1e-5);
562  settings.momentum = fetchValue(block, "Momentum", 0.3);
563  settings.dropoutProbabilities = fetchValue(block, "DropConfig",
564  std::vector<Double_t>());
565 
566  TString regularization = fetchValue(block, "Regularization",
567  TString ("NONE"));
568  if (regularization == "L1") {
 569  settings.regularization = DNN::ERegularization::kL1;
 570  } else if (regularization == "L2") {
 571  settings.regularization = DNN::ERegularization::kL2;
 572  }
573 
574  TString strMultithreading = fetchValue(block, "Multithreading",
575  TString ("True"));
576  if (strMultithreading.BeginsWith ("T")) {
577  settings.multithreading = true;
578  } else {
579  settings.multithreading = false;
580  }
581 
582  fTrainingSettings.push_back(settings);
583  }
584 }
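// [Editorial note, not part of the original source] For the default strategy string
// registered in DeclareOptions(), the first phase parses to convergenceSteps=50,
// batchSize=30, testInterval=7, weightDecay=0.0, learningRate=1e-1, momentum=0.3 and
// dropoutProbabilities={0.0}. Note that the loop above queries a "Regularization" key,
// while the default string carries "Renormalize=L2", so with the defaults no L1/L2
// regularization is selected through this key.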
585 
586 ////////////////////////////////////////////////////////////////////////////////
587 /// Validation of the ValidationSize option. Allowed formats are 20%, 0.2 and
588 /// 100 etc.
589 /// - 20% and 0.2 selects 20% of the training set as validation data.
590 /// - 100 selects 100 events as the validation data.
591 ///
592 /// @return number of samples in validation set
593 ///
594 
 595 UInt_t TMVA::MethodDNN::GetNumValidationSamples()
 596 {
597  Int_t nValidationSamples = 0;
598  UInt_t trainingSetSize = GetEventCollection(Types::kTraining).size();
599 
600  // Parsing + Validation
601  // --------------------
602  if (fValidationSize.EndsWith("%")) {
603  // Relative spec. format 20%
604  TString intValStr = TString(fValidationSize.Strip(TString::kTrailing, '%'));
605 
606  if (intValStr.IsFloat()) {
607  Double_t valSizeAsDouble = fValidationSize.Atof() / 100.0;
608  nValidationSamples = GetEventCollection(Types::kTraining).size() * valSizeAsDouble;
609  } else {
610  Log() << kFATAL << "Cannot parse number \"" << fValidationSize
611  << "\". Expected string like \"20%\" or \"20.0%\"." << Endl;
612  }
613  } else if (fValidationSize.IsFloat()) {
614  Double_t valSizeAsDouble = fValidationSize.Atof();
615 
616  if (valSizeAsDouble < 1.0) {
617  // Relative spec. format 0.2
618  nValidationSamples = GetEventCollection(Types::kTraining).size() * valSizeAsDouble;
619  } else {
620  // Absolute spec format 100 or 100.0
621  nValidationSamples = valSizeAsDouble;
622  }
623  } else {
624  Log() << kFATAL << "Cannot parse number \"" << fValidationSize << "\". Expected string like \"0.2\" or \"100\"."
625  << Endl;
626  }
627 
628  // Value validation
629  // ----------------
630  if (nValidationSamples < 0) {
631  Log() << kFATAL << "Validation size \"" << fValidationSize << "\" is negative." << Endl;
632  }
633 
634  if (nValidationSamples == 0) {
635  Log() << kFATAL << "Validation size \"" << fValidationSize << "\" is zero." << Endl;
636  }
637 
638  if (nValidationSamples >= (Int_t)trainingSetSize) {
639  Log() << kFATAL << "Validation size \"" << fValidationSize
640  << "\" is larger than or equal in size to training set (size=\"" << trainingSetSize << "\")." << Endl;
641  }
642 
643  return nValidationSamples;
644 }
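// [Editorial note, not part of the original source] With a training set of 10000
// events, ValidationSize="20%" and ValidationSize="0.2" both select 2000 validation
// events, while ValidationSize="100" selects exactly 100. Values that come out as
// zero, negative, or at least the size of the training set abort with kFATAL.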
645 
646 ////////////////////////////////////////////////////////////////////////////////
647 
 648 void TMVA::MethodDNN::Train()
 649 {
 650  if (fInteractive && fInteractive->NotInitialized()){
 651  std::vector<TString> titles = {"Error on training set", "Error on test set"};
652  fInteractive->Init(titles);
653  // JsMVA progress bar maximum (100%)
654  fIPyMaxIter = 100;
655  }
656 
657  if (fArchitectureString == "GPU") {
658  TrainGpu();
661  return;
662  } else if (fArchitectureString == "OpenCL") {
663  Log() << kFATAL << "OpenCL backend not yet supported." << Endl;
664  return;
665  } else if (fArchitectureString == "CPU") {
666  TrainCpu();
669  return;
670  }
671 
672  Log() << kINFO << "Using Standard Implementation.";
673 
674  std::vector<Pattern> trainPattern;
675  std::vector<Pattern> testPattern;
676 
677  size_t nValidationSamples = GetNumValidationSamples();
678  size_t nTrainingSamples = GetEventCollection(Types::kTraining).size() - nValidationSamples;
679 
680  const std::vector<TMVA::Event *> &allData = GetEventCollection(Types::kTraining);
681  const std::vector<TMVA::Event *> eventCollectionTraining{allData.begin(), allData.begin() + nTrainingSamples};
682  const std::vector<TMVA::Event *> eventCollectionTesting{allData.begin() + nTrainingSamples, allData.end()};
683 
684  for (auto &event : eventCollectionTraining) {
685  const std::vector<Float_t>& values = event->GetValues();
 686  if (fAnalysisType == Types::kClassification) {
 687  double outputValue = event->GetClass () == 0 ? 0.9 : 0.1;
688  trainPattern.push_back(Pattern (values.begin(),
689  values.end(),
690  outputValue,
691  event->GetWeight()));
692  trainPattern.back().addInput(1.0);
693  } else if (fAnalysisType == Types::kMulticlass) {
694  std::vector<Float_t> oneHot(DataInfo().GetNClasses(), 0.0);
695  oneHot[event->GetClass()] = 1.0;
696  trainPattern.push_back(Pattern (values.begin(), values.end(),
697  oneHot.cbegin(), oneHot.cend(),
698  event->GetWeight()));
699  trainPattern.back().addInput(1.0);
700  } else {
701  const std::vector<Float_t>& targets = event->GetTargets ();
702  trainPattern.push_back(Pattern(values.begin(),
703  values.end(),
704  targets.begin(),
705  targets.end(),
706  event->GetWeight ()));
707  trainPattern.back ().addInput (1.0); // bias node
708  }
709  }
710 
711  for (auto &event : eventCollectionTesting) {
712  const std::vector<Float_t>& values = event->GetValues();
 713  if (fAnalysisType == Types::kClassification) {
 714  double outputValue = event->GetClass () == 0 ? 0.9 : 0.1;
715  testPattern.push_back(Pattern (values.begin(),
716  values.end(),
717  outputValue,
718  event->GetWeight()));
719  testPattern.back().addInput(1.0);
720  } else if (fAnalysisType == Types::kMulticlass) {
721  std::vector<Float_t> oneHot(DataInfo().GetNClasses(), 0.0);
722  oneHot[event->GetClass()] = 1.0;
723  testPattern.push_back(Pattern (values.begin(), values.end(),
724  oneHot.cbegin(), oneHot.cend(),
725  event->GetWeight()));
726  testPattern.back().addInput(1.0);
727  } else {
728  const std::vector<Float_t>& targets = event->GetTargets ();
729  testPattern.push_back(Pattern(values.begin(),
730  values.end(),
731  targets.begin(),
732  targets.end(),
733  event->GetWeight ()));
734  testPattern.back ().addInput (1.0); // bias node
735  }
736  }
737 
738  TMVA::DNN::Net net;
739  std::vector<double> weights;
740 
 741  net.SetIpythonInteractive(fInteractive, &fExitFromTraining, &fIPyMaxIter, &fIPyCurrentIter);
 742 
743  net.setInputSize(fNet.GetInputWidth() + 1);
744  net.setOutputSize(fNet.GetOutputWidth() + 1);
745 
746  for (size_t i = 0; i < fNet.GetDepth(); i++) {
747  EActivationFunction f = fNet.GetLayer(i).GetActivationFunction();
748  EnumFunction g = EnumFunction::LINEAR;
749  switch(f) {
750  case EActivationFunction::kIdentity: g = EnumFunction::LINEAR; break;
751  case EActivationFunction::kRelu: g = EnumFunction::RELU; break;
752  case EActivationFunction::kSigmoid: g = EnumFunction::SIGMOID; break;
753  case EActivationFunction::kTanh: g = EnumFunction::TANH; break;
754  case EActivationFunction::kSymmRelu: g = EnumFunction::SYMMRELU; break;
755  case EActivationFunction::kSoftSign: g = EnumFunction::SOFTSIGN; break;
756  case EActivationFunction::kGauss: g = EnumFunction::GAUSS; break;
757  }
758  if (i < fNet.GetDepth() - 1) {
759  net.addLayer(Layer(fNet.GetLayer(i).GetWidth(), g));
760  } else {
761  ModeOutputValues h = ModeOutputValues::DIRECT;
762  switch(fOutputFunction) {
763  case EOutputFunction::kIdentity: h = ModeOutputValues::DIRECT; break;
764  case EOutputFunction::kSigmoid: h = ModeOutputValues::SIGMOID; break;
765  case EOutputFunction::kSoftmax: h = ModeOutputValues::SOFTMAX; break;
766  }
767  net.addLayer(Layer(fNet.GetLayer(i).GetWidth(), g, h));
768  }
769  }
770 
771  switch(fNet.GetLossFunction()) {
772  case ELossFunction::kMeanSquaredError:
773  net.setErrorFunction(ModeErrorFunction::SUMOFSQUARES);
774  break;
775  case ELossFunction::kCrossEntropy:
776  net.setErrorFunction(ModeErrorFunction::CROSSENTROPY);
777  break;
778  case ELossFunction::kSoftmaxCrossEntropy:
779  net.setErrorFunction(ModeErrorFunction::CROSSENTROPY_MUTUALEXCLUSIVE);
780  break;
781  }
782 
783  switch(fWeightInitialization) {
784  case EInitialization::kGauss:
785  net.initializeWeights(WeightInitializationStrategy::XAVIER,
786  std::back_inserter(weights));
787  break;
788  case EInitialization::kUniform:
789  net.initializeWeights(WeightInitializationStrategy::XAVIERUNIFORM,
790  std::back_inserter(weights));
791  break;
792  default:
793  net.initializeWeights(WeightInitializationStrategy::XAVIER,
794  std::back_inserter(weights));
795  break;
796  }
797 
798  int idxSetting = 0;
799  for (auto s : fTrainingSettings) {
800 
 801  EnumRegularization r = EnumRegularization::NONE;
 802  switch(s.regularization) {
 803  case ERegularization::kNone: r = EnumRegularization::NONE; break;
804  case ERegularization::kL1: r = EnumRegularization::L1; break;
805  case ERegularization::kL2: r = EnumRegularization::L2; break;
806  }
807 
808  Settings * settings = new Settings(TString(), s.convergenceSteps, s.batchSize,
809  s.testInterval, s.weightDecay, r,
810  MinimizerType::fSteepest, s.learningRate,
811  s.momentum, 1, s.multithreading);
812  std::shared_ptr<Settings> ptrSettings(settings);
813  ptrSettings->setMonitoring (0);
814  Log() << kINFO
815  << "Training with learning rate = " << ptrSettings->learningRate ()
816  << ", momentum = " << ptrSettings->momentum ()
817  << ", repetitions = " << ptrSettings->repetitions ()
818  << Endl;
819 
820  ptrSettings->setProgressLimits ((idxSetting)*100.0/(fSettings.size ()),
821  (idxSetting+1)*100.0/(fSettings.size ()));
822 
823  const std::vector<double>& dropConfig = ptrSettings->dropFractions ();
824  if (!dropConfig.empty ()) {
825  Log () << kINFO << "Drop configuration" << Endl
826  << " drop repetitions = " << ptrSettings->dropRepetitions()
827  << Endl;
828  }
829 
830  int idx = 0;
831  for (auto f : dropConfig) {
832  Log () << kINFO << " Layer " << idx << " = " << f << Endl;
833  ++idx;
834  }
835  Log () << kINFO << Endl;
836 
837  DNN::Steepest minimizer(ptrSettings->learningRate(),
838  ptrSettings->momentum(),
839  ptrSettings->repetitions());
840  net.train(weights, trainPattern, testPattern, minimizer, *ptrSettings.get());
841  ptrSettings.reset();
842  Log () << kINFO << Endl;
843  idxSetting++;
844  }
845  size_t weightIndex = 0;
846  for (size_t l = 0; l < fNet.GetDepth(); l++) {
847  auto & layerWeights = fNet.GetLayer(l).GetWeights();
848  for (Int_t j = 0; j < layerWeights.GetNcols(); j++) {
849  for (Int_t i = 0; i < layerWeights.GetNrows(); i++) {
850  layerWeights(i,j) = weights[weightIndex];
851  weightIndex++;
852  }
853  }
854  auto & layerBiases = fNet.GetLayer(l).GetBiases();
855  if (l == 0) {
856  for (Int_t i = 0; i < layerBiases.GetNrows(); i++) {
857  layerBiases(i,0) = weights[weightIndex];
858  weightIndex++;
859  }
860  } else {
861  for (Int_t i = 0; i < layerBiases.GetNrows(); i++) {
862  layerBiases(i,0) = 0.0;
863  }
864  }
865  }
868 }
869 
870 ////////////////////////////////////////////////////////////////////////////////
871 
 872 void TMVA::MethodDNN::TrainGpu()
 873 {
874 
875 #ifdef DNNCUDA // Included only if DNNCUDA flag is set.
876  Log() << kINFO << "Start of neural network training on GPU." << Endl << Endl;
877 
878  size_t nValidationSamples = GetNumValidationSamples();
879  size_t nTrainingSamples = GetEventCollection(Types::kTraining).size() - nValidationSamples;
880  size_t nTestSamples = nValidationSamples;
881 
882  Log() << kDEBUG << "Using " << nValidationSamples << " validation samples." << Endl;
 883  Log() << kDEBUG << "Using " << nTestSamples << " test samples." << Endl;
884 
885  size_t trainingPhase = 1;
886  fNet.Initialize(fWeightInitialization);
887  for (TTrainingSettings & settings : fTrainingSettings) {
888 
889  if (fInteractive){
 890  fInteractive->ClearGraphs();
 891  }
892 
893  TNet<TCuda<>> net(settings.batchSize, fNet);
894  net.SetWeightDecay(settings.weightDecay);
895  net.SetRegularization(settings.regularization);
896 
 897  // Need to convert dropout probabilities to the convention used
 898  // by the backend implementation.
899  std::vector<Double_t> dropoutVector(settings.dropoutProbabilities);
900  for (auto & p : dropoutVector) {
901  p = 1.0 - p;
902  }
903  net.SetDropoutProbabilities(dropoutVector);
904 
905  net.InitializeGradients();
906  auto testNet = net.CreateClone(settings.batchSize);
907 
908  Log() << kINFO << "Training phase " << trainingPhase << " of "
909  << fTrainingSettings.size() << ":" << Endl;
910  trainingPhase++;
911 
912  using DataLoader_t = TDataLoader<TMVAInput_t, TCuda<>>;
913 
914  // Split training data into training and validation set
915  const std::vector<Event *> &allData = GetEventCollection(Types::kTraining);
916  const std::vector<Event *> trainingInputData =
917  std::vector<Event *>(allData.begin(), allData.begin() + nTrainingSamples);
918  const std::vector<Event *> testInputData =
919  std::vector<Event *>(allData.begin() + nTrainingSamples, allData.end());
920 
921  if (trainingInputData.size() != nTrainingSamples) {
922  Log() << kFATAL << "Inconsistent training sample size" << Endl;
923  }
924  if (testInputData.size() != nTestSamples) {
925  Log() << kFATAL << "Inconsistent test sample size" << Endl;
926  }
927 
928  size_t nThreads = 1;
929  TMVAInput_t trainingTuple = std::tie(trainingInputData, DataInfo());
930  TMVAInput_t testTuple = std::tie(testInputData, DataInfo());
931  DataLoader_t trainingData(trainingTuple, nTrainingSamples,
932  net.GetBatchSize(), net.GetInputWidth(),
933  net.GetOutputWidth(), nThreads);
934  DataLoader_t testData(testTuple, nTestSamples, testNet.GetBatchSize(),
935  net.GetInputWidth(), net.GetOutputWidth(),
936  nThreads);
937  DNN::TGradientDescent<TCuda<>> minimizer(settings.learningRate,
938  settings.convergenceSteps,
939  settings.testInterval);
940 
941  std::vector<TNet<TCuda<>>> nets{};
942  std::vector<TBatch<TCuda<>>> batches{};
943  nets.reserve(nThreads);
944  for (size_t i = 0; i < nThreads; i++) {
945  nets.push_back(net);
946  for (size_t j = 0; j < net.GetDepth(); j++)
947  {
948  auto &masterLayer = net.GetLayer(j);
949  auto &layer = nets.back().GetLayer(j);
950  TCuda<>::Copy(layer.GetWeights(),
951  masterLayer.GetWeights());
952  TCuda<>::Copy(layer.GetBiases(),
953  masterLayer.GetBiases());
954  }
955  }
956 
957  bool converged = false;
958  size_t stepCount = 0;
959  size_t batchesInEpoch = nTrainingSamples / net.GetBatchSize();
960 
961  std::chrono::time_point<std::chrono::system_clock> start, end;
962  start = std::chrono::system_clock::now();
963 
964  if (!fInteractive) {
965  Log() << std::setw(10) << "Epoch" << " | "
966  << std::setw(12) << "Train Err."
967  << std::setw(12) << "Test Err."
968  << std::setw(12) << "GFLOP/s"
969  << std::setw(12) << "Conv. Steps" << Endl;
970  std::string separator(62, '-');
971  Log() << separator << Endl;
972  }
973 
974  while (!converged)
975  {
976  stepCount++;
977 
978  // Perform minimization steps for a full epoch.
979  trainingData.Shuffle();
980  for (size_t i = 0; i < batchesInEpoch; i += nThreads) {
981  batches.clear();
982  for (size_t j = 0; j < nThreads; j++) {
983  batches.reserve(nThreads);
984  batches.push_back(trainingData.GetBatch());
985  }
986  if (settings.momentum > 0.0) {
987  minimizer.StepMomentum(net, nets, batches, settings.momentum);
988  } else {
989  minimizer.Step(net, nets, batches);
990  }
991  }
992 
993  if ((stepCount % minimizer.GetTestInterval()) == 0) {
994 
995  // Compute test error.
996  Double_t testError = 0.0;
997  for (auto batch : testData) {
998  auto inputMatrix = batch.GetInput();
999  auto outputMatrix = batch.GetOutput();
1000  testError += testNet.Loss(inputMatrix, outputMatrix);
1001  }
1002  testError /= (Double_t) (nTestSamples / settings.batchSize);
1003 
1004  end = std::chrono::system_clock::now();
1005 
1006  // Compute training error.
1007  Double_t trainingError = 0.0;
1008  for (auto batch : trainingData) {
1009  auto inputMatrix = batch.GetInput();
1010  auto outputMatrix = batch.GetOutput();
1011  trainingError += net.Loss(inputMatrix, outputMatrix);
1012  }
1013  trainingError /= (Double_t) (nTrainingSamples / settings.batchSize);
1014 
1015  // Compute numerical throughput.
1016  std::chrono::duration<double> elapsed_seconds = end - start;
1017  double seconds = elapsed_seconds.count();
1018  double nFlops = (double) (settings.testInterval * batchesInEpoch);
1019  nFlops *= net.GetNFlops() * 1e-9;
1020 
1021  converged = minimizer.HasConverged(testError);
1022  start = std::chrono::system_clock::now();
1023 
1024  if (fInteractive) {
1025  fInteractive->AddPoint(stepCount, trainingError, testError);
1026  fIPyCurrentIter = 100.0 * minimizer.GetConvergenceCount()
1027  / minimizer.GetConvergenceSteps ();
1028  if (fExitFromTraining) break;
1029  } else {
1030  Log() << std::setw(10) << stepCount << " | "
1031  << std::setw(12) << trainingError
1032  << std::setw(12) << testError
1033  << std::setw(12) << nFlops / seconds
1034  << std::setw(12) << minimizer.GetConvergenceCount() << Endl;
1035  if (converged) {
1036  Log() << Endl;
1037  }
1038  }
1039  }
1040  }
1041  for (size_t l = 0; l < net.GetDepth(); l++) {
1042  fNet.GetLayer(l).GetWeights() = (TMatrixT<Double_t>) net.GetLayer(l).GetWeights();
1043  fNet.GetLayer(l).GetBiases() = (TMatrixT<Double_t>) net.GetLayer(l).GetBiases();
1044  }
1045  }
1046 
1047 #else // DNNCUDA flag not set.
1048 
1049  Log() << kFATAL << "CUDA backend not enabled. Please make sure "
1050  "you have CUDA installed and it was successfully "
1051  "detected by CMAKE." << Endl;
1052 #endif // DNNCUDA
1053 }
1054 
1055 ////////////////////////////////////////////////////////////////////////////////
1056 
 1057 void TMVA::MethodDNN::TrainCpu()
 1058 {
1059 
1060 #ifdef DNNCPU // Included only if DNNCPU flag is set.
1061  Log() << kINFO << "Start of neural network training on CPU." << Endl << Endl;
1062 
1063  size_t nValidationSamples = GetNumValidationSamples();
1064  size_t nTrainingSamples = GetEventCollection(Types::kTraining).size() - nValidationSamples;
1065  size_t nTestSamples = nValidationSamples;
1066 
1067  Log() << kDEBUG << "Using " << nValidationSamples << " validation samples." << Endl;
 1068  Log() << kDEBUG << "Using " << nTestSamples << " test samples." << Endl;
1069 
1070  fNet.Initialize(fWeightInitialization);
1071 
1072  size_t trainingPhase = 1;
1073  for (TTrainingSettings & settings : fTrainingSettings) {
1074 
1075  if (fInteractive){
 1076  fInteractive->ClearGraphs();
 1077  }
1078 
1079  Log() << "Training phase " << trainingPhase << " of "
1080  << fTrainingSettings.size() << ":" << Endl;
1081  trainingPhase++;
1082 
1083  TNet<TCpu<>> net(settings.batchSize, fNet);
1084  net.SetWeightDecay(settings.weightDecay);
1085  net.SetRegularization(settings.regularization);
 1086  // Need to convert dropout probabilities to the convention used
 1087  // by the backend implementation.
1088  std::vector<Double_t> dropoutVector(settings.dropoutProbabilities);
1089  for (auto & p : dropoutVector) {
1090  p = 1.0 - p;
1091  }
1092  net.SetDropoutProbabilities(dropoutVector);
1093  //net.SetDropoutProbabilities(settings.dropoutProbabilities);
1094  net.InitializeGradients();
1095  auto testNet = net.CreateClone(settings.batchSize);
1096 
1097  using DataLoader_t = TDataLoader<TMVAInput_t, TCpu<>>;
1098 
1099  // Split training data into training and validation set
1100  const std::vector<Event *> &allData = GetEventCollection(Types::kTraining);
1101  const std::vector<Event *> trainingInputData =
1102  std::vector<Event *>(allData.begin(), allData.begin() + nTrainingSamples);
1103  const std::vector<Event *> testInputData =
1104  std::vector<Event *>(allData.begin() + nTrainingSamples, allData.end());
1105 
1106  if (trainingInputData.size() != nTrainingSamples) {
1107  Log() << kFATAL << "Inconsistent training sample size" << Endl;
1108  }
1109  if (testInputData.size() != nTestSamples) {
1110  Log() << kFATAL << "Inconsistent test sample size" << Endl;
1111  }
1112 
1113  size_t nThreads = 1;
1114  TMVAInput_t trainingTuple = std::tie(trainingInputData, DataInfo());
1115  TMVAInput_t testTuple = std::tie(testInputData, DataInfo());
1116  DataLoader_t trainingData(trainingTuple, nTrainingSamples,
1117  net.GetBatchSize(), net.GetInputWidth(),
1118  net.GetOutputWidth(), nThreads);
1119  DataLoader_t testData(testTuple, nTestSamples, testNet.GetBatchSize(),
1120  net.GetInputWidth(), net.GetOutputWidth(),
1121  nThreads);
1122  DNN::TGradientDescent<TCpu<>> minimizer(settings.learningRate,
1123  settings.convergenceSteps,
1124  settings.testInterval);
1125 
1126  std::vector<TNet<TCpu<>>> nets{};
1127  std::vector<TBatch<TCpu<>>> batches{};
1128  nets.reserve(nThreads);
1129  for (size_t i = 0; i < nThreads; i++) {
1130  nets.push_back(net);
1131  for (size_t j = 0; j < net.GetDepth(); j++)
1132  {
1133  auto &masterLayer = net.GetLayer(j);
1134  auto &layer = nets.back().GetLayer(j);
1135  TCpu<>::Copy(layer.GetWeights(),
1136  masterLayer.GetWeights());
1137  TCpu<>::Copy(layer.GetBiases(),
1138  masterLayer.GetBiases());
1139  }
1140  }
1141 
1142  bool converged = false;
1143  size_t stepCount = 0;
1144  size_t batchesInEpoch = nTrainingSamples / net.GetBatchSize();
1145 
1146  std::chrono::time_point<std::chrono::system_clock> start, end;
1147  start = std::chrono::system_clock::now();
1148 
1149  if (!fInteractive) {
1150  Log() << std::setw(10) << "Epoch" << " | "
1151  << std::setw(12) << "Train Err."
1152  << std::setw(12) << "Test Err."
1153  << std::setw(12) << "GFLOP/s"
1154  << std::setw(12) << "Conv. Steps" << Endl;
1155  std::string separator(62, '-');
1156  Log() << separator << Endl;
1157  }
1158 
1159  while (!converged)
1160  {
1161  stepCount++;
1162  // Perform minimization steps for a full epoch.
1163  trainingData.Shuffle();
1164  for (size_t i = 0; i < batchesInEpoch; i += nThreads) {
1165  batches.clear();
1166  for (size_t j = 0; j < nThreads; j++) {
1167  batches.reserve(nThreads);
1168  batches.push_back(trainingData.GetBatch());
1169  }
1170  if (settings.momentum > 0.0) {
1171  minimizer.StepMomentum(net, nets, batches, settings.momentum);
1172  } else {
1173  minimizer.Step(net, nets, batches);
1174  }
1175  }
1176 
1177  if ((stepCount % minimizer.GetTestInterval()) == 0) {
1178 
1179  // Compute test error.
1180  Double_t testError = 0.0;
1181  for (auto batch : testData) {
1182  auto inputMatrix = batch.GetInput();
1183  auto outputMatrix = batch.GetOutput();
1184  auto weightMatrix = batch.GetWeights();
1185  testError += testNet.Loss(inputMatrix, outputMatrix, weightMatrix);
1186  }
1187  testError /= (Double_t) (nTestSamples / settings.batchSize);
1188 
1189  end = std::chrono::system_clock::now();
1190 
1191  // Compute training error.
1192  Double_t trainingError = 0.0;
1193  for (auto batch : trainingData) {
1194  auto inputMatrix = batch.GetInput();
1195  auto outputMatrix = batch.GetOutput();
1196  auto weightMatrix = batch.GetWeights();
1197  trainingError += net.Loss(inputMatrix, outputMatrix, weightMatrix);
1198  }
1199  trainingError /= (Double_t) (nTrainingSamples / settings.batchSize);
1200 
1201  if (fInteractive){
1202  fInteractive->AddPoint(stepCount, trainingError, testError);
1203  fIPyCurrentIter = 100*(double)minimizer.GetConvergenceCount() /(double)settings.convergenceSteps;
1204  if (fExitFromTraining) break;
1205  }
1206 
1207  // Compute numerical throughput.
1208  std::chrono::duration<double> elapsed_seconds = end - start;
1209  double seconds = elapsed_seconds.count();
1210  double nFlops = (double) (settings.testInterval * batchesInEpoch);
1211  nFlops *= net.GetNFlops() * 1e-9;
1212 
1213  converged = minimizer.HasConverged(testError);
1214  start = std::chrono::system_clock::now();
1215 
1216  if (fInteractive) {
1217  fInteractive->AddPoint(stepCount, trainingError, testError);
1218  fIPyCurrentIter = 100.0 * minimizer.GetConvergenceCount()
1219  / minimizer.GetConvergenceSteps ();
1220  if (fExitFromTraining) break;
1221  } else {
1222  Log() << std::setw(10) << stepCount << " | "
1223  << std::setw(12) << trainingError
1224  << std::setw(12) << testError
1225  << std::setw(12) << nFlops / seconds
1226  << std::setw(12) << minimizer.GetConvergenceCount() << Endl;
1227  if (converged) {
1228  Log() << Endl;
1229  }
1230  }
1231  }
1232  }
1233 
1234 
1235  for (size_t l = 0; l < net.GetDepth(); l++) {
1236  auto & layer = fNet.GetLayer(l);
1237  layer.GetWeights() = (TMatrixT<Double_t>) net.GetLayer(l).GetWeights();
1238  layer.GetBiases() = (TMatrixT<Double_t>) net.GetLayer(l).GetBiases();
1239  }
1240  }
1241 
1242 #else // DNNCPU flag not set.
1243  Log() << kFATAL << "Multi-core CPU backend not enabled. Please make sure "
1244  "you have a BLAS implementation and it was successfully "
 1245  "detected by CMake, and that the imt CMake flag is set." << Endl;
1246 #endif // DNNCPU
1247 }
1248 
1249 ////////////////////////////////////////////////////////////////////////////////
1250 
 1251 Double_t TMVA::MethodDNN::GetMvaValue( Double_t* /*errLower*/, Double_t* /*errUpper*/ )
 1252 {
1253  size_t nVariables = GetEvent()->GetNVariables();
1254  Matrix_t X(1, nVariables);
1255  Matrix_t YHat(1, 1);
1256 
1257  const std::vector<Float_t>& inputValues = GetEvent()->GetValues();
1258  for (size_t i = 0; i < nVariables; i++) {
1259  X(0,i) = inputValues[i];
1260  }
1261 
1262  fNet.Prediction(YHat, X, fOutputFunction);
1263  return YHat(0,0);
1264 }
1265 
1266 ////////////////////////////////////////////////////////////////////////////////
1267 
1268 const std::vector<Float_t> & TMVA::MethodDNN::GetRegressionValues()
1269 {
1270  size_t nVariables = GetEvent()->GetNVariables();
1271  Matrix_t X(1, nVariables);
1272 
1273  const Event *ev = GetEvent();
1274  const std::vector<Float_t>& inputValues = ev->GetValues();
1275  for (size_t i = 0; i < nVariables; i++) {
1276  X(0,i) = inputValues[i];
1277  }
1278 
1279  size_t nTargets = std::max(1u, ev->GetNTargets());
1280  Matrix_t YHat(1, nTargets);
1281  std::vector<Float_t> output(nTargets);
1282  auto net = fNet.CreateClone(1);
1283  net.Prediction(YHat, X, fOutputFunction);
1284 
1285  for (size_t i = 0; i < nTargets; i++)
1286  output[i] = YHat(0, i);
1287 
1288  if (fRegressionReturnVal == NULL) {
1289  fRegressionReturnVal = new std::vector<Float_t>();
1290  }
1291  fRegressionReturnVal->clear();
1292 
1293  Event * evT = new Event(*ev);
1294  for (size_t i = 0; i < nTargets; ++i) {
1295  evT->SetTarget(i, output[i]);
1296  }
1297 
1298  const Event* evT2 = GetTransformationHandler().InverseTransform(evT);
1299  for (size_t i = 0; i < nTargets; ++i) {
1300  fRegressionReturnVal->push_back(evT2->GetTarget(i));
1301  }
1302  delete evT;
1303  return *fRegressionReturnVal;
1304 }
1305 
1306 const std::vector<Float_t> & TMVA::MethodDNN::GetMulticlassValues()
1307 {
1308  size_t nVariables = GetEvent()->GetNVariables();
1309  Matrix_t X(1, nVariables);
1310  Matrix_t YHat(1, DataInfo().GetNClasses());
1311  if (fMulticlassReturnVal == NULL) {
1312  fMulticlassReturnVal = new std::vector<Float_t>(DataInfo().GetNClasses());
1313  }
1314 
1315  const std::vector<Float_t>& inputValues = GetEvent()->GetValues();
1316  for (size_t i = 0; i < nVariables; i++) {
1317  X(0,i) = inputValues[i];
1318  }
1319 
1320  fNet.Prediction(YHat, X, fOutputFunction);
1321  for (size_t i = 0; i < (size_t) YHat.GetNcols(); i++) {
1322  (*fMulticlassReturnVal)[i] = YHat(0, i);
1323  }
1324  return *fMulticlassReturnVal;
1325 }
1326 
1327 ////////////////////////////////////////////////////////////////////////////////
1328 
1329 void TMVA::MethodDNN::AddWeightsXMLTo( void* parent ) const
1330 {
1331  void* nn = gTools().xmlengine().NewChild(parent, 0, "Weights");
1332  Int_t inputWidth = fNet.GetInputWidth();
1333  Int_t depth = fNet.GetDepth();
1334  char lossFunction = static_cast<char>(fNet.GetLossFunction());
1335  gTools().xmlengine().NewAttr(nn, 0, "InputWidth",
1336  gTools().StringFromInt(inputWidth));
1337  gTools().xmlengine().NewAttr(nn, 0, "Depth", gTools().StringFromInt(depth));
1338  gTools().xmlengine().NewAttr(nn, 0, "LossFunction", TString(lossFunction));
1339  gTools().xmlengine().NewAttr(nn, 0, "OutputFunction",
1340  TString(static_cast<char>(fOutputFunction)));
1341 
1342  for (Int_t i = 0; i < depth; i++) {
1343  const auto& layer = fNet.GetLayer(i);
1344  auto layerxml = gTools().xmlengine().NewChild(nn, 0, "Layer");
1345  int activationFunction = static_cast<int>(layer.GetActivationFunction());
1346  gTools().xmlengine().NewAttr(layerxml, 0, "ActivationFunction",
1347  TString::Itoa(activationFunction, 10));
1348  WriteMatrixXML(layerxml, "Weights", layer.GetWeights());
1349  WriteMatrixXML(layerxml, "Biases", layer.GetBiases());
1350  }
1351 }
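// [Editorial note, not part of the original source] Schematically, the weights written
// above have the structure sketched below (attribute values are illustrative; the
// matrix payload format is delegated to WriteMatrixXML and not reproduced here):
//
//    <Weights InputWidth="..." Depth="..." LossFunction="..." OutputFunction="...">
//      <Layer ActivationFunction="...">
//        <Weights rows="..." ...> ... </Weights>
//        <Biases ...> ... </Biases>
//      </Layer>
//      <!-- one <Layer> element per network layer -->
//    </Weights>
//
// ReadWeightsFromXML() below reads the "rows" attribute of each layer's Weights matrix
// to recover the layer width.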
1352 
1353 ////////////////////////////////////////////////////////////////////////////////
1354 
 1355 void TMVA::MethodDNN::ReadWeightsFromXML(void* rootXML)
 1356 {
1357  auto netXML = gTools().GetChild(rootXML, "Weights");
1358  if (!netXML){
1359  netXML = rootXML;
1360  }
1361 
1362  fNet.Clear();
1363  fNet.SetBatchSize(1);
1364 
1365  size_t inputWidth, depth;
1366  gTools().ReadAttr(netXML, "InputWidth", inputWidth);
1367  gTools().ReadAttr(netXML, "Depth", depth);
1368  char lossFunctionChar;
1369  gTools().ReadAttr(netXML, "LossFunction", lossFunctionChar);
1370  char outputFunctionChar;
1371  gTools().ReadAttr(netXML, "OutputFunction", outputFunctionChar);
1372 
1373  fNet.SetInputWidth(inputWidth);
1374  fNet.SetLossFunction(static_cast<ELossFunction>(lossFunctionChar));
1375  fOutputFunction = static_cast<EOutputFunction>(outputFunctionChar);
1376 
1377  size_t previousWidth = inputWidth;
1378  auto layerXML = gTools().xmlengine().GetChild(netXML, "Layer");
1379  for (size_t i = 0; i < depth; i++) {
1380  TString fString;
 1381  EActivationFunction f;
 1382 
1383  // Read activation function.
1384  gTools().ReadAttr(layerXML, "ActivationFunction", fString);
1385  f = static_cast<EActivationFunction>(fString.Atoi());
1386 
1387  // Read number of neurons.
1388  size_t width;
1389  auto matrixXML = gTools().GetChild(layerXML, "Weights");
1390  gTools().ReadAttr(matrixXML, "rows", width);
1391 
1392  fNet.AddLayer(width, f);
1393  TMatrixT<Double_t> weights(width, previousWidth);
1394  TMatrixT<Double_t> biases(width, 1);
1395  ReadMatrixXML(layerXML, "Weights", weights);
1396  ReadMatrixXML(layerXML, "Biases", biases);
1397  fNet.GetLayer(i).GetWeights() = weights;
1398  fNet.GetLayer(i).GetBiases() = biases;
1399 
1400  layerXML = gTools().GetNextChild(layerXML);
1401  previousWidth = width;
1402  }
1403 }
1404 
1405 ////////////////////////////////////////////////////////////////////////////////
1406 
1407 void TMVA::MethodDNN::ReadWeightsFromStream( std::istream & /*istr*/)
1408 {
1409 }
1410 
1411 ////////////////////////////////////////////////////////////////////////////////
1412 
 1413 const TMVA::Ranking* TMVA::MethodDNN::CreateRanking()
 1414 {
1415  fRanking = new Ranking( GetName(), "Importance" );
1416  for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
1417  fRanking->AddRank( Rank( GetInputLabel(ivar), 1.0));
1418  }
1419  return fRanking;
1420 }
1421 
1422 ////////////////////////////////////////////////////////////////////////////////
1423 
1424 void TMVA::MethodDNN::MakeClassSpecific( std::ostream& /*fout*/,
1425  const TString& /*className*/ ) const
1426 {
1427 }
1428 
1429 ////////////////////////////////////////////////////////////////////////////////
1430 
 1431 void TMVA::MethodDNN::GetHelpMessage() const
 1432 {
1433  // get help message text
1434  //
1435  // typical length of text line:
1436  // "|--------------------------------------------------------------|"
1437  TString col = gConfig().WriteOptionsReference() ? TString() : gTools().Color("bold");
1438  TString colres = gConfig().WriteOptionsReference() ? TString() : gTools().Color("reset");
1439 
1440  Log() << Endl;
1441  Log() << col << "--- Short description:" << colres << Endl;
1442  Log() << Endl;
1443  Log() << "The DNN neural network is a feedforward" << Endl;
1444  Log() << "multilayer perceptron implementation. The DNN has a user-" << Endl;
1445  Log() << "defined hidden layer architecture, where the number of input (output)" << Endl;
1446  Log() << "nodes is determined by the input variables (output classes, i.e., " << Endl;
1447  Log() << "signal and one background, regression or multiclass). " << Endl;
1448  Log() << Endl;
1449  Log() << col << "--- Performance optimisation:" << colres << Endl;
1450  Log() << Endl;
1451 
1452  const char* txt = "The DNN supports various options to improve performance in terms of training speed and \n \
1453 reduction of overfitting: \n \
1454 \n \
 1455  - different training settings can be stacked, such that the initial training \n\
1456  is done with a large learning rate and a large drop out fraction whilst \n \
1457  in a later stage learning rate and drop out can be reduced. \n \
1458  - drop out \n \
1459  [recommended: \n \
1460  initial training stage: 0.0 for the first layer, 0.5 for later layers. \n \
1461  later training stage: 0.1 or 0.0 for all layers \n \
1462  final training stage: 0.0] \n \
 1463  Drop out is a technique where at each training cycle a fraction of arbitrary \n \
1464  nodes is disabled. This reduces co-adaptation of weights and thus reduces overfitting. \n \
1465  - L1 and L2 regularization are available \n \
1466  - Minibatches \n \
1467  [recommended 10 - 150] \n \
1468  Arbitrary mini-batch sizes can be chosen. \n \
1469  - Multithreading \n \
1470  [recommended: True] \n \
1471  Multithreading can be turned on. The minibatches are distributed to the available \n \
1472  cores. The algorithm is lock-free (\"Hogwild!\"-style) for each cycle. \n \
1473  \n \
1474  Options: \n \
1475  \"Layout\": \n \
1476  - example: \"TANH|(N+30)*2,TANH|(N+30),LINEAR\" \n \
1477  - meaning: \n \
1478  . two hidden layers (separated by \",\") \n \
1479  . the activation function is TANH (other options: RELU, SOFTSIGN, LINEAR) \n \
1480  . the activation function for the output layer is LINEAR \n \
1481  . the first hidden layer has (N+30)*2 nodes where N is the number of input neurons \n \
1482  . the second hidden layer has N+30 nodes, where N is the number of input neurons \n \
1483  . the number of nodes in the output layer is determined by the number of output nodes \n \
1484  and can therefore not be chosen freely. \n \
1485  \n \
1486  \"ErrorStrategy\": \n \
1487  - SUMOFSQUARES \n \
1488  The error of the neural net is determined by a sum-of-squares error function \n \
1489  For regression, this is the only possible choice. \n \
1490  - CROSSENTROPY \n \
1491  The error of the neural net is determined by a cross entropy function. The \n \
1492  output values are automatically (internally) transformed into probabilities \n \
1493  using a sigmoid function. \n \
1494  For signal/background classification this is the default choice. \n \
1495  For multiclass using cross entropy more than one or no output classes \n \
1496  can be equally true or false (e.g. Event 0: A and B are true, Event 1: \n \
 1497  A and C are true, Event 2: C is true, ...) \n \
1498  - MUTUALEXCLUSIVE \n \
1499  In multiclass settings, exactly one of the output classes can be true (e.g. either A or B or C) \n \
1500  \n \
1501  \"WeightInitialization\" \n \
1502  - XAVIER \n \
1503  [recommended] \n \
1504  \"Xavier Glorot & Yoshua Bengio\"-style of initializing the weights. The weights are chosen randomly \n \
1505  such that the variance of the values of the nodes is preserved for each layer. \n \
1506  - XAVIERUNIFORM \n \
1507  The same as XAVIER, but with uniformly distributed weights instead of gaussian weights \n \
1508  - LAYERSIZE \n \
1509  Random values scaled by the layer size \n \
1510  \n \
1511  \"TrainingStrategy\" \n \
1512  - example: \"LearningRate=1e-1,Momentum=0.3,ConvergenceSteps=50,BatchSize=30,TestRepetitions=7,WeightDecay=0.0,Renormalize=L2,DropConfig=0.0,DropRepetitions=5|LearningRate=1e-4,Momentum=0.3,ConvergenceSteps=50,BatchSize=20,TestRepetitions=7,WeightDecay=0.001,Renormalize=L2,DropFraction=0.0,DropRepetitions=5\" \n \
1513  - explanation: two stacked training settings separated by \"|\" \n \
1514  . first training setting: \"LearningRate=1e-1,Momentum=0.3,ConvergenceSteps=50,BatchSize=30,TestRepetitions=7,WeightDecay=0.0,Renormalize=L2,DropConfig=0.0,DropRepetitions=5\" \n \
1515  . second training setting : \"LearningRate=1e-4,Momentum=0.3,ConvergenceSteps=50,BatchSize=20,TestRepetitions=7,WeightDecay=0.001,Renormalize=L2,DropFractions=0.0,DropRepetitions=5\" \n \
1516  . LearningRate : \n \
1517  - recommended for classification: 0.1 initially, 1e-4 later \n \
1518  - recommended for regression: 1e-4 and less \n \
1519  . Momentum : \n \
1520  preserve a fraction of the momentum for the next training batch [fraction = 0.0 - 1.0] \n \
1521  . Repetitions : \n \
1522  train \"Repetitions\" repetitions with the same minibatch before switching to the next one \n \
1523  . ConvergenceSteps : \n \
1524  Assume that convergence is reached after \"ConvergenceSteps\" cycles where no improvement \n \
1525  of the error on the test samples has been found. (Mind that only at each \"TestRepetitions\" \n \
1526  cycle the test samples are evaluated and thus the convergence is checked) \n \
1527  . BatchSize \n \
1528  Size of the mini-batches. \n \
1529  . TestRepetitions \n \
1530  Perform testing the neural net on the test samples each \"TestRepetitions\" cycle \n \
1531  . WeightDecay \n \
1532  If \"Renormalize\" is set to L1 or L2, \"WeightDecay\" provides the renormalization factor \n \
1533  . Renormalize \n \
1534  NONE, L1 (|w|) or L2 (w^2) \n \
1535  . DropConfig \n \
1536  Drop a fraction of arbitrary nodes of each of the layers according to the values given \n \
1537  in the DropConfig. \n \
1538  [example: DropConfig=0.0+0.5+0.3 \n \
1539  meaning: drop no nodes in layer 0 (input layer), half of the nodes in layer 1 and 30% of the nodes \n \
1540  in layer 2 \n \
1541  recommended: leave all the nodes turned on for the input layer (layer 0) \n \
1542  turn off half of the nodes in later layers for the initial training; leave all nodes \n \
1543  turned on (0.0) in later training stages] \n \
1544  . DropRepetitions \n \
1545  Each \"DropRepetitions\" cycle the configuration of which nodes are dropped is changed \n \
1546  [recommended : 1] \n \
1547  . Multithreading \n \
1548  turn on multithreading [recommended: True] \n \
1549  \n";
1550  Log () << txt << Endl;
1551 }
1552 
1553 } // namespace TMVA
Definition: MethodBase.cxx:174
DataSetInfo & DataInfo() const
Definition: MethodBase.h:399
Ssiz_t First(char c) const
Find first occurrence of a character c.
Definition: TString.cxx:477
UInt_t fIPyCurrentIter
Definition: MethodBase.h:437
UInt_t GetNTargets() const
accessor to the number of targets
Definition: Event.cxx:320
KeyValueVector_t ParseKeyValueString(TString parseString, TString blockDelim, TString tokenDelim)
void initializeWeights(WeightInitializationStrategy eInitStrategy, OutIterator itWeight)
initialize the weights with the given strategy
Definition: NeuralNet.icc:1482
const TString & GetString() const
Definition: TObjString.h:47
Float_t GetTarget(UInt_t itgt) const
Definition: Event.h:97
ROOT::R::TRInterface & r
Definition: Object.C:4
const char * GetName() const
Definition: MethodBase.h:323
TDataLoader.
Definition: DataLoader.h:79
The Formula class.
Definition: TFormula.h:83
const Ranking * CreateRanking()
Definition: MethodDNN.cxx:1413
Bool_t BeginsWith(const char *s, ECaseCompare cmp=kExact) const
Definition: TString.h:561
UInt_t fIPyMaxIter
Definition: MethodBase.h:437
void SetRegularization(ERegularization R)
Definition: Net.h:150
unsigned int UInt_t
Definition: RtypesCore.h:42
const Handle_t kNone
Definition: GuiTypes.h:87
const Event * InverseTransform(const Event *, Bool_t suppressIfNoTargets=true) const
size_t GetDepth() const
Definition: Net.h:137
auto regularization(const typename Architecture_t::Matrix_t &A, ERegularization R) -> decltype(Architecture_t::L1Regularization(A))
Evaluate the regularization functional for a given weight matrix.
Definition: Functions.h:205
TSubString Strip(EStripType s=kTrailing, char c=' ') const
Return a substring of self stripped at beginning and/or end.
Definition: TString.cxx:1080
void SetTarget(UInt_t itgt, Float_t value)
set the target value (dimension itgt) to value
Definition: Event.cxx:360
void ReadAttr(void *node, const char *, T &value)
read attribute from xml
Definition: Tools.h:290
Tools & gTools()
Settings for the training of the neural net.
Definition: NeuralNet.h:736
TNet< Architecture_t, TSharedLayer< Architecture_t > > CreateClone(size_t batchSize)
Create a clone that uses the same weight and biases matrices but potentially a difference batch size...
Definition: Net.h:212
UInt_t GetNVariables() const
Definition: MethodBase.h:334
UInt_t GetNVariables() const
accessor to the number of variables
Definition: Event.cxx:309
const Bool_t kFALSE
Definition: RtypesCore.h:88
Layer defines the layout of a layer.
Definition: NeuralNet.h:676
Bool_t IgnoreEventsWithNegWeightsInTraining() const
Definition: MethodBase.h:673
XMLAttrPointer_t NewAttr(XMLNodePointer_t xmlnode, XMLNsPointer_t, const char *name, const char *value)
creates new attribute for xmlnode, namespaces are not supported for attributes
Definition: TXMLEngine.cxx:578
TObjArray * Tokenize(const TString &delim) const
This function is used to isolate sequential tokens in a TString.
Definition: TString.cxx:2251
const std::vector< TMVA::Event * > & GetEventCollection(Types::ETreeType type)
returns the event collection (i.e.
size_t GetOutputWidth() const
Definition: Net.h:144
#define NONE
Definition: Rotated.cxx:52
void Copy(void *source, void *dest)
size_t GetInputWidth() const
Definition: Net.h:143
#define ClassImp(name)
Definition: Rtypes.h:359
UInt_t GetNumValidationSamples()
ModeOutputValues
Definition: NeuralNet.h:179
double Double_t
Definition: RtypesCore.h:55
Bool_t WriteOptionsReference() const
Definition: Config.h:70
Deep Neural Network Implementation.
Definition: MethodDNN.h:65
TString fetchValue(const std::map< TString, TString > &keyValueMap, TString key)
Definition: MethodDNN.cxx:308
std::vector< Float_t > * fMulticlassReturnVal
Definition: MethodBase.h:585
double train(std::vector< double > &weights, std::vector< Pattern > &trainPattern, const std::vector< Pattern > &testPattern, Minimizer &minimizer, Settings &settings)
start the training
Definition: NeuralNet.icc:711
EOutputFunction
Enum that represents output functions.
Definition: Functions.h:43
int type
Definition: TGX11.cxx:120
void * GetNextChild(void *prevchild, const char *childname=0)
XML helpers.
Definition: Tools.cxx:1173
static constexpr double s
you should not use this method at all Int_t Int_t Double_t Double_t Double_t e
Definition: TRolke.cxx:630
DNN::ERegularization regularization
Definition: MethodDNN.h:82
void AddPreDefVal(const T &)
Definition: Configurable.h:168
std::tuple< const std::vector< Event * > &, const DataSetInfo & > TMVAInput_t
Definition: DataLoader.h:40
void ExitFromTraining()
Definition: MethodBase.h:451
void DeclareOptions()
const TString & Color(const TString &)
human readable color strings
Definition: Tools.cxx:839
#define REGISTER_METHOD(CLASS)
for example
virtual ~MethodDNN()
void addLayer(Layer &layer)
add a layer (layout)
Definition: NeuralNet.h:1101
Abstract ClassifierFactory template that handles arbitrary types.
Ranking * fRanking
Definition: MethodBase.h:574
std::vector< Float_t > & GetValues()
Definition: Event.h:89
IPythonInteractive * fInteractive
Definition: MethodBase.h:435
virtual Double_t GetMvaValue(Double_t *err=0, Double_t *errUpper=0)
Definition: MethodDNN.cxx:1251
void InitializeGradients()
Initialize the gradients in the net to zero.
Definition: Net.h:263
auto * l
Definition: textangle.C:4
XMLNodePointer_t GetChild(XMLNodePointer_t xmlnode, Bool_t realnode=kTRUE)
returns first child of xmlnode
virtual void AddRank(const Rank &rank)
Add a new rank take ownership of it.
Definition: Ranking.cxx:86
MethodDNN(const TString &jobName, const TString &methodTitle, DataSetInfo &theData, const TString &theOption)
XMLNodePointer_t NewChild(XMLNodePointer_t parent, XMLNsPointer_t ns, const char *name, const char *content=0)
create new child element for parent node
Definition: TXMLEngine.cxx:707
Int_t Atoi() const
Return integer value of string.
Definition: TString.cxx:1975
EActivationFunction
Enum that represents layer activation functions.
Definition: Functions.h:31
std::vector< Float_t > * fRegressionReturnVal
Definition: MethodBase.h:584
Double_t Atof() const
Return floating-point value contained in string.
Definition: TString.cxx:2041
void AddWeightsXMLTo(void *parent) const
Definition: MethodDNN.cxx:1329
size_t GetBatchSize() const
Definition: Net.h:138
EnumRegularization
Definition: NeuralNet.h:173
virtual const std::vector< Float_t > & GetMulticlassValues()
Definition: MethodDNN.cxx:1306
const Bool_t kTRUE
Definition: RtypesCore.h:87
virtual const std::vector< Float_t > & GetRegressionValues()
Definition: MethodDNN.cxx:1268
Layer_t & GetLayer(size_t i)
Definition: Net.h:139
const char * Data() const
Definition: TString.h:345
static constexpr double g