ROOT 6.12/07 Reference Guide
NeuralNet.cxx

#include "TMVA/NeuralNet.h"

#include "TMVA/MethodDNN.h"

namespace TMVA
{
namespace DNN
{

double gaussDouble (double mean, double sigma)
{
   static std::default_random_engine generator;
   std::normal_distribution<double> distribution (mean, sigma);
   return distribution (generator);
}

double uniformDouble (double minValue, double maxValue)
{
   static std::default_random_engine generator;
   std::uniform_real_distribution<double> distribution (minValue, maxValue);
   return distribution (generator);
}

int randomInt (int maxValue)
{
   static std::default_random_engine generator;
   std::uniform_int_distribution<int> distribution (0, maxValue-1);
   return distribution (generator);
}

double studenttDouble (double distributionParameter)
{
   static std::default_random_engine generator;
   std::student_t_distribution<double> distribution (distributionParameter);
   return distribution (generator);
}
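
// Illustrative use of the random helpers above (a sketch, not part of the file's API docs):
//
//    double w = gaussDouble (0.0, 0.1);     // normal draw, mean 0, sigma 0.1
//    double u = uniformDouble (-1.0, 1.0);  // uniform draw in [-1, 1)
//    int    k = randomInt (10);             // uniform integer in [0, 9]
//    double t = studenttDouble (5.0);       // Student-t draw, 5 degrees of freedom
//
// Each helper keeps its own function-local static std::default_random_engine that is
// default-seeded, so every program run sees the same sequence of draws.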

LayerData::LayerData (size_t inputSize)
   : m_hasDropOut (false)
   , m_isInputLayer (true)
   , m_hasWeights (false)
   , m_hasGradients (false)
   , m_eModeOutput (ModeOutputValues::DIRECT)
{
   m_size = inputSize;
   m_deltas.assign (m_size, 0);
}

LayerData::LayerData (const_iterator_type itInputBegin, const_iterator_type itInputEnd, ModeOutputValues eModeOutput)
   : m_hasDropOut (false)
   , m_isInputLayer (true)
   , m_hasWeights (false)
   , m_hasGradients (false)
   , m_eModeOutput (eModeOutput)
{
   m_itInputBegin = itInputBegin;
   m_itInputEnd = itInputEnd;
   m_size = std::distance (itInputBegin, itInputEnd);
   m_deltas.assign (m_size, 0);
}

LayerData::LayerData (size_t _size,
                      const_iterator_type itWeightBegin,
                      iterator_type itGradientBegin,
                      std::shared_ptr<std::function<double(double)>> _activationFunction,
                      std::shared_ptr<std::function<double(double)>> _inverseActivationFunction,
                      ModeOutputValues eModeOutput)
   : m_size (_size)
   , m_hasDropOut (false)
   , m_itConstWeightBegin (itWeightBegin)
   , m_itGradientBegin (itGradientBegin)
   , m_activationFunction (_activationFunction)
   , m_inverseActivationFunction (_inverseActivationFunction)
   , m_isInputLayer (false)
   , m_hasWeights (true)
   , m_hasGradients (true)
   , m_eModeOutput (eModeOutput)
{
   m_values.assign (_size, 0);
   m_deltas.assign (_size, 0);
   m_valueGradients.assign (_size, 0);
}

LayerData::LayerData (size_t _size, const_iterator_type itWeightBegin,
                      std::shared_ptr<std::function<double(double)>> _activationFunction,
                      ModeOutputValues eModeOutput)
   : m_size (_size)
   , m_hasDropOut (false)
   , m_itConstWeightBegin (itWeightBegin)
   , m_activationFunction (_activationFunction)
   , m_inverseActivationFunction ()
   , m_isInputLayer (false)
   , m_hasWeights (true)
   , m_hasGradients (false)
   , m_eModeOutput (eModeOutput)
{
   m_values.assign (_size, 0);
}
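
// The four LayerData constructors above cover the distinct use cases: (1) an input
// layer of a given size, (2) an input layer viewing external node values through
// iterators, (3) a trainable layer with weights and gradients, and (4) a
// forward-only layer that has weights but no gradients (evaluation mode).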

LayerData::container_type LayerData::computeProbabilities () const
{
   container_type probabilitiesContainer;
   if (isFlagSet (ModeOutputValues::SIGMOID, m_eModeOutput))
   {
      std::transform (begin (m_values), end (m_values), std::back_inserter (probabilitiesContainer), (*Sigmoid.get ()));
   }
   else if (isFlagSet (ModeOutputValues::SOFTMAX, m_eModeOutput))
   {
      double sum = 0;
      probabilitiesContainer = m_values;
      std::for_each (begin (probabilitiesContainer), end (probabilitiesContainer), [&sum](double& p){ p = std::exp (p); sum += p; });
      if (sum != 0)
         std::for_each (begin (probabilitiesContainer), end (probabilitiesContainer), [sum](double& p){ p /= sum; });
   }
   else
   {
      probabilitiesContainer.assign (begin (m_values), end (m_values));
   }
   return probabilitiesContainer;
}
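
// In the SOFTMAX branch above the node values v_i become probabilities
//
//    p_i = exp(v_i) / sum_j exp(v_j)
//
// while the SIGMOID branch maps each node independently through the logistic
// function 1/(1+exp(-v)). Note that exp() of large node values can overflow;
// a common stabilisation (not applied here) subtracts max_j v_j from every v_i.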

Layer::Layer (size_t _numNodes, EnumFunction _activationFunction, ModeOutputValues eModeOutputValues)
   : m_numNodes (_numNodes)
   , m_eModeOutputValues (eModeOutputValues)
   , m_activationFunctionType (_activationFunction)
{
   for (size_t iNode = 0; iNode < _numNodes; ++iNode)
   {
      auto actFnc = Linear;
      auto invActFnc = InvLinear;
      switch (_activationFunction)
      {
      case EnumFunction::ZERO:
         actFnc = ZeroFnc;
         invActFnc = ZeroFnc;
         break;
      case EnumFunction::LINEAR:
         actFnc = Linear;
         invActFnc = InvLinear;
         break;
      case EnumFunction::TANH:
         actFnc = Tanh;
         invActFnc = InvTanh;
         break;
      case EnumFunction::RELU:
         actFnc = ReLU;
         invActFnc = InvReLU;
         break;
      case EnumFunction::SYMMRELU:
         actFnc = SymmReLU;
         invActFnc = InvSymmReLU;
         break;
      case EnumFunction::TANHSHIFT:
         actFnc = TanhShift;
         invActFnc = InvTanhShift;
         break;
      case EnumFunction::SOFTSIGN:
         actFnc = SoftSign;
         invActFnc = InvSoftSign;
         break;
      case EnumFunction::SIGMOID:
         actFnc = Sigmoid;
         invActFnc = InvSigmoid;
         break;
      case EnumFunction::GAUSS:
         actFnc = Gauss;
         invActFnc = InvGauss;
         break;
      case EnumFunction::GAUSSCOMPLEMENT:
         actFnc = GaussComplement;
         invActFnc = InvGaussComplement;
         break;
      }
      m_activationFunction = actFnc;
      m_inverseActivationFunction = invActFnc;
   }
}
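
// A Layer couples each activation function with its "inverse" counterpart used in
// the backward pass. An illustrative construction (a sketch only):
//
//    Layer hidden (30, EnumFunction::TANH);                           // 30 tanh nodes
//    Layer output (1, EnumFunction::LINEAR, ModeOutputValues::SIGMOID);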

Settings::Settings (TString name,
                    size_t _convergenceSteps, size_t _batchSize, size_t _testRepetitions,
                    double _factorWeightDecay, EnumRegularization eRegularization,
                    MinimizerType _eMinimizerType, double _learningRate,
                    double _momentum, int _repetitions, bool _useMultithreading)
   : m_timer (100, name)
   , m_minProgress (0)
   , m_maxProgress (100)
   , m_convergenceSteps (_convergenceSteps)
   , m_batchSize (_batchSize)
   , m_testRepetitions (_testRepetitions)
   , m_factorWeightDecay (_factorWeightDecay)
   , count_E (0)
   , count_dE (0)
   , count_mb_E (0)
   , count_mb_dE (0)
   , m_regularization (eRegularization)
   , fLearningRate (_learningRate)
   , fMomentum (_momentum)
   , fRepetitions (_repetitions)
   , fMinimizerType (_eMinimizerType)
   , m_convergenceCount (0)
   , m_maxConvergenceCount (0)
   , m_minError (1e10)
   , m_useMultithreading (_useMultithreading)
   , fMonitoring (NULL)
{
}
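
// An illustrative Settings construction (a sketch; the header's defaults are
// convergenceSteps=15, batchSize=10, testRepetitions=7, factorWeightDecay=1e-5,
// regularization NONE, minimizer fSteepest, learningRate=1e-5, momentum=0.3,
// repetitions=3, multithreading on):
//
//    Settings settings ("dnn", /*convergenceSteps*/ 20, /*batchSize*/ 30);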

Settings::~Settings ()
{
}

/** \brief action to be done when the training cycle is started (e.g. update some monitoring output)
 *
 */
void ClassificationSettings::startTrainCycle ()
{
   if (fMonitoring)
   {
      create ("ROC", 100, 0, 1, 100, 0, 1);
      create ("Significance", 100, 0, 1, 100, 0, 3);
      create ("OutputSig", 100, 0, 1);
      create ("OutputBkg", 100, 0, 1);
      fMonitoring->ProcessEvents ();
   }
}

/** \brief action to be done when the training cycle is ended (e.g. update some monitoring output)
 *
 */
void ClassificationSettings::endTrainCycle (double /*error*/)
{
   if (fMonitoring) fMonitoring->ProcessEvents ();
}

/** \brief action to be done after the computation of a test sample (e.g. update some monitoring output)
 *
 */
void ClassificationSettings::testSample (double /*error*/, double output, double target, double weight)
{
   m_output.push_back (output);
   m_targets.push_back (target);
   m_weights.push_back (weight);
}

/** \brief action to be done when the test cycle is started (e.g. update some monitoring output)
 *
 */
void ClassificationSettings::startTestCycle ()
{
   m_output.clear ();
   m_targets.clear ();
   m_weights.clear ();
}

/** \brief action to be done when the test cycle is ended (e.g. update some monitoring output)
 *
 */
void ClassificationSettings::endTestCycle ()
{
   if (m_output.empty ())
      return;
   double minVal = *std::min_element (begin (m_output), end (m_output));
   double maxVal = *std::max_element (begin (m_output), end (m_output));
   const size_t numBinsROC = 1000;
   const size_t numBinsData = 100;

   std::vector<double> truePositives (numBinsROC+1, 0);
   std::vector<double> falsePositives (numBinsROC+1, 0);
   std::vector<double> trueNegatives (numBinsROC+1, 0);
   std::vector<double> falseNegatives (numBinsROC+1, 0);

   std::vector<double> x (numBinsData, 0);
   std::vector<double> datSig (numBinsData+1, 0);
   std::vector<double> datBkg (numBinsData+1, 0);

   double binSizeROC = (maxVal - minVal)/(double)numBinsROC;
   double binSizeData = (maxVal - minVal)/(double)numBinsData;

   double sumWeightsSig = 0.0;
   double sumWeightsBkg = 0.0;

   for (size_t b = 0; b < numBinsData; ++b)
   {
      double binData = minVal + b*binSizeData;
      x.at (b) = binData;
   }

   if (fabs (binSizeROC) < 0.0001)
      return;

   for (size_t i = 0, iEnd = m_output.size (); i < iEnd; ++i)
   {
      double val = m_output.at (i);
      double truth = m_targets.at (i);
      double weight = m_weights.at (i);

      bool isSignal = (truth > 0.5 ? true : false);

      if (m_sumOfSigWeights != 0 && m_sumOfBkgWeights != 0)
      {
         if (isSignal)
            weight *= m_sumOfSigWeights;
         else
            weight *= m_sumOfBkgWeights;
      }

      size_t binROC = (val-minVal)/binSizeROC;
      size_t binData = (val-minVal)/binSizeData;

      if (isSignal)
      {
         for (size_t n = 0; n <= binROC; ++n)
         {
            truePositives.at (n) += weight;
         }
         for (size_t n = binROC+1; n < numBinsROC; ++n)
         {
            falseNegatives.at (n) += weight;
         }

         datSig.at (binData) += weight;
         sumWeightsSig += weight;
      }
      else
      {
         for (size_t n = 0; n <= binROC; ++n)
         {
            falsePositives.at (n) += weight;
         }
         for (size_t n = binROC+1; n < numBinsROC; ++n)
         {
            trueNegatives.at (n) += weight;
         }

         datBkg.at (binData) += weight;
         sumWeightsBkg += weight;
      }
   }

   std::vector<double> sigEff;
   std::vector<double> backRej;

   double bestSignificance = 0;
   double bestCutSignificance = 0;

   double numEventsScaleFactor = 1.0;
   if (m_scaleToNumEvents > 0)
   {
      size_t numEvents = m_output.size ();
      numEventsScaleFactor = double (m_scaleToNumEvents)/double (numEvents);
   }

   clear ("ROC");
   clear ("Significance");

   for (size_t i = 0; i < numBinsROC; ++i)
   {
      double tp = truePositives.at (i) * numEventsScaleFactor;
      double fp = falsePositives.at (i) * numEventsScaleFactor;
      double tn = trueNegatives.at (i) * numEventsScaleFactor;
      double fn = falseNegatives.at (i) * numEventsScaleFactor;

      double seff = (tp+fn == 0.0 ? 1.0 : (tp / (tp+fn)));
      double brej = (tn+fp == 0.0 ? 0.0 : (tn / (tn+fp)));

      sigEff.push_back (seff);
      backRej.push_back (brej);

      // m_histROC->Fill (seff, brej);
      addPoint ("ROC", seff, brej); // x, y

      double currentCut = (i * binSizeROC)+minVal;

      double sig = tp;
      double bkg = fp;
      double significance = sig / sqrt (sig + bkg);
      if (significance > bestSignificance)
      {
         bestSignificance = significance;
         bestCutSignificance = currentCut;
      }

      addPoint ("Significance", currentCut, significance);
      // m_histSignificance->Fill (currentCut, significance);
   }

   m_significances.push_back (bestSignificance);
   static size_t testCycle = 0;

   clear ("OutputSig");
   clear ("OutputBkg");
   for (size_t i = 0; i < numBinsData; ++i)
   {
      addPoint ("OutputSig", x.at (i), datSig.at (i)/sumWeightsSig);
      addPoint ("OutputBkg", x.at (i), datBkg.at (i)/sumWeightsBkg);
      // m_histOutputSignal->Fill (x.at (i), datSig.at (1)/sumWeightsSig);
      // m_histOutputBackground->Fill (x.at (i), datBkg.at (1)/sumWeightsBkg);
   }

   ++testCycle;

   if (fMonitoring)
   {
      plot ("ROC", "", 2, kRed);
      plot ("Significance", "", 3, kRed);
      plot ("OutputSig", "", 4, kRed);
      plot ("OutputBkg", "same", 4, kBlue);
      fMonitoring->ProcessEvents ();
   }

   m_cutValue = bestCutSignificance;
}
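
// The test-cycle protocol is: startTestCycle() clears the buffers, testSample()
// is called once per test event, and endTestCycle() then builds the ROC curve and
// scans for the cut value that maximises the significance S / sqrt(S + B), where
// S and B are the (optionally rescaled) signal and background weights passing the
// cut. A caller might look like this (illustrative sketch; testEvents, net.output,
// ev.target and ev.weight are hypothetical):
//
//    settings.startTestCycle ();
//    for (const auto& ev : testEvents)
//       settings.testSample (0.0, net.output (ev), ev.target, ev.weight);
//    settings.endTestCycle ();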

/** \brief check for convergence
 *
 */
bool Settings::hasConverged (double testError)
{
   // std::cout << "check convergence; minError " << m_minError << " current " << testError
   //           << " current convergence count " << m_convergenceCount << std::endl;
   if (testError < m_minError*0.999)
   {
      m_convergenceCount = 0;
      m_minError = testError;
   }
   else
   {
      ++m_convergenceCount;
      m_maxConvergenceCount = std::max (m_convergenceCount, m_maxConvergenceCount);
   }

   if (m_convergenceCount >= convergenceSteps () || testError <= 0)
      return true;

   return false;
}
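
// hasConverged() counts consecutive test-error checks that fail to improve on the
// best error seen so far by at least 0.1% (the factor 0.999 above); once that
// count reaches convergenceSteps(), training is deemed converged. For example,
// with convergenceSteps() == 15, fifteen stagnant checks in a row stop the
// training, while a single sufficient improvement resets the count to zero.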

/** \brief set the weight sums to be scaled to (preparations for monitoring output)
 *
 */
void ClassificationSettings::setWeightSums (double sumOfSigWeights, double sumOfBkgWeights)
{
   m_sumOfSigWeights = sumOfSigWeights;
   m_sumOfBkgWeights = sumOfBkgWeights;
}

/** \brief preparation for monitoring output
 *
 */
void ClassificationSettings::setResultComputation (std::string _fileNameNetConfig,
                                                   std::string _fileNameResult,
                                                   std::vector<Pattern>* _resultPatternContainer)
{
   m_pResultPatternContainer = _resultPatternContainer;
   m_fileNameResult = _fileNameResult;
   m_fileNameNetConfig = _fileNameNetConfig;
}

/** \brief compute the number of weights given the size of the input layer
 *
 */
size_t Net::numWeights (size_t trainingStartLayer) const
{
   size_t num (0);
   size_t index (0);
   size_t prevNodes (inputSize ());
   for (auto& layer : m_layers)
   {
      if (index >= trainingStartLayer)
         num += layer.numWeights (prevNodes);
      prevNodes = layer.numNodes ();
      ++index;
   }
   return num;
}
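
// If Layer::numWeights (prevNodes) counts prevNodes * numNodes fully connected
// links (assuming no separate bias weights), a 10 -> 5 -> 1 net would give
//
//    numWeights () = 10*5 + 5*1 = 55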

size_t Net::numNodes (size_t trainingStartLayer) const
{
   size_t num (0);
   size_t index (0);
   for (auto& layer : m_layers)
   {
      if (index >= trainingStartLayer)
         num += layer.numNodes ();
      ++index;
   }
   return num;
}

/** \brief prepare the drop-out container given the provided drop fraction
 *
 */
void Net::fillDropContainer (DropContainer& dropContainer, double dropFraction, size_t _numNodes) const
{
   size_t numDrops = dropFraction * _numNodes;
   if (numDrops >= _numNodes) // maintain at least one node
      numDrops = _numNodes - 1;
   // add the markers for the nodes which are enabled
   dropContainer.insert (end (dropContainer), _numNodes-numDrops, true);
   // add the markers for the disabled nodes
   dropContainer.insert (end (dropContainer), numDrops, false);
   // shuffle enabled and disabled markers
   std::shuffle (end (dropContainer)-_numNodes, end (dropContainer), std::default_random_engine{});
}
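
// The DropContainer ends up holding one marker per node: "true" keeps the node
// active, "false" drops it. E.g. dropFraction = 0.3 over 10 nodes yields 7 true
// and 3 false entries in shuffled order. Since the std::default_random_engine{}
// temporary above is default-seeded, every call shuffles with the same engine
// state; an externally seeded engine would be needed for patterns that differ
// between identical calls.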

} // namespace DNN
} // namespace TMVA