MethodANNBase.cxx
// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Peter Speckmayer, Matt Jachowski, Jan Therhaag, Jiahang Zhong

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
 * Package: TMVA                                                                  *
 * Class  : MethodANNBase                                                         *
 * Web    : http://tmva.sourceforge.net                                           *
 *                                                                                *
 * Description:                                                                   *
 *      Artificial neural network base class for the discrimination of signal    *
 *      from background.                                                          *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Krzysztof Danielowski <danielow@cern.ch>         - IFJ & AGH, Poland      *
 *      Andreas Hoecker       <Andreas.Hocker@cern.ch>   - CERN, Switzerland      *
 *      Matt Jachowski        <jachowski@stanford.edu>   - Stanford University    *
 *      Kamil Kraszewski      <kalq@cern.ch>             - IFJ & UJ, Poland       *
 *      Maciej Kruk           <mkruk@cern.ch>            - IFJ & AGH, Poland      *
 *      Peter Speckmayer      <peter.speckmayer@cern.ch> - CERN, Switzerland      *
 *      Joerg Stelzer         <stelzer@cern.ch>          - DESY, Germany          *
 *      Jan Therhaag          <Jan.Therhaag@cern.ch>     - U of Bonn, Germany     *
 *      Jiahang Zhong         <Jiahang.Zhong@cern.ch>    - Academia Sinica, Taipei*
 *                                                                                *
 * Copyright (c) 2005-2011:                                                       *
 *      CERN, Switzerland                                                         *
 *      U. of Bonn, Germany                                                       *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without             *
 * modification, are permitted according to the terms listed in LICENSE           *
 * (http://tmva.sourceforge.net/LICENSE)                                          *
 **********************************************************************************/

//_______________________________________________________________________
//
// Base class for all TMVA methods using artificial neural networks
//
//_______________________________________________________________________

#include <vector>
#include <cstdlib>
#include <stdexcept>
#if __cplusplus > 199711L
#include <atomic>
#endif

#include "TString.h"
#include "TTree.h"
#include "TDirectory.h"
#include "Riostream.h"
#include "TRandom3.h"
#include "TH2F.h"
#include "TH1.h"
#include "TMath.h"

#include "TMVA/MethodBase.h"
#include "TMVA/MethodANNBase.h"
#include "TMVA/TNeuron.h"
#include "TMVA/TSynapse.h"
#include "TMVA/TActivationChooser.h"
#include "TMVA/TActivationTanh.h"
#include "TMVA/Types.h"
#include "TMVA/Tools.h"
#include "TMVA/TNeuronInputChooser.h"
#include "TMVA/Ranking.h"
#include "TMVA/Version.h"

using std::vector;

ClassImp(TMVA::MethodANNBase)
////////////////////////////////////////////////////////////////////////////////
/// standard constructor
/// Note: Right now it is an option to choose the neuron input function,
/// but only the input function "sum" leads to weight convergence --
/// otherwise the weights go to nan and lead to an ABORT.

TMVA::MethodANNBase::MethodANNBase( const TString& jobName,
                                    Types::EMVA methodType,
                                    const TString& methodTitle,
                                    DataSetInfo& theData,
                                    const TString& theOption,
                                    TDirectory* theTargetDir )
   : TMVA::MethodBase( jobName, methodType, methodTitle, theData, theOption, theTargetDir )
   , fEstimator(kMSE)
   , fUseRegulator(kFALSE)
   , fRandomSeed(0)
{
   InitANNBase();

   DeclareOptions();
}

////////////////////////////////////////////////////////////////////////////////
/// construct the Method from the weight file

TMVA::MethodANNBase::MethodANNBase( Types::EMVA methodType,
                                    DataSetInfo& theData,
                                    const TString& theWeightFile,
                                    TDirectory* theTargetDir )
   : TMVA::MethodBase( methodType, theData, theWeightFile, theTargetDir )
   , fEstimator(kMSE)
   , fUseRegulator(kFALSE)
   , fRandomSeed(0)
{
   InitANNBase();

   DeclareOptions();
}

////////////////////////////////////////////////////////////////////////////////
/// define the options (their key words) that can be set in the option string;
/// here the options valid for all ANN-based methods are declared.
/// Known options:
/// NCycles=xx                            : the number of training cycles
/// Normalize=kTRUE,kFALSE                : whether normalised input variables should be used
/// HiddenLayers="N-1,N-2"                : the specification of the hidden layers
/// NeuronType=sigmoid,tanh,radial,linear : the type of activation function
///                                         used at the neuron

void TMVA::MethodANNBase::DeclareOptions()
{
   DeclareOptionRef( fNcycles    = 500,       "NCycles",      "Number of training cycles" );
   DeclareOptionRef( fLayerSpec  = "N,N-1",   "HiddenLayers", "Specification of hidden layer architecture" );
   DeclareOptionRef( fNeuronType = "sigmoid", "NeuronType",   "Neuron activation function type" );
   DeclareOptionRef( fRandomSeed = 1, "RandomSeed", "Random seed for initial synapse weights (0 means unique seed for each run; default value '1')");

   DeclareOptionRef(fEstimatorS="MSE", "EstimatorType",
                    "MSE (Mean Square Estimator) for Gaussian Likelihood or CE (Cross-Entropy) for Bernoulli Likelihood" ); //zjh
   AddPreDefVal(TString("MSE"));  //zjh
   AddPreDefVal(TString("CE"));   //zjh

   TActivationChooser aChooser;
   std::vector<TString>* names = aChooser.GetAllActivationNames();
   Int_t nTypes = names->size();
   for (Int_t i = 0; i < nTypes; i++)
      AddPreDefVal(names->at(i));
   delete names;

   DeclareOptionRef(fNeuronInputType="sum", "NeuronInputType", "Neuron input function type");
   TNeuronInputChooser iChooser;
   names = iChooser.GetAllNeuronInputNames();
   nTypes = names->size();
   for (Int_t i = 0; i < nTypes; i++) AddPreDefVal(names->at(i));
   delete names;
}
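
// Usage sketch (illustrative, not part of this file): the options above form
// the option string a user passes when booking a concrete ANN method, e.g.
// the MLP, through the TMVA Factory. File and variable names here are made up:
//
//    TFile* outFile = TFile::Open( "TMVA.root", "RECREATE" );
//    TMVA::Factory factory( "TMVAClassification", outFile, "!V:!Silent" );
//    factory.AddVariable( "var1", 'F' );
//    factory.AddVariable( "var2", 'F' );
//    // ... add signal/background trees, prepare training and test sets ...
//    factory.BookMethod( TMVA::Types::kMLP, "MLP",
//                        "NCycles=500:HiddenLayers=N,N-1:NeuronType=sigmoid:"
//                        "EstimatorType=CE:RandomSeed=1" );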

////////////////////////////////////////////////////////////////////////////////
/// process the ANN options: pick the estimator (MSE or CE), parse the layer
/// specification, and build the network

void TMVA::MethodANNBase::ProcessOptions()
{
   if ( DoRegression() || DoMulticlass()) fEstimatorS = "MSE";  //zjh
   else                                   fEstimatorS = "CE" ;  //hhv
   if      (fEstimatorS == "MSE" ) fEstimator = kMSE;
   else if (fEstimatorS == "CE")   fEstimator = kCE;            //zjh
   std::vector<Int_t>* layout = ParseLayoutString(fLayerSpec);
   BuildNetwork(layout);
   delete layout;
}

////////////////////////////////////////////////////////////////////////////////
/// parse layout specification string and return a vector, each entry
/// containing the number of neurons to go in each successive layer

std::vector<Int_t>* TMVA::MethodANNBase::ParseLayoutString(TString layerSpec)
{
   std::vector<Int_t>* layout = new std::vector<Int_t>();
   layout->push_back((Int_t)GetNvar());
   while (layerSpec.Length()>0) {
      TString sToAdd = "";
      if (layerSpec.First(',')<0) {
         sToAdd = layerSpec;
         layerSpec = "";
      }
      else {
         sToAdd = layerSpec(0,layerSpec.First(','));
         layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
      }
      int nNodes = 0;
      if (sToAdd.BeginsWith("n") || sToAdd.BeginsWith("N")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
      nNodes += atoi(sToAdd);
      layout->push_back(nNodes);
   }
   if( DoRegression() )
      layout->push_back( DataInfo().GetNTargets() );  // one output node for each target
   else if( DoMulticlass() )
      layout->push_back( DataInfo().GetNClasses() );  // one output node for each class
   else
      layout->push_back(1);  // one output node (for signal/background classification)

   return layout;
}
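
// Worked example (illustrative): for a dataset with GetNvar() == 4 input
// variables, a classification job, and HiddenLayers="N+2,N-3", the loop above
// yields
//
//    layout = { 4, 6, 1, 1 }
//
// i.e. 4 input nodes, hidden layers with 4+2 = 6 and 4-3 = 1 neurons, and a
// single output node. Bias nodes are not counted here; they are appended per
// layer in BuildLayer().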

////////////////////////////////////////////////////////////////////////////////
/// initialize ANNBase object

void TMVA::MethodANNBase::InitANNBase()
{
   fNetwork         = NULL;
   frgen            = NULL;
   fActivation      = NULL;
   fOutput          = NULL;  //zjh
   fIdentity        = NULL;
   fInputCalculator = NULL;
   fSynapses        = NULL;
   fEstimatorHistTrain = NULL;
   fEstimatorHistTest  = NULL;

   // reset monitoring histogram vectors
   fEpochMonHistS.clear();
   fEpochMonHistB.clear();
   fEpochMonHistW.clear();

   // these will be set in BuildNetwork()
   fInputLayer = NULL;
   fOutputNeurons.clear();

   frgen = new TRandom3(fRandomSeed);

   fSynapses = new TObjArray();
}

////////////////////////////////////////////////////////////////////////////////
/// destructor

TMVA::MethodANNBase::~MethodANNBase()
{
   DeleteNetwork();
}

////////////////////////////////////////////////////////////////////////////////
/// delete/clear network

void TMVA::MethodANNBase::DeleteNetwork()
{
   if (fNetwork != NULL) {
      TObjArray *layer;
      Int_t numLayers = fNetwork->GetEntriesFast();
      for (Int_t i = 0; i < numLayers; i++) {
         layer = (TObjArray*)fNetwork->At(i);
         DeleteNetworkLayer(layer);
      }
      delete fNetwork;
   }

   if (frgen != NULL)            delete frgen;
   if (fActivation != NULL)      delete fActivation;
   if (fOutput != NULL)          delete fOutput;  //zjh
   if (fIdentity != NULL)        delete fIdentity;
   if (fInputCalculator != NULL) delete fInputCalculator;
   if (fSynapses != NULL)        delete fSynapses;

   fNetwork         = NULL;
   frgen            = NULL;
   fActivation      = NULL;
   fOutput          = NULL;  //zjh
   fIdentity        = NULL;
   fInputCalculator = NULL;
   fSynapses        = NULL;
}

////////////////////////////////////////////////////////////////////////////////
/// delete a network layer

void TMVA::MethodANNBase::DeleteNetworkLayer( TObjArray*& layer )
{
   TNeuron* neuron;
   Int_t numNeurons = layer->GetEntriesFast();
   for (Int_t i = 0; i < numNeurons; i++) {
      neuron = (TNeuron*)layer->At(i);
      neuron->DeletePreLinks();
      delete neuron;
   }
   delete layer;
}

////////////////////////////////////////////////////////////////////////////////
/// build network given a layout (number of neurons in each layer)
/// and optional weights array

void TMVA::MethodANNBase::BuildNetwork( std::vector<Int_t>* layout, std::vector<Double_t>* weights, Bool_t fromFile )
{
   if      (fEstimatorS == "MSE") fEstimator = kMSE;  //zjh
   else if (fEstimatorS == "CE")  fEstimator = kCE;   //zjh
   else Log()<<kWARNING<<"fEstimator="<<fEstimator<<"\tfEstimatorS="<<fEstimatorS<<Endl;
   if (fEstimator!=kMSE && fEstimator!=kCE) Log()<<kWARNING<<"Estimator type unspecified\t"<<Endl; //zjh

   Log() << kINFO << "Building Network" << Endl;

   DeleteNetwork();
   InitANNBase();

   // set activation and input functions
   TActivationChooser aChooser;
   fActivation = aChooser.CreateActivation(fNeuronType);
   fIdentity   = aChooser.CreateActivation("linear");
   if      (fEstimator==kMSE) fOutput = aChooser.CreateActivation("linear");  //zjh
   else if (fEstimator==kCE)  fOutput = aChooser.CreateActivation("sigmoid"); //zjh
   TNeuronInputChooser iChooser;
   fInputCalculator = iChooser.CreateNeuronInput(fNeuronInputType);

   fNetwork = new TObjArray();
   fRegulatorIdx.clear();  //zjh
   fRegulators.clear();    //zjh
   BuildLayers( layout, fromFile );

   // cache input layer and output neuron for fast access
   fInputLayer = (TObjArray*)fNetwork->At(0);
   TObjArray* outputLayer = (TObjArray*)fNetwork->At(fNetwork->GetEntriesFast()-1);
   fOutputNeurons.clear();
   for (Int_t i = 0; i < outputLayer->GetEntries(); i++) {
      fOutputNeurons.push_back( (TNeuron*)outputLayer->At(i) );
   }

   if (weights == NULL) InitWeights();
   else                 ForceWeights(weights);
}
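
// The resulting in-memory structure (sketch): fNetwork is a TObjArray of
// layers, each layer a TObjArray of TNeuron objects, with TSynapse objects
// linking consecutive layers. Walking it looks like:
//
//    for (Int_t i = 0; i < fNetwork->GetEntriesFast(); i++) {
//       TObjArray* layer = (TObjArray*)fNetwork->At(i);
//       for (Int_t j = 0; j < layer->GetEntriesFast(); j++) {
//          TNeuron* neuron = (TNeuron*)layer->At(j);
//          // neuron->NumPreLinks() / NumPostLinks() give its synapses
//       }
//    }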

////////////////////////////////////////////////////////////////////////////////
/// build the network layers

void TMVA::MethodANNBase::BuildLayers( std::vector<Int_t>* layout, Bool_t fromFile )
{
   TObjArray* curLayer;
   TObjArray* prevLayer = NULL;

   Int_t numLayers = layout->size();

   for (Int_t i = 0; i < numLayers; i++) {
      curLayer = new TObjArray();
      BuildLayer(layout->at(i), curLayer, prevLayer, i, numLayers, fromFile);
      prevLayer = curLayer;
      fNetwork->Add(curLayer);
   }

   // cache pointers to synapses for fast access, the order matters
   for (Int_t i = 0; i < numLayers; i++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      if (i!=0 && i!=numLayers-1) fRegulators.push_back(0.);  //zjh
      for (Int_t j = 0; j < numNeurons; j++) {
         if (i==0) fRegulators.push_back(0.);  //zjh
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            fSynapses->Add(synapse);
            fRegulatorIdx.push_back(fRegulators.size()-1);  //zjh
         }
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// build a single layer with neurons and synapses connecting this
/// layer to the previous layer

void TMVA::MethodANNBase::BuildLayer( Int_t numNeurons, TObjArray* curLayer,
                                      TObjArray* prevLayer, Int_t layerIndex,
                                      Int_t numLayers, Bool_t fromFile )
{
   TNeuron* neuron;
   for (Int_t j = 0; j < numNeurons; j++) {
      if (fromFile && (layerIndex != numLayers-1) && (j==numNeurons-1)){
         neuron = new TNeuron();
         neuron->SetActivationEqn(fIdentity);
         neuron->SetBiasNeuron();
         neuron->ForceValue(1.0);
         curLayer->Add(neuron);
      }
      else {
         neuron = new TNeuron();
         neuron->SetInputCalculator(fInputCalculator);

         // input layer
         if (layerIndex == 0) {
            neuron->SetActivationEqn(fIdentity);
            neuron->SetInputNeuron();
         }
         else {
            // output layer
            if (layerIndex == numLayers-1) {
               neuron->SetOutputNeuron();
               neuron->SetActivationEqn(fOutput);  //zjh
            }
            // hidden layers
            else neuron->SetActivationEqn(fActivation);
            AddPreLinks(neuron, prevLayer);
         }

         curLayer->Add(neuron);
      }
   }

   // add bias neuron (except to output layer)
   if (!fromFile){
      if (layerIndex != numLayers-1) {
         neuron = new TNeuron();
         neuron->SetActivationEqn(fIdentity);
         neuron->SetBiasNeuron();
         neuron->ForceValue(1.0);
         curLayer->Add(neuron);
      }
   }
}
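
// Bookkeeping note (derived from the logic above): every layer except the
// output layer carries one extra bias neuron, held at a constant value of 1.0,
// so the bias enters simply as one more weighted synapse. When the network is
// rebuilt from a weight file (fromFile = kTRUE), the bias node is already
// counted in the stored NNeurons, which is why it is then created inside the
// main loop rather than appended afterwards.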

////////////////////////////////////////////////////////////////////////////////
/// add synapses connecting a neuron to its preceding layer

void TMVA::MethodANNBase::AddPreLinks(TNeuron* neuron, TObjArray* prevLayer)
{
   TSynapse* synapse;
   int numNeurons = prevLayer->GetEntriesFast();
   TNeuron* preNeuron;

   for (Int_t i = 0; i < numNeurons; i++) {
      preNeuron = (TNeuron*)prevLayer->At(i);
      synapse = new TSynapse();
      synapse->SetPreNeuron(preNeuron);
      synapse->SetPostNeuron(neuron);
      preNeuron->AddPostLink(synapse);
      neuron->AddPreLink(synapse);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// initialize the synapse weights randomly

void TMVA::MethodANNBase::InitWeights()
{
   PrintMessage("Initializing weights");

   // init synapse weights: draw each weight uniformly from [-2, 2)
   Int_t numSynapses = fSynapses->GetEntriesFast();
   TSynapse* synapse;
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight(4.0*frgen->Rndm() - 2.0);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// force the synapse weights

void TMVA::MethodANNBase::ForceWeights(std::vector<Double_t>* weights)
{
   PrintMessage("Forcing weights");

   Int_t numSynapses = fSynapses->GetEntriesFast();
   TSynapse* synapse;
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight(weights->at(i));
   }
}

////////////////////////////////////////////////////////////////////////////////
/// force the value of each input neuron to that of the corresponding event
/// variable; the variable at ignoreIndex, if given, is set to zero

void TMVA::MethodANNBase::ForceNetworkInputs( const Event* ev, Int_t ignoreIndex )
{
   Double_t x;
   TNeuron* neuron;

   for (UInt_t j = 0; j < GetNvar(); j++) {

      x = (j != (UInt_t)ignoreIndex)?ev->GetValue(j):0;

      neuron = GetInputNeuron(j);
      neuron->ForceValue(x);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// calculate input values to each neuron

void TMVA::MethodANNBase::ForceNetworkCalculations()
{
   TObjArray* curLayer;
   TNeuron* neuron;
   Int_t numLayers = fNetwork->GetEntriesFast();
   Int_t numNeurons;

   for (Int_t i = 0; i < numLayers; i++) {
      curLayer = (TObjArray*)fNetwork->At(i);
      numNeurons = curLayer->GetEntriesFast();

      for (Int_t j = 0; j < numNeurons; j++) {
         neuron = (TNeuron*) curLayer->At(j);
         neuron->CalculateValue();
         neuron->CalculateActivationValue();
      }
   }
}
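
// What one forward pass computes (illustrative; for the default "sum" input
// function and an activation f chosen via NeuronType): each neuron n in layer
// l receives
//
//    value(n)      = sum_m  w(m,n) * activation(m)    (m runs over layer l-1,
//                                                      including its bias node)
//    activation(n) = f( value(n) )
//
// Layers are processed front to back, so by the time a layer is reached all
// of its inputs are already up to date.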

////////////////////////////////////////////////////////////////////////////////
/// print messages, turn off printing by setting verbose and debug flag appropriately

void TMVA::MethodANNBase::PrintMessage(TString message, Bool_t force) const
{
   if (Verbose() || Debug() || force) Log() << kINFO << message << Endl;
}

////////////////////////////////////////////////////////////////////////////////
/// wait for keyboard input, for debugging

void TMVA::MethodANNBase::WaitForKeyboard()
{
   std::string dummy;
   Log() << kINFO << "***Type anything to continue (q to quit): ";
   std::getline(std::cin, dummy);
   if (dummy == "q" || dummy == "Q") {
      PrintMessage( "quit" );
      delete this;
      exit(0);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print network representation, for debugging

void TMVA::MethodANNBase::PrintNetwork() const
{
   if (!Debug()) return;

   Log() << kINFO << Endl;
   PrintMessage( "Printing network " );
   Log() << kINFO << "-------------------------------------------------------------------" << Endl;

   TObjArray* curLayer;
   Int_t numLayers = fNetwork->GetEntriesFast();

   for (Int_t i = 0; i < numLayers; i++) {

      curLayer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = curLayer->GetEntriesFast();

      Log() << kINFO << "Layer #" << i << " (" << numNeurons << " neurons):" << Endl;
      PrintLayer( curLayer );
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print a single layer, for debugging

void TMVA::MethodANNBase::PrintLayer(TObjArray* layer) const
{
   Int_t numNeurons = layer->GetEntriesFast();
   TNeuron* neuron;

   for (Int_t j = 0; j < numNeurons; j++) {
      neuron = (TNeuron*) layer->At(j);
      Log() << kINFO << "\tNeuron #" << j << " (LinksIn: " << neuron->NumPreLinks()
            << " , LinksOut: " << neuron->NumPostLinks() << ")" << Endl;
      PrintNeuron( neuron );
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print a neuron, for debugging

void TMVA::MethodANNBase::PrintNeuron(TNeuron* neuron) const
{
   Log() << kINFO
         << "\t\tValue:\t"     << neuron->GetValue()
         << "\t\tActivation: " << neuron->GetActivationValue()
         << "\t\tDelta: "      << neuron->GetDelta() << Endl;
   Log() << kINFO << "\t\tActivationEquation:\t";
   neuron->PrintActivationEqn();
   Log() << kINFO << "\t\tLinksIn:" << Endl;
   neuron->PrintPreLinks();
   Log() << kINFO << "\t\tLinksOut:" << Endl;
   neuron->PrintPostLinks();
}

////////////////////////////////////////////////////////////////////////////////
/// get the mva value generated by the NN

Double_t TMVA::MethodANNBase::GetMvaValue( Double_t* err, Double_t* errUpper )
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network
   TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );
   neuron = (TNeuron*)outputLayer->At(0);

   // cannot determine error
   NoErrorCalc(err, errUpper);

   return neuron->GetActivationValue();
}
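
// Usage sketch (illustrative, not part of this file): in an application the
// network response is typically obtained through the TMVA::Reader, which in
// turn calls GetMvaValue(). Variable and weight-file names here are made up:
//
//    TMVA::Reader reader( "!Color:!Silent" );
//    Float_t var1, var2;
//    reader.AddVariable( "var1", &var1 );
//    reader.AddVariable( "var2", &var2 );
//    reader.BookMVA( "MLP", "weights/TMVAClassification_MLP.weights.xml" );
//    var1 = 0.5; var2 = -1.2;                     // fill from the current event
//    Double_t mva = reader.EvaluateMVA( "MLP" );  // forward pass through the net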

////////////////////////////////////////////////////////////////////////////////
/// get the regression value generated by the NN

const std::vector<Float_t> &TMVA::MethodANNBase::GetRegressionValues()
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network
   TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );

   if (fRegressionReturnVal == NULL) fRegressionReturnVal = new std::vector<Float_t>();
   fRegressionReturnVal->clear();

   Event * evT = new Event(*ev);
   UInt_t ntgts = outputLayer->GetEntriesFast();
   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
      evT->SetTarget(itgt,((TNeuron*)outputLayer->At(itgt))->GetActivationValue());
   }

   const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
      fRegressionReturnVal->push_back( evT2->GetTarget(itgt) );
   }

   delete evT;

   return *fRegressionReturnVal;
}

////////////////////////////////////////////////////////////////////////////////
/// get the multiclass classification values generated by the NN

const std::vector<Float_t> &TMVA::MethodANNBase::GetMulticlassValues()
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network

   if (fMulticlassReturnVal == NULL) fMulticlassReturnVal = new std::vector<Float_t>();
   fMulticlassReturnVal->clear();
   std::vector<Float_t> temp;

   UInt_t nClasses = DataInfo().GetNClasses();
   for (UInt_t icls = 0; icls < nClasses; icls++) {
      temp.push_back(GetOutputNeuron( icls )->GetActivationValue() );
   }

   for (UInt_t iClass=0; iClass<nClasses; iClass++) {
      Double_t norm = 0.0;
      for (UInt_t j=0; j<nClasses; j++) {
         if (iClass!=j)
            norm += exp(temp[j]-temp[iClass]);
      }
      (*fMulticlassReturnVal).push_back(1.0/(1.0+norm));
   }

   return *fMulticlassReturnVal;
}
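
// Note on the normalisation above: the returned value for class i is
//
//    1 / (1 + sum_{j != i} exp(t_j - t_i))  =  exp(t_i) / sum_j exp(t_j)
//
// i.e. the softmax of the raw output-node activations t_0 .. t_{nClasses-1},
// written in terms of activation differences rather than exponentiating the
// raw activations directly.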

////////////////////////////////////////////////////////////////////////////////
/// create XML description of ANN classifier

void TMVA::MethodANNBase::AddWeightsXMLTo( void* parent ) const
{
   Int_t numLayers = fNetwork->GetEntriesFast();
   void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
   void* xmlLayout = gTools().xmlengine().NewChild(wght, 0, "Layout");
   gTools().xmlengine().NewAttr(xmlLayout, 0, "NLayers", gTools().StringFromInt(fNetwork->GetEntriesFast()) );
   TString weights = "";
   for (Int_t i = 0; i < numLayers; i++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      void* layerxml = gTools().xmlengine().NewChild(xmlLayout, 0, "Layer");
      gTools().xmlengine().NewAttr(layerxml, 0, "Index",    gTools().StringFromInt(i) );
      gTools().xmlengine().NewAttr(layerxml, 0, "NNeurons", gTools().StringFromInt(numNeurons) );
      for (Int_t j = 0; j < numNeurons; j++) {
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         void* neuronxml = gTools().AddChild(layerxml, "Neuron");
         gTools().AddAttr(neuronxml, "NSynapses", gTools().StringFromInt(numSynapses) );
         if (numSynapses==0) continue;
         std::stringstream s("");
         s.precision( 16 );
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            s << std::scientific << synapse->GetWeight() << " ";
         }
         gTools().AddRawLine( neuronxml, s.str().c_str() );
      }
   }

   // if inverse hessian exists, write inverse hessian to weight file
   if ( fInvHessian.GetNcols()>0 ){
      void* xmlInvHessian = gTools().xmlengine().NewChild(wght, 0, "InverseHessian");

      // get the matrix dimensions
      Int_t nElements = fInvHessian.GetNoElements();
      Int_t nRows     = fInvHessian.GetNrows();
      Int_t nCols     = fInvHessian.GetNcols();
      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NElements", gTools().StringFromInt(nElements) );
      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NRows",     gTools().StringFromInt(nRows) );
      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NCols",     gTools().StringFromInt(nCols) );

      // copy the matrix elements into a buffer
      Double_t* elements = new Double_t[nElements+10];
      fInvHessian.GetMatrix2Array( elements );

      // store the matrix elements row-wise
      Int_t index = 0;
      for (Int_t row = 0; row < nRows; ++row) {
         void* xmlRow = gTools().xmlengine().NewChild(xmlInvHessian, 0, "Row");
         gTools().xmlengine().NewAttr(xmlRow, 0, "Index", gTools().StringFromInt(row) );

         // create the rows
         std::stringstream s("");
         s.precision( 16 );
         for (Int_t col = 0; col < nCols; ++col) {
            s << std::scientific << (*(elements+index)) << " ";
            ++index;
         }
         gTools().xmlengine().AddRawLine( xmlRow, s.str().c_str() );
      }
      delete[] elements;
   }
}
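
// Resulting weight-file structure (sketch; attribute values illustrative):
//
//    <Weights>
//      <Layout NLayers="3">
//        <Layer Index="0" NNeurons="5">
//          <Neuron NSynapses="4"> 1.23e-01 -4.56e-02 ... </Neuron>
//          ...
//        </Layer>
//        ...
//      </Layout>
//      <InverseHessian NElements="..." NRows="..." NCols="...">  (optional)
//        <Row Index="0"> ... </Row>
//      </InverseHessian>
//    </Weights>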

////////////////////////////////////////////////////////////////////////////////
/// read MLP from xml weight file

void TMVA::MethodANNBase::ReadWeightsFromXML( void* wghtnode )
{
   // build the layout first
   Bool_t fromFile = kTRUE;
   std::vector<Int_t>* layout = new std::vector<Int_t>();

   void* xmlLayout = NULL;
   xmlLayout = gTools().GetChild(wghtnode, "Layout");
   if( !xmlLayout )
      xmlLayout = wghtnode;

   UInt_t nLayers;
   gTools().ReadAttr( xmlLayout, "NLayers", nLayers );
   layout->resize( nLayers );

   void* ch = gTools().xmlengine().GetChild(xmlLayout);
   UInt_t index;
   UInt_t nNeurons;
   while (ch) {
      gTools().ReadAttr( ch, "Index",    index    );
      gTools().ReadAttr( ch, "NNeurons", nNeurons );
      layout->at(index) = nNeurons;
      ch = gTools().GetNextChild(ch);
   }

   BuildNetwork( layout, NULL, fromFile );
   // use 'slow' (exact) TanH if processing an old weight file to ensure 100% compatible results;
   // otherwise use the new default, the 'fast tanh' approximation
   if (GetTrainingTMVAVersionCode() < TMVA_VERSION(4,2,1) && fActivation->GetExpression().Contains("tanh")){
      TActivationTanh* act = dynamic_cast<TActivationTanh*>( fActivation );
      if (act) act->SetSlow();
   }

   // fill the weights of the synapses
   UInt_t nSyn;
   Float_t weight;
   ch = gTools().xmlengine().GetChild(xmlLayout);
   UInt_t iLayer = 0;
   while (ch) {  // layers
      TObjArray* layer = (TObjArray*)fNetwork->At(iLayer);
      gTools().ReadAttr( ch, "Index",    index    );
      gTools().ReadAttr( ch, "NNeurons", nNeurons );

      void* nodeN = gTools().GetChild(ch);
      UInt_t iNeuron = 0;
      while( nodeN ){  // neurons
         TNeuron *neuron = (TNeuron*)layer->At(iNeuron);
         gTools().ReadAttr( nodeN, "NSynapses", nSyn );
         if( nSyn > 0 ){
            const char* content = gTools().GetContent(nodeN);
            std::stringstream s(content);
            for (UInt_t iSyn = 0; iSyn<nSyn; iSyn++) {  // synapses

               TSynapse* synapse = neuron->PostLinkAt(iSyn);
               s >> weight;
               //Log() << kWARNING << neuron << " " << weight << Endl;
               synapse->SetWeight(weight);
            }
         }
         nodeN = gTools().GetNextChild(nodeN);
         iNeuron++;
      }
      ch = gTools().GetNextChild(ch);
      iLayer++;
   }

   delete layout;

   void* xmlInvHessian = NULL;
   xmlInvHessian = gTools().GetChild(wghtnode, "InverseHessian");
   if( !xmlInvHessian )
      // no inverse hessian available
      return;

   fUseRegulator = kTRUE;

   Int_t nElements = 0;
   Int_t nRows     = 0;
   Int_t nCols     = 0;
   gTools().ReadAttr( xmlInvHessian, "NElements", nElements );
   gTools().ReadAttr( xmlInvHessian, "NRows",     nRows );
   gTools().ReadAttr( xmlInvHessian, "NCols",     nCols );

   // adjust the matrix dimensions
   fInvHessian.ResizeTo( nRows, nCols );

   // prepare an array to read in the values
   Double_t* elements;
   if (nElements > std::numeric_limits<int>::max()-100){
      Log() << kFATAL << "you tried to read a hessian matrix with " << nElements << " elements, --> too large, guess s.th. went wrong reading from the weight file" << Endl;
      return;
   } else {
      elements = new Double_t[nElements+10];
   }

   void* xmlRow = gTools().xmlengine().GetChild(xmlInvHessian);
   Int_t row = 0;
   index = 0;
   while (xmlRow) {  // rows
      gTools().ReadAttr( xmlRow, "Index", row );

      const char* content = gTools().xmlengine().GetNodeContent(xmlRow);

      std::stringstream s(content);
      for (Int_t iCol = 0; iCol<nCols; iCol++) {  // columns
         s >> (*(elements+index));
         ++index;
      }
      xmlRow = gTools().xmlengine().GetNext(xmlRow);
      ++row;
   }

   fInvHessian.SetMatrixArray( elements );

   delete[] elements;
}

////////////////////////////////////////////////////////////////////////////////
/// destroy/clear the network then read it back in from the weights file

void TMVA::MethodANNBase::ReadWeightsFromStream( std::istream & istr)
{
   // delete network so we can reconstruct network from scratch

   TString dummy;

   // synapse weights
   Double_t weight;
   std::vector<Double_t>* weights = new std::vector<Double_t>();
   istr >> dummy;
   while (istr >> dummy >> weight) weights->push_back(weight);  // use w/ slower write-out

   ForceWeights(weights);

   delete weights;
}

////////////////////////////////////////////////////////////////////////////////
/// compute ranking of input variables by summing function of weights

const TMVA::Ranking* TMVA::MethodANNBase::CreateRanking()
{
   // create the ranking object
   fRanking = new Ranking( GetName(), "Importance" );

   TNeuron* neuron;
   TSynapse* synapse;
   Double_t importance, avgVal;
   TString varName;

   for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) {

      neuron = GetInputNeuron(ivar);
      Int_t numSynapses = neuron->NumPostLinks();
      importance = 0;
      varName = GetInputVar(ivar); // fix this line

      // figure out average value of variable i
      Double_t meanS, meanB, rmsS, rmsB, xmin, xmax;
      Statistics( TMVA::Types::kTraining, varName,
                  meanS, meanB, rmsS, rmsB, xmin, xmax );

      avgVal = (TMath::Abs(meanS) + TMath::Abs(meanB))/2.0;
      double meanrms = (TMath::Abs(rmsS) + TMath::Abs(rmsB))/2.;
      if (avgVal<meanrms) avgVal = meanrms;
      if (IsNormalised()) avgVal = 0.5*(1 + gTools().NormVariable( avgVal, GetXmin( ivar ), GetXmax( ivar )));

      for (Int_t j = 0; j < numSynapses; j++) {
         synapse = neuron->PostLinkAt(j);
         importance += synapse->GetWeight() * synapse->GetWeight();
      }

      importance *= avgVal * avgVal;

      fRanking->AddRank( Rank( varName, importance ) );
   }

   return fRanking;
}
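
// The importance assigned above is therefore (schematically)
//
//    importance_i = ( sum_j w_ij^2 ) * avgVal_i^2
//
// where w_ij are the weights of the synapses leaving input neuron i and
// avgVal_i is the signal/background-averaged scale of variable i, so large
// first-layer weights on a variable with a large typical value rank high.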

////////////////////////////////////////////////////////////////////////////////
/// create one TH2F of synapse weights for each pair of consecutive layers

void TMVA::MethodANNBase::CreateWeightMonitoringHists( const TString& bulkname,
                                                       std::vector<TH1*>* hv ) const
{
   TH2F* hist;
   Int_t numLayers = fNetwork->GetEntriesFast();

   for (Int_t i = 0; i < numLayers-1; i++) {

      TObjArray* layer1 = (TObjArray*)fNetwork->At(i);
      TObjArray* layer2 = (TObjArray*)fNetwork->At(i+1);
      Int_t numNeurons1 = layer1->GetEntriesFast();
      Int_t numNeurons2 = layer2->GetEntriesFast();

      TString name = Form("%s%i%i", bulkname.Data(), i, i+1);
      hist = new TH2F(name + "", name + "",
                      numNeurons1, 0, numNeurons1, numNeurons2, 0, numNeurons2);

      for (Int_t j = 0; j < numNeurons1; j++) {

         TNeuron* neuron = (TNeuron*)layer1->At(j);
         Int_t numSynapses = neuron->NumPostLinks();

         for (Int_t k = 0; k < numSynapses; k++) {

            TSynapse* synapse = neuron->PostLinkAt(k);
            hist->SetBinContent(j+1, k+1, synapse->GetWeight());
         }
      }

      if (hv) hv->push_back( hist );
      else {
         hist->Write();
         delete hist;
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// write histograms to file

void TMVA::MethodANNBase::WriteMonitoringHistosToFile() const
{
   PrintMessage(Form("Write special histos to file: %s", BaseDir()->GetPath()), kTRUE);

   if (fEstimatorHistTrain) fEstimatorHistTrain->Write();
   if (fEstimatorHistTest ) fEstimatorHistTest ->Write();

   // histograms containing weights for architecture plotting (used in macro "network.cxx")
   CreateWeightMonitoringHists( "weights_hist" );

   // now save all the epoch-wise monitoring information
#if __cplusplus > 199711L
   static std::atomic<int> epochMonitoringDirectoryNumber{0};
#else
   static int epochMonitoringDirectoryNumber = 0;
#endif
   int epochVal = epochMonitoringDirectoryNumber++;
   TDirectory* epochdir = NULL;
   if( epochVal == 0 )
      epochdir = BaseDir()->mkdir( "EpochMonitoring" );
   else
      epochdir = BaseDir()->mkdir( Form("EpochMonitoring_%4d",epochVal) );

   epochdir->cd();
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistS.begin(); it != fEpochMonHistS.end(); it++) {
      (*it)->Write();
      delete (*it);
   }
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistB.begin(); it != fEpochMonHistB.end(); it++) {
      (*it)->Write();
      delete (*it);
   }
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistW.begin(); it != fEpochMonHistW.end(); it++) {
      (*it)->Write();
      delete (*it);
   }
   BaseDir()->cd();
}

////////////////////////////////////////////////////////////////////////////////
/// write specific classifier response

void TMVA::MethodANNBase::MakeClassSpecific( std::ostream& fout, const TString& className ) const
{
   Int_t numLayers = fNetwork->GetEntries();

   fout << std::endl;
   fout << "   double ActivationFnc(double x) const;" << std::endl;
   fout << "   double OutputActivationFnc(double x) const;" << std::endl;  //zjh
   fout << std::endl;
   fout << "   int fLayers;" << std::endl;
   fout << "   int fLayerSize["<<numLayers<<"];" << std::endl;
   int numNodesFrom = -1;
   for (Int_t lIdx = 0; lIdx < numLayers; lIdx++) {
      int numNodesTo = ((TObjArray*)fNetwork->At(lIdx))->GetEntries();
      if (numNodesFrom<0) { numNodesFrom=numNodesTo; continue; }
      fout << "   double fWeightMatrix" << lIdx-1 << "to" << lIdx << "[" << numNodesTo << "][" << numNodesFrom << "];";
      fout << "   // weight matrix from layer " << lIdx-1 << " to " << lIdx << std::endl;
      numNodesFrom = numNodesTo;
   }
   fout << std::endl;
   fout << "   double * fWeights["<<numLayers<<"];" << std::endl;
   fout << "};" << std::endl;

   fout << std::endl;

   fout << "inline void " << className << "::Initialize()" << std::endl;
   fout << "{" << std::endl;
   fout << "   // build network structure" << std::endl;
   fout << "   fLayers = " << numLayers << ";" << std::endl;
   for (Int_t lIdx = 0; lIdx < numLayers; lIdx++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(lIdx);
      int numNodes = layer->GetEntries();
      fout << "   fLayerSize[" << lIdx << "] = " << numNodes << "; fWeights["<<lIdx<<"] = new double["<<numNodes<<"]; " << std::endl;
   }

   for (Int_t i = 0; i < numLayers-1; i++) {
      fout << "   // weight matrix from layer " << i << " to " << i+1 << std::endl;
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      for (Int_t j = 0; j < numNeurons; j++) {
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            fout << "   fWeightMatrix" << i << "to" << i+1 << "[" << k << "][" << j << "] = " << synapse->GetWeight() << ";" << std::endl;
         }
      }
   }

   fout << "}" << std::endl;
   fout << std::endl;

   // writing of the GetMvaValue__ method
   fout << "inline double " << className << "::GetMvaValue__( const std::vector<double>& inputValues ) const" << std::endl;
   fout << "{" << std::endl;
   fout << "   if (inputValues.size() != (unsigned int)fLayerSize[0]-1) {" << std::endl;
   fout << "      std::cout << \"Input vector needs to be of size \" << fLayerSize[0]-1 << std::endl;" << std::endl;
   fout << "      return 0;" << std::endl;
   fout << "   }" << std::endl;
   fout << std::endl;
   fout << "   for (int l=0; l<fLayers; l++)" << std::endl;
   fout << "      for (int i=0; i<fLayerSize[l]; i++) fWeights[l][i]=0;" << std::endl;
   fout << std::endl;
   fout << "   for (int l=0; l<fLayers-1; l++)" << std::endl;
   fout << "      fWeights[l][fLayerSize[l]-1]=1;" << std::endl;
   fout << std::endl;
   fout << "   for (int i=0; i<fLayerSize[0]-1; i++)" << std::endl;
   fout << "      fWeights[0][i]=inputValues[i];" << std::endl;
   fout << std::endl;
   for (Int_t i = 0; i < numLayers-1; i++) {
      fout << "   // layer " << i << " to " << i+1 << std::endl;
      if (i+1 == numLayers-1) {
         fout << "   for (int o=0; o<fLayerSize[" << i+1 << "]; o++) {" << std::endl;
      }
      else {
         fout << "   for (int o=0; o<fLayerSize[" << i+1 << "]-1; o++) {" << std::endl;
      }
      fout << "      for (int i=0; i<fLayerSize[" << i << "]; i++) {" << std::endl;
      fout << "         double inputVal = fWeightMatrix" << i << "to" << i+1 << "[o][i] * fWeights[" << i << "][i];" << std::endl;

      if ( fNeuronInputType == "sum") {
         fout << "         fWeights[" << i+1 << "][o] += inputVal;" << std::endl;
      }
      else if ( fNeuronInputType == "sqsum") {
         fout << "         fWeights[" << i+1 << "][o] += inputVal*inputVal;" << std::endl;
      }
      else { // fNeuronInputType == TNeuronInputChooser::kAbsSum
         fout << "         fWeights[" << i+1 << "][o] += fabs(inputVal);" << std::endl;
      }
      fout << "      }" << std::endl;
      if (i+1 != numLayers-1) // the output layer uses the output activation function instead
         fout << "      fWeights[" << i+1 << "][o] = ActivationFnc(fWeights[" << i+1 << "][o]);" << std::endl;
      else fout << "      fWeights[" << i+1 << "][o] = OutputActivationFnc(fWeights[" << i+1 << "][o]);" << std::endl;  //zjh
      fout << "   }" << std::endl;
   }
   fout << std::endl;
   fout << "   return fWeights[" << numLayers-1 << "][0];" << std::endl;
   fout << "}" << std::endl;

   fout << std::endl;
   TString fncName = className+"::ActivationFnc";
   fActivation->MakeFunction(fout, fncName);
   fncName = className+"::OutputActivationFnc";  //zjh
   fOutput->MakeFunction(fout, fncName);         //zjh

   fout << "   " << std::endl;
   fout << "// Clean up" << std::endl;
   fout << "inline void " << className << "::Clear() " << std::endl;
   fout << "{" << std::endl;
   fout << "   // clean up the arrays" << std::endl;
   fout << "   for (int lIdx = 0; lIdx < "<<numLayers<<"; lIdx++) {" << std::endl;
   fout << "      delete[] fWeights[lIdx];" << std::endl;
   fout << "   }" << std::endl;
   fout << "}" << std::endl;
}
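
// Usage sketch for the generated standalone class (illustrative; the class
// name and file name depend on the method title, here assumed to be a method
// "MLP" exported as ReadMLP in MLP.class.C):
//
//    #include "MLP.class.C"
//    std::vector<std::string> vars;        // same order as in training
//    vars.push_back( "var1" );
//    vars.push_back( "var2" );
//    ReadMLP mlp( vars );
//    std::vector<double> inputValues;
//    inputValues.push_back( 0.5 );
//    inputValues.push_back( -1.2 );
//    double response = mlp.GetMvaValue( inputValues );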

////////////////////////////////////////////////////////////////////////////////
/// who the hell makes such strange Debug flags that even use "global pointers"..

Bool_t TMVA::MethodANNBase::Debug() const
{
   return fgDEBUG;
}