MethodANNBase.cxx
// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Peter Speckmayer, Matt Jachowski, Jan Therhaag, Jiahang Zhong

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
 * Package: TMVA                                                                  *
 * Class  : MethodANNBase                                                         *
 * Web    : http://tmva.sourceforge.net                                          *
 *                                                                                *
 * Description:                                                                   *
 *      Artificial neural network base class for the discrimination of signal    *
 *      from background.                                                          *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Krzysztof Danielowski <danielow@cern.ch>         - IFJ & AGH, Poland      *
 *      Andreas Hoecker       <Andreas.Hocker@cern.ch>   - CERN, Switzerland      *
 *      Matt Jachowski        <jachowski@stanford.edu>   - Stanford University, USA *
 *      Kamil Kraszewski      <kalq@cern.ch>             - IFJ & UJ, Poland       *
 *      Maciej Kruk           <mkruk@cern.ch>            - IFJ & AGH, Poland      *
 *      Peter Speckmayer      <peter.speckmayer@cern.ch> - CERN, Switzerland      *
 *      Joerg Stelzer         <stelzer@cern.ch>          - DESY, Germany          *
 *      Jan Therhaag          <Jan.Therhaag@cern.ch>     - U of Bonn, Germany     *
 *      Jiahang Zhong         <Jiahang.Zhong@cern.ch>    - Academia Sinica, Taipei *
 *                                                                                *
 * Copyright (c) 2005-2011:                                                       *
 *      CERN, Switzerland                                                         *
 *      U. of Bonn, Germany                                                       *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without            *
 * modification, are permitted according to the terms listed in LICENSE          *
 * (http://tmva.sourceforge.net/LICENSE)                                         *
 **********************************************************************************/

//_______________________________________________________________________
//
// Base class for all TMVA methods using artificial neural networks
//
//_______________________________________________________________________
39 
#include <vector>
#include <cstdlib>
#include <stdexcept>
#if __cplusplus > 199711L
#include <atomic>
#endif

#include "TString.h"
#include "TTree.h"
#include "TDirectory.h"
#include "Riostream.h"
#include "TRandom3.h"
#include "TH2F.h"
#include "TH1.h"
#include "TMath.h"

#include "TMVA/DataSetInfo.h"
#include "TMVA/MethodBase.h"
#include "TMVA/MethodANNBase.h"
#include "TMVA/MsgLogger.h"
#include "TMVA/TNeuron.h"
#include "TMVA/TSynapse.h"
#include "TMVA/TActivationChooser.h"
#include "TMVA/TActivationTanh.h"
#include "TMVA/Types.h"
#include "TMVA/Tools.h"
#include "TMVA/TNeuronInputChooser.h"
#include "TMVA/Ranking.h"
#include "TMVA/Version.h"

using std::vector;

ClassImp(TMVA::MethodANNBase)
////////////////////////////////////////////////////////////////////////////////
/// standard constructor
/// Note: Right now it is an option to choose the neuron input function,
/// but only the input function "sum" leads to weight convergence --
/// otherwise the weights go to nan and lead to an ABORT.

TMVA::MethodANNBase::MethodANNBase( const TString& jobName,
                                    Types::EMVA methodType,
                                    const TString& methodTitle,
                                    DataSetInfo& theData,
                                    const TString& theOption,
                                    TDirectory* theTargetDir )
   : TMVA::MethodBase( jobName, methodType, methodTitle, theData, theOption, theTargetDir )
   , fEstimator(kMSE)
   , fUseRegulator(kFALSE)
   , fRandomSeed(0)
{
   InitANNBase();

   DeclareOptions();
}

////////////////////////////////////////////////////////////////////////////////
/// construct the Method from the weight file

TMVA::MethodANNBase::MethodANNBase( Types::EMVA methodType,
                                    DataSetInfo& theData,
                                    const TString& theWeightFile,
                                    TDirectory* theTargetDir )
   : TMVA::MethodBase( methodType, theData, theWeightFile, theTargetDir )
   , fEstimator(kMSE)
   , fUseRegulator(kFALSE)
   , fRandomSeed(0)
{
   InitANNBase();

   DeclareOptions();
}

////////////////////////////////////////////////////////////////////////////////
/// define the options (their key words) that can be set in the option string
/// here the options valid for ALL MVA methods are declared.
/// known options: NCycles=xx              : the number of training cycles
///                Normalize=kTRUE,kFALSE  : if normalised input variables should be used
///                HiddenLayers="N-1,N-2"  : the specification of the hidden layers
///                NeuronType=sigmoid,tanh,radial,linear : the type of activation
///                function used at the neuron
///

void TMVA::MethodANNBase::DeclareOptions()
{
   DeclareOptionRef( fNcycles    = 500,       "NCycles",    "Number of training cycles" );
   DeclareOptionRef( fLayerSpec  = "N,N-1",   "HiddenLayers", "Specification of hidden layer architecture" );
   DeclareOptionRef( fNeuronType = "sigmoid", "NeuronType", "Neuron activation function type" );
   DeclareOptionRef( fRandomSeed = 1, "RandomSeed", "Random seed for initial synapse weights (0 means unique seed for each run; default value '1')");

   DeclareOptionRef(fEstimatorS="MSE", "EstimatorType",
                    "MSE (Mean Square Estimator) for Gaussian Likelihood or CE(Cross-Entropy) for Bernoulli Likelihood" ); //zjh
   AddPreDefVal(TString("MSE"));  //zjh
   AddPreDefVal(TString("CE"));   //zjh

   TActivationChooser aChooser;
   std::vector<TString>* names = aChooser.GetAllActivationNames();
   Int_t nTypes = names->size();
   for (Int_t i = 0; i < nTypes; i++)
      AddPreDefVal(names->at(i));
   delete names;

   DeclareOptionRef(fNeuronInputType="sum", "NeuronInputType","Neuron input function type");
   TNeuronInputChooser iChooser;
   names = iChooser.GetAllNeuronInputNames();
   nTypes = names->size();
   for (Int_t i = 0; i < nTypes; i++) AddPreDefVal(names->at(i));
   delete names;
}
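
// Example (illustrative sketch, not part of the original file): the options
// declared above are passed as one TMVA option string when the method is
// booked; "factory" and the chosen option values are hypothetical.
//
//    factory->BookMethod( TMVA::Types::kMLP, "MLP",
//       "NCycles=600:HiddenLayers=N+5,N:NeuronType=tanh:EstimatorType=CE" );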

////////////////////////////////////////////////////////////////////////////////
/// do nothing specific at this moment

void TMVA::MethodANNBase::ProcessOptions()
{
   if ( DoRegression() || DoMulticlass()) fEstimatorS = "MSE"; //zjh
   else fEstimatorS = "CE" ;                                   //hhv
   if      (fEstimatorS == "MSE" ) fEstimator = kMSE;
   else if (fEstimatorS == "CE")   fEstimator = kCE;           //zjh
   std::vector<Int_t>* layout = ParseLayoutString(fLayerSpec);
   BuildNetwork(layout);
   delete layout;
}

////////////////////////////////////////////////////////////////////////////////
/// parse layout specification string and return a vector, each entry
/// containing the number of neurons to go in each successive layer

std::vector<Int_t>* TMVA::MethodANNBase::ParseLayoutString(TString layerSpec)
{
   std::vector<Int_t>* layout = new std::vector<Int_t>();
   layout->push_back((Int_t)GetNvar());
   while(layerSpec.Length()>0) {
      TString sToAdd="";
      if (layerSpec.First(',')<0) {
         sToAdd = layerSpec;
         layerSpec = "";
      }
      else {
         sToAdd = layerSpec(0,layerSpec.First(','));
         layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
      }
      int nNodes = 0;
      if (sToAdd.BeginsWith("n") || sToAdd.BeginsWith("N")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
      nNodes += atoi(sToAdd);
      layout->push_back(nNodes);
   }
   if( DoRegression() )
      layout->push_back( DataInfo().GetNTargets() );  // one output node for each target
   else if( DoMulticlass() )
      layout->push_back( DataInfo().GetNClasses() );  // one output node for each class
   else
      layout->push_back(1);  // one output node (for signal/background classification)

   return layout;
}
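
// Worked example (sketch): with GetNvar() == 4 input variables and the default
// spec "N,N-1", the returned layout is {4, 4, 3, 1} for two-class
// classification: 4 input neurons, hidden layers with N=4 and N-1=3 neurons,
// and one output neuron. Bias neurons are added later in BuildLayer() and are
// not counted here.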

////////////////////////////////////////////////////////////////////////////////
/// initialize ANNBase object

void TMVA::MethodANNBase::InitANNBase()
{
   fNetwork         = NULL;
   frgen            = NULL;
   fActivation      = NULL;
   fOutput          = NULL; //zjh
   fIdentity        = NULL;
   fInputCalculator = NULL;
   fSynapses        = NULL;
   fEstimatorHistTrain = NULL;
   fEstimatorHistTest  = NULL;

   // reset monitoring histogram vectors
   fEpochMonHistS.clear();
   fEpochMonHistB.clear();
   fEpochMonHistW.clear();

   // these will be set in BuildNetwork()
   fInputLayer = NULL;
   fOutputNeurons.clear();

   frgen = new TRandom3(fRandomSeed);

   fSynapses = new TObjArray();
}

////////////////////////////////////////////////////////////////////////////////
/// destructor

TMVA::MethodANNBase::~MethodANNBase()
{
   DeleteNetwork();
}

////////////////////////////////////////////////////////////////////////////////
/// delete/clear network

void TMVA::MethodANNBase::DeleteNetwork()
{
   if (fNetwork != NULL) {
      TObjArray *layer;
      Int_t numLayers = fNetwork->GetEntriesFast();
      for (Int_t i = 0; i < numLayers; i++) {
         layer = (TObjArray*)fNetwork->At(i);
         DeleteNetworkLayer(layer);
      }
      delete fNetwork;
   }

   if (frgen != NULL)            delete frgen;
   if (fActivation != NULL)      delete fActivation;
   if (fOutput != NULL)          delete fOutput; //zjh
   if (fIdentity != NULL)        delete fIdentity;
   if (fInputCalculator != NULL) delete fInputCalculator;
   if (fSynapses != NULL)        delete fSynapses;

   fNetwork         = NULL;
   frgen            = NULL;
   fActivation      = NULL;
   fOutput          = NULL; //zjh
   fIdentity        = NULL;
   fInputCalculator = NULL;
   fSynapses        = NULL;
}

////////////////////////////////////////////////////////////////////////////////
/// delete a network layer

void TMVA::MethodANNBase::DeleteNetworkLayer( TObjArray*& layer )
{
   TNeuron* neuron;
   Int_t numNeurons = layer->GetEntriesFast();
   for (Int_t i = 0; i < numNeurons; i++) {
      neuron = (TNeuron*)layer->At(i);
      neuron->DeletePreLinks();
      delete neuron;
   }
   delete layer;
}

////////////////////////////////////////////////////////////////////////////////
/// build network given a layout (number of neurons in each layer)
/// and optional weights array

void TMVA::MethodANNBase::BuildNetwork( std::vector<Int_t>* layout, std::vector<Double_t>* weights, Bool_t fromFile )
{
   if      (fEstimatorS == "MSE") fEstimator = kMSE;  //zjh
   else if (fEstimatorS == "CE")  fEstimator = kCE;   //zjh
   else Log()<<kWARNING<<"fEstimator="<<fEstimator<<"\tfEstimatorS="<<fEstimatorS<<Endl;
   if (fEstimator!=kMSE && fEstimator!=kCE) Log()<<kWARNING<<"Estimator type unspecified \t"<<Endl; //zjh

   Log() << kINFO << "Building Network" << Endl;

   DeleteNetwork();
   InitANNBase();

   // set activation and input functions
   TActivationChooser aChooser;
   fActivation = aChooser.CreateActivation(fNeuronType);
   fIdentity   = aChooser.CreateActivation("linear");
   if      (fEstimator==kMSE) fOutput = aChooser.CreateActivation("linear");  //zjh
   else if (fEstimator==kCE)  fOutput = aChooser.CreateActivation("sigmoid"); //zjh
   TNeuronInputChooser iChooser;
   fInputCalculator = iChooser.CreateNeuronInput(fNeuronInputType);

   fNetwork = new TObjArray();
   fRegulatorIdx.clear(); //zjh
   fRegulators.clear();   //zjh
   BuildLayers( layout, fromFile );

   // cache input layer and output neuron for fast access
   fInputLayer = (TObjArray*)fNetwork->At(0);
   TObjArray* outputLayer = (TObjArray*)fNetwork->At(fNetwork->GetEntriesFast()-1);
   fOutputNeurons.clear();
   for (Int_t i = 0; i < outputLayer->GetEntries(); i++) {
      fOutputNeurons.push_back( (TNeuron*)outputLayer->At(i) );
   }

   if (weights == NULL) InitWeights();
   else                 ForceWeights(weights);
}
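
// Usage sketch (assumption: mirrors the call made in ProcessOptions() above):
//
//    std::vector<Int_t>* layout = ParseLayoutString( fLayerSpec );
//    BuildNetwork( layout );          // NULL weights --> random initialisation
//    delete layout;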

////////////////////////////////////////////////////////////////////////////////
/// build the network layers

void TMVA::MethodANNBase::BuildLayers( std::vector<Int_t>* layout, Bool_t fromFile )
{
   TObjArray* curLayer;
   TObjArray* prevLayer = NULL;

   Int_t numLayers = layout->size();

   for (Int_t i = 0; i < numLayers; i++) {
      curLayer = new TObjArray();
      BuildLayer(layout->at(i), curLayer, prevLayer, i, numLayers, fromFile);
      prevLayer = curLayer;
      fNetwork->Add(curLayer);
   }

   // cache pointers to synapses for fast access, the order matters
   for (Int_t i = 0; i < numLayers; i++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      if (i!=0 && i!=numLayers-1) fRegulators.push_back(0.); //zjh
      for (Int_t j = 0; j < numNeurons; j++) {
         if (i==0) fRegulators.push_back(0.); //zjh
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            fSynapses->Add(synapse);
            fRegulatorIdx.push_back(fRegulators.size()-1); //zjh
         }
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// build a single layer with neurons and synapses connecting this
/// layer to the previous layer

void TMVA::MethodANNBase::BuildLayer( Int_t numNeurons, TObjArray* curLayer,
                                      TObjArray* prevLayer, Int_t layerIndex,
                                      Int_t numLayers, Bool_t fromFile )
{
   TNeuron* neuron;
   for (Int_t j = 0; j < numNeurons; j++) {
      if (fromFile && (layerIndex != numLayers-1) && (j==numNeurons-1)){
         neuron = new TNeuron();
         neuron->SetActivationEqn(fIdentity);
         neuron->SetBiasNeuron();
         neuron->ForceValue(1.0);
         curLayer->Add(neuron);
      }
      else {
         neuron = new TNeuron();
         neuron->SetInputCalculator(fInputCalculator);

         // input layer
         if (layerIndex == 0) {
            neuron->SetActivationEqn(fIdentity);
            neuron->SetInputNeuron();
         }
         else {
            // output layer
            if (layerIndex == numLayers-1) {
               neuron->SetOutputNeuron();
               neuron->SetActivationEqn(fOutput); //zjh
            }
            // hidden layers
            else neuron->SetActivationEqn(fActivation);
            AddPreLinks(neuron, prevLayer);
         }

         curLayer->Add(neuron);
      }
   }

   // add bias neuron (except to output layer)
   if(!fromFile){
      if (layerIndex != numLayers-1) {
         neuron = new TNeuron();
         neuron->SetActivationEqn(fIdentity);
         neuron->SetBiasNeuron();
         neuron->ForceValue(1.0);
         curLayer->Add(neuron);
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// add synapses connecting a neuron to its preceding layer

void TMVA::MethodANNBase::AddPreLinks(TNeuron* neuron, TObjArray* prevLayer)
{
   TSynapse* synapse;
   int numNeurons = prevLayer->GetEntriesFast();
   TNeuron* preNeuron;

   for (Int_t i = 0; i < numNeurons; i++) {
      preNeuron = (TNeuron*)prevLayer->At(i);
      synapse = new TSynapse();
      synapse->SetPreNeuron(preNeuron);
      synapse->SetPostNeuron(neuron);
      preNeuron->AddPostLink(synapse);
      neuron->AddPreLink(synapse);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// initialize the synapse weights randomly

void TMVA::MethodANNBase::InitWeights()
{
   PrintMessage("Initializing weights");

   // init synapse weights
   Int_t numSynapses = fSynapses->GetEntriesFast();
   TSynapse* synapse;
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight(4.0*frgen->Rndm() - 2.0);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// force the synapse weights

void TMVA::MethodANNBase::ForceWeights(std::vector<Double_t>* weights)
{
   PrintMessage("Forcing weights");

   Int_t numSynapses = fSynapses->GetEntriesFast();
   TSynapse* synapse;
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight(weights->at(i));
   }
}

////////////////////////////////////////////////////////////////////////////////
/// force the input values of the input neurons
/// (set the value for each input neuron)

void TMVA::MethodANNBase::ForceNetworkInputs( const Event* ev, Int_t ignoreIndex )
{
   Double_t x;
   TNeuron* neuron;

   // const Event* ev = GetEvent();
   for (UInt_t j = 0; j < GetNvar(); j++) {

      x = (j != (UInt_t)ignoreIndex)?ev->GetValue(j):0;

      neuron = GetInputNeuron(j);
      neuron->ForceValue(x);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// calculate input values to each neuron

void TMVA::MethodANNBase::ForceNetworkCalculations()
{
   TObjArray* curLayer;
   TNeuron* neuron;
   Int_t numLayers = fNetwork->GetEntriesFast();
   Int_t numNeurons;

   for (Int_t i = 0; i < numLayers; i++) {
      curLayer = (TObjArray*)fNetwork->At(i);
      numNeurons = curLayer->GetEntriesFast();

      for (Int_t j = 0; j < numNeurons; j++) {
         neuron = (TNeuron*) curLayer->At(j);
         neuron->CalculateValue();
         neuron->CalculateActivationValue();
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print messages, turn off printing by setting verbose and debug flag appropriately

void TMVA::MethodANNBase::PrintMessage( TString message, Bool_t force ) const
{
   if (Verbose() || Debug() || force) Log() << kINFO << message << Endl;
}

////////////////////////////////////////////////////////////////////////////////
/// wait for keyboard input, for debugging

void TMVA::MethodANNBase::WaitForKeyboard()
{
   std::string dummy;
   Log() << kINFO << "***Type anything to continue (q to quit): ";
   std::getline(std::cin, dummy);
   if (dummy == "q" || dummy == "Q") {
      PrintMessage( "quit" );
      delete this;
      exit(0);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print network representation, for debugging

void TMVA::MethodANNBase::PrintNetwork() const
{
   if (!Debug()) return;

   Log() << kINFO << Endl;
   PrintMessage( "Printing network " );
   Log() << kINFO << "-------------------------------------------------------------------" << Endl;

   TObjArray* curLayer;
   Int_t numLayers = fNetwork->GetEntriesFast();

   for (Int_t i = 0; i < numLayers; i++) {

      curLayer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = curLayer->GetEntriesFast();

      Log() << kINFO << "Layer #" << i << " (" << numNeurons << " neurons):" << Endl;
      PrintLayer( curLayer );
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print a single layer, for debugging

void TMVA::MethodANNBase::PrintLayer(TObjArray* layer) const
{
   Int_t numNeurons = layer->GetEntriesFast();
   TNeuron* neuron;

   for (Int_t j = 0; j < numNeurons; j++) {
      neuron = (TNeuron*) layer->At(j);
      Log() << kINFO << "\tNeuron #" << j << " (LinksIn: " << neuron->NumPreLinks()
            << " , LinksOut: " << neuron->NumPostLinks() << ")" << Endl;
      PrintNeuron( neuron );
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print a neuron, for debugging

void TMVA::MethodANNBase::PrintNeuron(TNeuron* neuron) const
{
   Log() << kINFO
         << "\t\tValue:\t"     << neuron->GetValue()
         << "\t\tActivation: " << neuron->GetActivationValue()
         << "\t\tDelta: "      << neuron->GetDelta() << Endl;
   Log() << kINFO << "\t\tActivationEquation:\t";
   neuron->PrintActivationEqn();
   Log() << kINFO << "\t\tLinksIn:" << Endl;
   neuron->PrintPreLinks();
   Log() << kINFO << "\t\tLinksOut:" << Endl;
   neuron->PrintPostLinks();
}

////////////////////////////////////////////////////////////////////////////////
/// get the mva value generated by the NN

Double_t TMVA::MethodANNBase::GetMvaValue( Double_t* err, Double_t* errUpper )
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network
   TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );
   neuron = (TNeuron*)outputLayer->At(0);

   // cannot determine error
   NoErrorCalc(err, errUpper);

   return neuron->GetActivationValue();
}
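
// Usage sketch (illustrative, hypothetical reader setup): the value returned
// above is what TMVA::Reader::EvaluateMVA() hands back for this method at
// application time; the variable and file names are placeholders.
//
//    TMVA::Reader reader;
//    reader.AddVariable( "var1", &var1 );   // repeat for each input variable
//    reader.BookMVA( "MLP", "weights/TMVAClassification_MLP.weights.xml" );
//    Double_t mva = reader.EvaluateMVA( "MLP" );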

////////////////////////////////////////////////////////////////////////////////
/// get the regression value generated by the NN

const std::vector<Float_t> &TMVA::MethodANNBase::GetRegressionValues()
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network
   TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );

   if (fRegressionReturnVal == NULL) fRegressionReturnVal = new std::vector<Float_t>();
   fRegressionReturnVal->clear();

   Event * evT = new Event(*ev);
   UInt_t ntgts = outputLayer->GetEntriesFast();
   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
      evT->SetTarget(itgt,((TNeuron*)outputLayer->At(itgt))->GetActivationValue());
   }

   const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
      fRegressionReturnVal->push_back( evT2->GetTarget(itgt) );
   }

   delete evT;

   return *fRegressionReturnVal;
}

////////////////////////////////////////////////////////////////////////////////
/// get the multiclass classification values generated by the NN

const std::vector<Float_t> &TMVA::MethodANNBase::GetMulticlassValues()
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network

   if (fMulticlassReturnVal == NULL) fMulticlassReturnVal = new std::vector<Float_t>();
   fMulticlassReturnVal->clear();
   std::vector<Float_t> temp;

   UInt_t nClasses = DataInfo().GetNClasses();
   for (UInt_t icls = 0; icls < nClasses; icls++) {
      temp.push_back(GetOutputNeuron( icls )->GetActivationValue() );
   }

   for(UInt_t iClass=0; iClass<nClasses; iClass++){
      Double_t norm = 0.0;
      for(UInt_t j=0;j<nClasses;j++){
         if(iClass!=j)
            norm+=exp(temp[j]-temp[iClass]);
      }
      (*fMulticlassReturnVal).push_back(1.0/(1.0+norm));
   }

   return *fMulticlassReturnVal;
}
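
// Note on the normalisation above: it is the softmax function evaluated in a
// form that avoids overflowing exp() for large activations,
//
//    1 / (1 + sum_{j != i} exp(t_j - t_i))  ==  exp(t_i) / sum_j exp(t_j)
//
// so the returned per-class values are positive and sum to one.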

////////////////////////////////////////////////////////////////////////////////
/// create XML description of ANN classifier

void TMVA::MethodANNBase::AddWeightsXMLTo( void* parent ) const
{
   Int_t numLayers = fNetwork->GetEntriesFast();
   void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
   void* xmlLayout = gTools().xmlengine().NewChild(wght, 0, "Layout");
   gTools().xmlengine().NewAttr(xmlLayout, 0, "NLayers", gTools().StringFromInt(fNetwork->GetEntriesFast()) );
   TString weights = "";
   for (Int_t i = 0; i < numLayers; i++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      void* layerxml = gTools().xmlengine().NewChild(xmlLayout, 0, "Layer");
      gTools().xmlengine().NewAttr(layerxml, 0, "Index",    gTools().StringFromInt(i) );
      gTools().xmlengine().NewAttr(layerxml, 0, "NNeurons", gTools().StringFromInt(numNeurons) );
      for (Int_t j = 0; j < numNeurons; j++) {
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         void* neuronxml = gTools().AddChild(layerxml, "Neuron");
         gTools().AddAttr(neuronxml, "NSynapses", gTools().StringFromInt(numSynapses) );
         if(numSynapses==0) continue;
         std::stringstream s("");
         s.precision( 16 );
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            s << std::scientific << synapse->GetWeight() << " ";
         }
         gTools().AddRawLine( neuronxml, s.str().c_str() );
      }
   }

   // if inverse hessian exists, write inverse hessian to weight file
   if( fInvHessian.GetNcols()>0 ){
      void* xmlInvHessian = gTools().xmlengine().NewChild(wght, 0, "InverseHessian");

      // get the matrix dimensions
      Int_t nElements = fInvHessian.GetNoElements();
      Int_t nRows     = fInvHessian.GetNrows();
      Int_t nCols     = fInvHessian.GetNcols();
      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NElements", gTools().StringFromInt(nElements) );
      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NRows", gTools().StringFromInt(nRows) );
      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NCols", gTools().StringFromInt(nCols) );

      // read in the matrix elements
      Double_t* elements = new Double_t[nElements+10];
      fInvHessian.GetMatrix2Array( elements );

      // store the matrix elements row-wise
      Int_t index = 0;
      for( Int_t row = 0; row < nRows; ++row ){
         void* xmlRow = gTools().xmlengine().NewChild(xmlInvHessian, 0, "Row");
         gTools().xmlengine().NewAttr(xmlRow, 0, "Index", gTools().StringFromInt(row) );

         // create the rows
         std::stringstream s("");
         s.precision( 16 );
         for( Int_t col = 0; col < nCols; ++col ){
            s << std::scientific << (*(elements+index)) << " ";
            ++index;
         }
         gTools().xmlengine().AddRawLine( xmlRow, s.str().c_str() );
      }
      delete[] elements;
   }
}
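
// The resulting weight-file fragment has this shape (illustrative sketch;
// counts and numbers depend on the trained network):
//
//    <Weights>
//      <Layout NLayers="3">
//        <Layer Index="0" NNeurons="4">
//          <Neuron NSynapses="3"> 1.2e-01 -8.7e-01 3.4e+00 </Neuron>
//          ...
//        </Layer>
//        ...
//      </Layout>
//      <InverseHessian NElements="..." NRows="..." NCols="...">  (only written when a regulator is used)
//        <Row Index="0"> ... </Row>
//      </InverseHessian>
//    </Weights>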

////////////////////////////////////////////////////////////////////////////////
/// read MLP from xml weight file

void TMVA::MethodANNBase::ReadWeightsFromXML( void* wghtnode )
{
   // build the layout first
   Bool_t fromFile = kTRUE;
   std::vector<Int_t>* layout = new std::vector<Int_t>();

   void* xmlLayout = NULL;
   xmlLayout = gTools().GetChild(wghtnode, "Layout");
   if( !xmlLayout )
      xmlLayout = wghtnode;

   UInt_t nLayers;
   gTools().ReadAttr( xmlLayout, "NLayers", nLayers );
   layout->resize( nLayers );

   void* ch = gTools().xmlengine().GetChild(xmlLayout);
   UInt_t index;
   UInt_t nNeurons;
   while (ch) {
      gTools().ReadAttr( ch, "Index",    index    );
      gTools().ReadAttr( ch, "NNeurons", nNeurons );
      layout->at(index) = nNeurons;
      ch = gTools().GetNextChild(ch);
   }

   BuildNetwork( layout, NULL, fromFile );
   // use 'slow' (exact) TanH if processing old weight file to ensure 100% compatible results
   // otherwise use the new default, the 'fast tanh' approximation
   if (GetTrainingTMVAVersionCode() < TMVA_VERSION(4,2,1) && fActivation->GetExpression().Contains("tanh")){
      TActivationTanh* act = dynamic_cast<TActivationTanh*>( fActivation );
      if (act) act->SetSlow();
   }

   // fill the weights of the synapses
   UInt_t nSyn;
   Float_t weight;
   ch = gTools().xmlengine().GetChild(xmlLayout);
   UInt_t iLayer = 0;
   while (ch) {  // layers
      TObjArray* layer = (TObjArray*)fNetwork->At(iLayer);
      gTools().ReadAttr( ch, "Index",    index    );
      gTools().ReadAttr( ch, "NNeurons", nNeurons );

      void* nodeN = gTools().GetChild(ch);
      UInt_t iNeuron = 0;
      while( nodeN ){ // neurons
         TNeuron *neuron = (TNeuron*)layer->At(iNeuron);
         gTools().ReadAttr( nodeN, "NSynapses", nSyn );
         if( nSyn > 0 ){
            const char* content = gTools().GetContent(nodeN);
            std::stringstream s(content);
            for (UInt_t iSyn = 0; iSyn<nSyn; iSyn++) { // synapses

               TSynapse* synapse = neuron->PostLinkAt(iSyn);
               s >> weight;
               //Log() << kWARNING << neuron << " " << weight << Endl;
               synapse->SetWeight(weight);
            }
         }
         nodeN = gTools().GetNextChild(nodeN);
         iNeuron++;
      }
      ch = gTools().GetNextChild(ch);
      iLayer++;
   }

   delete layout;

   void* xmlInvHessian = NULL;
   xmlInvHessian = gTools().GetChild(wghtnode, "InverseHessian");
   if( !xmlInvHessian )
      // no inverse hessian available
      return;

   fUseRegulator = kTRUE;

   Int_t nElements = 0;
   Int_t nRows = 0;
   Int_t nCols = 0;
   gTools().ReadAttr( xmlInvHessian, "NElements", nElements );
   gTools().ReadAttr( xmlInvHessian, "NRows", nRows );
   gTools().ReadAttr( xmlInvHessian, "NCols", nCols );

   // adjust the matrix dimensions
   fInvHessian.ResizeTo( nRows, nCols );

   // prepare an array to read in the values
   Double_t* elements;
   if (nElements > std::numeric_limits<int>::max()-100){
      Log() << kFATAL << "you tried to read a hessian matrix with " << nElements << " elements, --> too large, guess s.th. went wrong reading from the weight file" << Endl;
      return;
   } else {
      elements = new Double_t[nElements+10];
   }

   void* xmlRow = gTools().xmlengine().GetChild(xmlInvHessian);
   Int_t row = 0;
   index = 0;
   while (xmlRow) {  // rows
      gTools().ReadAttr( xmlRow, "Index", row );

      const char* content = gTools().xmlengine().GetNodeContent(xmlRow);

      std::stringstream s(content);
      for (Int_t iCol = 0; iCol<nCols; iCol++) {  // columns
         s >> (*(elements+index));
         ++index;
      }
      xmlRow = gTools().xmlengine().GetNext(xmlRow);
      ++row;
   }

   fInvHessian.SetMatrixArray( elements );

   delete[] elements;
}

////////////////////////////////////////////////////////////////////////////////
/// destroy/clear the network then read it back in from the weights file

void TMVA::MethodANNBase::ReadWeightsFromStream( std::istream& istr )
{
   // delete network so we can reconstruct network from scratch

   TString dummy;

   // synapse weights
   Double_t weight;
   std::vector<Double_t>* weights = new std::vector<Double_t>();
   istr >> dummy;
   while (istr >> dummy >> weight) weights->push_back(weight); // use w/ slower write-out

   ForceWeights(weights);

   delete weights;
}

////////////////////////////////////////////////////////////////////////////////
/// compute ranking of input variables by summing function of weights

const TMVA::Ranking* TMVA::MethodANNBase::CreateRanking()
{
   // create the ranking object
   fRanking = new Ranking( GetName(), "Importance" );

   TNeuron*  neuron;
   TSynapse* synapse;
   Double_t  importance, avgVal;
   TString varName;

   for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) {

      neuron = GetInputNeuron(ivar);
      Int_t numSynapses = neuron->NumPostLinks();
      importance = 0;
      varName = GetInputVar(ivar); // fix this line

      // figure out average value of variable i
      Double_t meanS, meanB, rmsS, rmsB, xmin, xmax;
      Statistics( TMVA::Types::kTraining, varName,
                  meanS, meanB, rmsS, rmsB, xmin, xmax );

      avgVal = (TMath::Abs(meanS) + TMath::Abs(meanB))/2.0;
      double meanrms = (TMath::Abs(rmsS) + TMath::Abs(rmsB))/2.;
      if (avgVal<meanrms) avgVal = meanrms;
      if (IsNormalised()) avgVal = 0.5*(1 + gTools().NormVariable( avgVal, GetXmin( ivar ), GetXmax( ivar )));

      for (Int_t j = 0; j < numSynapses; j++) {
         synapse = neuron->PostLinkAt(j);
         importance += synapse->GetWeight() * synapse->GetWeight();
      }

      importance *= avgVal * avgVal;

      fRanking->AddRank( Rank( varName, importance ) );
   }

   return fRanking;
}
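
// In formula form (sketch): for input variable v with average value x_v and
// post-synaptic weights w_vj, the importance computed above is
//
//    importance_v = x_v^2 * sum_j w_vj^2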

////////////////////////////////////////////////////////////////////////////////

void TMVA::MethodANNBase::CreateWeightMonitoringHists( const TString& bulkname,
                                                       std::vector<TH1*>* hv ) const
{
   TH2F* hist;
   Int_t numLayers = fNetwork->GetEntriesFast();

   for (Int_t i = 0; i < numLayers-1; i++) {

      TObjArray* layer1 = (TObjArray*)fNetwork->At(i);
      TObjArray* layer2 = (TObjArray*)fNetwork->At(i+1);
      Int_t numNeurons1 = layer1->GetEntriesFast();
      Int_t numNeurons2 = layer2->GetEntriesFast();

      TString name = Form("%s%i%i", bulkname.Data(), i, i+1);
      hist = new TH2F(name + "", name + "",
                      numNeurons1, 0, numNeurons1, numNeurons2, 0, numNeurons2);

      for (Int_t j = 0; j < numNeurons1; j++) {

         TNeuron* neuron = (TNeuron*)layer1->At(j);
         Int_t numSynapses = neuron->NumPostLinks();

         for (Int_t k = 0; k < numSynapses; k++) {

            TSynapse* synapse = neuron->PostLinkAt(k);
            hist->SetBinContent(j+1, k+1, synapse->GetWeight());
         }
      }

      if (hv) hv->push_back( hist );
      else {
         hist->Write();
         delete hist;
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// write histograms to file

void TMVA::MethodANNBase::WriteMonitoringHistosToFile() const
{
   PrintMessage(Form("Write special histos to file: %s", BaseDir()->GetPath()), kTRUE);

   if (fEstimatorHistTrain) fEstimatorHistTrain->Write();
   if (fEstimatorHistTest ) fEstimatorHistTest ->Write();

   // histograms containing weights for architecture plotting (used in macro "network.cxx")
   CreateWeightMonitoringHists( "weights_hist" );

   // now save all the epoch-wise monitoring information
#if __cplusplus > 199711L
   static std::atomic<int> epochMonitoringDirectoryNumber{0};
#else
   static int epochMonitoringDirectoryNumber = 0;
#endif
   int epochVal = epochMonitoringDirectoryNumber++;
   TDirectory* epochdir = NULL;
   if( epochVal == 0 )
      epochdir = BaseDir()->mkdir( "EpochMonitoring" );
   else
      epochdir = BaseDir()->mkdir( Form("EpochMonitoring_%4d",epochVal) );

   epochdir->cd();
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistS.begin(); it != fEpochMonHistS.end(); it++) {
      (*it)->Write();
      delete (*it);
   }
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistB.begin(); it != fEpochMonHistB.end(); it++) {
      (*it)->Write();
      delete (*it);
   }
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistW.begin(); it != fEpochMonHistW.end(); it++) {
      (*it)->Write();
      delete (*it);
   }
   BaseDir()->cd();
}

////////////////////////////////////////////////////////////////////////////////
/// write specific classifier response

void TMVA::MethodANNBase::MakeClassSpecific( std::ostream& fout, const TString& className ) const
{
   Int_t numLayers = fNetwork->GetEntries();

   fout << std::endl;
   fout << " double ActivationFnc(double x) const;" << std::endl;
   fout << " double OutputActivationFnc(double x) const;" << std::endl; //zjh
   fout << std::endl;
   fout << " int fLayers;" << std::endl;
   fout << " int fLayerSize["<<numLayers<<"];" << std::endl;
   int numNodesFrom = -1;
   for (Int_t lIdx = 0; lIdx < numLayers; lIdx++) {
      int numNodesTo = ((TObjArray*)fNetwork->At(lIdx))->GetEntries();
      if (numNodesFrom<0) { numNodesFrom=numNodesTo; continue; }
      fout << " double fWeightMatrix" << lIdx-1 << "to" << lIdx << "[" << numNodesTo << "][" << numNodesFrom << "];";
      fout << " // weight matrix from layer " << lIdx-1 << " to " << lIdx << std::endl;
      numNodesFrom = numNodesTo;
   }
   fout << std::endl;
   fout << " double * fWeights["<<numLayers<<"];" << std::endl;
   fout << "};" << std::endl;

   fout << std::endl;

   fout << "inline void " << className << "::Initialize()" << std::endl;
   fout << "{" << std::endl;
   fout << " // build network structure" << std::endl;
   fout << " fLayers = " << numLayers << ";" << std::endl;
   for (Int_t lIdx = 0; lIdx < numLayers; lIdx++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(lIdx);
      int numNodes = layer->GetEntries();
      fout << " fLayerSize[" << lIdx << "] = " << numNodes << "; fWeights["<<lIdx<<"] = new double["<<numNodes<<"]; " << std::endl;
   }

   for (Int_t i = 0; i < numLayers-1; i++) {
      fout << " // weight matrix from layer " << i << " to " << i+1 << std::endl;
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      for (Int_t j = 0; j < numNeurons; j++) {
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            fout << " fWeightMatrix" << i << "to" << i+1 << "[" << k << "][" << j << "] = " << synapse->GetWeight() << ";" << std::endl;
         }
      }
   }

   fout << "}" << std::endl;
   fout << std::endl;

   // writing of the GetMvaValue__ method
   fout << "inline double " << className << "::GetMvaValue__( const std::vector<double>& inputValues ) const" << std::endl;
   fout << "{" << std::endl;
   fout << " if (inputValues.size() != (unsigned int)fLayerSize[0]-1) {" << std::endl;
   fout << " std::cout << \"Input vector needs to be of size \" << fLayerSize[0]-1 << std::endl;" << std::endl;
   fout << " return 0;" << std::endl;
   fout << " }" << std::endl;
   fout << std::endl;
   fout << " for (int l=0; l<fLayers; l++)" << std::endl;
   fout << " for (int i=0; i<fLayerSize[l]; i++) fWeights[l][i]=0;" << std::endl;
   fout << std::endl;
   fout << " for (int l=0; l<fLayers-1; l++)" << std::endl;
   fout << " fWeights[l][fLayerSize[l]-1]=1;" << std::endl;
   fout << std::endl;
   fout << " for (int i=0; i<fLayerSize[0]-1; i++)" << std::endl;
   fout << " fWeights[0][i]=inputValues[i];" << std::endl;
   fout << std::endl;
   for (Int_t i = 0; i < numLayers-1; i++) {
      fout << " // layer " << i << " to " << i+1 << std::endl;
      if (i+1 == numLayers-1) {
         fout << " for (int o=0; o<fLayerSize[" << i+1 << "]; o++) {" << std::endl;
      }
      else {
         fout << " for (int o=0; o<fLayerSize[" << i+1 << "]-1; o++) {" << std::endl;
      }
      fout << " for (int i=0; i<fLayerSize[" << i << "]; i++) {" << std::endl;
      fout << " double inputVal = fWeightMatrix" << i << "to" << i+1 << "[o][i] * fWeights[" << i << "][i];" << std::endl;

      if ( fNeuronInputType == "sum") {
         fout << " fWeights[" << i+1 << "][o] += inputVal;" << std::endl;
      }
      else if ( fNeuronInputType == "sqsum") {
         fout << " fWeights[" << i+1 << "][o] += inputVal*inputVal;" << std::endl;
      }
      else { // fNeuronInputType == TNeuronInputChooser::kAbsSum
         fout << " fWeights[" << i+1 << "][o] += fabs(inputVal);" << std::endl;
      }
      fout << " }" << std::endl;
      if (i+1 != numLayers-1) // in the last layer no activation function is applied
         fout << " fWeights[" << i+1 << "][o] = ActivationFnc(fWeights[" << i+1 << "][o]);" << std::endl;
      else fout << " fWeights[" << i+1 << "][o] = OutputActivationFnc(fWeights[" << i+1 << "][o]);" << std::endl; //zjh
      fout << " }" << std::endl;
   }
   fout << std::endl;
   fout << " return fWeights[" << numLayers-1 << "][0];" << std::endl;
   fout << "}" << std::endl;

   fout << std::endl;
   TString fncName = className+"::ActivationFnc";
   fActivation->MakeFunction(fout, fncName);
   fncName = className+"::OutputActivationFnc"; //zjh
   fOutput->MakeFunction(fout, fncName);        //zjh

   fout << " " << std::endl;
   fout << "// Clean up" << std::endl;
   fout << "inline void " << className << "::Clear() " << std::endl;
   fout << "{" << std::endl;
   fout << " // clean up the arrays" << std::endl;
   fout << " for (int lIdx = 0; lIdx < "<<numLayers<<"; lIdx++) {" << std::endl;
   fout << " delete[] fWeights[lIdx];" << std::endl;
   fout << " }" << std::endl;
   fout << "}" << std::endl;
}

////////////////////////////////////////////////////////////////////////////////
/// who the hell makes such strange Debug flags that even use "global pointers"..

Bool_t TMVA::MethodANNBase::Debug() const
{
   return fgDEBUG;
}