MethodANNBase.cxx
// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Peter Speckmayer, Matt Jachowski, Jan Therhaag, Jiahang Zhong

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
 * Package: TMVA                                                                  *
 * Class  : MethodANNBase                                                         *
 * Web    : http://tmva.sourceforge.net                                           *
 *                                                                                *
 * Description:                                                                   *
 *      Artificial neural network base class for the discrimination of signal    *
 *      from background.                                                          *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Krzysztof Danielowski <danielow@cern.ch>       - IFJ & AGH, Poland        *
 *      Andreas Hoecker       <Andreas.Hocker@cern.ch> - CERN, Switzerland        *
 *      Matt Jachowski        <jachowski@stanford.edu> - Stanford University, USA *
 *      Kamil Kraszewski      <kalq@cern.ch>           - IFJ & UJ, Poland         *
 *      Maciej Kruk           <mkruk@cern.ch>          - IFJ & AGH, Poland        *
 *      Peter Speckmayer      <peter.speckmayer@cern.ch> - CERN, Switzerland      *
 *      Joerg Stelzer         <stelzer@cern.ch>        - DESY, Germany            *
 *      Jan Therhaag          <Jan.Therhaag@cern.ch>   - U of Bonn, Germany       *
 *      Jiahang Zhong         <Jiahang.Zhong@cern.ch>  - Academia Sinica, Taipei  *
 *                                                                                *
 * Copyright (c) 2005-2011:                                                       *
 *      CERN, Switzerland                                                         *
 *      U. of Bonn, Germany                                                       *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without             *
 * modification, are permitted according to the terms listed in LICENSE           *
 * (http://tmva.sourceforge.net/LICENSE)                                          *
 **********************************************************************************/

/*! \class TMVA::MethodANNBase
\ingroup TMVA

Base class for all TMVA methods using artificial neural networks.

*/

#include "TMVA/MethodBase.h"

#include "TMVA/Configurable.h"
#include "TMVA/DataSetInfo.h"
#include "TMVA/MethodANNBase.h"
#include "TMVA/MsgLogger.h"
#include "TMVA/TNeuron.h"
#include "TMVA/TSynapse.h"
#include "TMVA/TActivationChooser.h"
#include "TMVA/TActivationTanh.h"
#include "TMVA/Types.h"
#include "TMVA/Tools.h"
#include "TMVA/TNeuronInputChooser.h"
#include "TMVA/Ranking.h"
#include "TMVA/Version.h"

#include "TString.h"
#include "TDirectory.h"
#include "TRandom3.h"
#include "TH2F.h"
#include "TH1.h"
#include "TMath.h"
#include "TMatrixT.h"

#include <iostream>
#include <vector>
#include <cstdlib>
#include <stdexcept>
#include <atomic>
#include <sstream>
#include <limits>
#include <string>


using std::vector;

ClassImp(TMVA::MethodANNBase);

////////////////////////////////////////////////////////////////////////////////
/// standard constructor
/// Note: Right now it is an option to choose the neuron input function,
/// but only the input function "sum" leads to weight convergence --
/// otherwise the weights go to nan and lead to an ABORT.

TMVA::MethodANNBase::MethodANNBase( const TString& jobName,
                                    Types::EMVA methodType,
                                    const TString& methodTitle,
                                    DataSetInfo& theData,
                                    const TString& theOption )
: TMVA::MethodBase( jobName, methodType, methodTitle, theData, theOption)
   , fEstimator(kMSE)
   , fUseRegulator(kFALSE)
   , fRandomSeed(0)
{
   InitANNBase();

   DeclareOptions();
}

////////////////////////////////////////////////////////////////////////////////
/// construct the Method from the weight file

TMVA::MethodANNBase::MethodANNBase( Types::EMVA methodType,
                                    DataSetInfo& theData,
                                    const TString& theWeightFile)
   : TMVA::MethodBase( methodType, theData, theWeightFile)
   , fEstimator(kMSE)
   , fUseRegulator(kFALSE)
   , fRandomSeed(0)
{
   InitANNBase();

   DeclareOptions();
}

////////////////////////////////////////////////////////////////////////////////
/// define the options (their key words) that can be set in the option string
/// here the options common to all ANN-based methods are declared.
///
/// known options:
///
///  - NCycles=xx              : the number of training cycles
///  - Normalize=kTRUE,kFALSE  : if normalised input variables should be used
///  - HiddenLayers="N-1,N-2"  : the specification of the hidden layers
///  - NeuronType=sigmoid,tanh,radial,linear : the type of activation function
///    used at the neuron

void TMVA::MethodANNBase::DeclareOptions()
{
   DeclareOptionRef( fNcycles    = 500,       "NCycles",      "Number of training cycles" );
   DeclareOptionRef( fLayerSpec  = "N,N-1",   "HiddenLayers", "Specification of hidden layer architecture" );
   DeclareOptionRef( fNeuronType = "sigmoid", "NeuronType",   "Neuron activation function type" );
   DeclareOptionRef( fRandomSeed = 1, "RandomSeed", "Random seed for initial synapse weights (0 means unique seed for each run; default value '1')");

   DeclareOptionRef(fEstimatorS="MSE", "EstimatorType",
                    "MSE (Mean Square Estimator) for Gaussian Likelihood or CE(Cross-Entropy) for Bernoulli Likelihood" ); //zjh
   AddPreDefVal(TString("MSE"));  //zjh
   AddPreDefVal(TString("CE"));   //zjh


   TActivationChooser aChooser;
   std::vector<TString>* names = aChooser.GetAllActivationNames();
   Int_t nTypes = names->size();
   for (Int_t i = 0; i < nTypes; i++)
      AddPreDefVal(names->at(i));
   delete names;

   DeclareOptionRef(fNeuronInputType="sum", "NeuronInputType","Neuron input function type");
   TNeuronInputChooser iChooser;
   names = iChooser.GetAllNeuronInputNames();
   nTypes = names->size();
   for (Int_t i = 0; i < nTypes; i++) AddPreDefVal(names->at(i));
   delete names;
}
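
// Added usage sketch (not part of the original file; method and variable
// names are hypothetical): the options declared above are passed as one
// configuration string when booking an ANN-based method such as kMLP
// through the TMVA::Factory, e.g.
//
//    factory->BookMethod( dataloader, TMVA::Types::kMLP, "MLP",
//                         "NCycles=600:HiddenLayers=N,N-1:NeuronType=tanh:"
//                         "EstimatorType=CE:RandomSeed=1" );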

////////////////////////////////////////////////////////////////////////////////
/// process the options: choose the estimator type and build the network
/// from the layout specification

void TMVA::MethodANNBase::ProcessOptions()
{
   if ( DoRegression() || DoMulticlass()) fEstimatorS = "MSE";   //zjh
   else                                   fEstimatorS = "CE" ;   //hhv
   if      (fEstimatorS == "MSE" ) fEstimator = kMSE;
   else if (fEstimatorS == "CE")   fEstimator = kCE;             //zjh
   std::vector<Int_t>* layout = ParseLayoutString(fLayerSpec);
   BuildNetwork(layout);
   delete layout;
}
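
// Added note, not in the original file: with network output y and target t,
// the two estimator types chosen above correspond to
//
//    MSE:  E = sum_i (y_i - t_i)^2                             (Gaussian likelihood)
//    CE:   E = - sum_i [ t_i ln y_i + (1 - t_i) ln(1 - y_i) ]  (Bernoulli likelihood)
//
// which is why BuildNetwork() pairs MSE with a linear output neuron and CE
// with a sigmoid output neuron.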

////////////////////////////////////////////////////////////////////////////////
/// parse layout specification string and return a vector, each entry
/// containing the number of neurons to go in each successive layer

std::vector<Int_t>* TMVA::MethodANNBase::ParseLayoutString(TString layerSpec)
{
   std::vector<Int_t>* layout = new std::vector<Int_t>();
   layout->push_back((Int_t)GetNvar());
   while(layerSpec.Length()>0) {
      TString sToAdd="";
      if (layerSpec.First(',')<0) {
         sToAdd = layerSpec;
         layerSpec = "";
      }
      else {
         sToAdd = layerSpec(0,layerSpec.First(','));
         layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
      }
      int nNodes = 0;
      if (sToAdd.BeginsWith("n") || sToAdd.BeginsWith("N")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
      nNodes += atoi(sToAdd);
      layout->push_back(nNodes);
   }
   if( DoRegression() )
      layout->push_back( DataInfo().GetNTargets() );  // one output node for each target
   else if( DoMulticlass() )
      layout->push_back( DataInfo().GetNClasses() );  // one output node for each class
   else
      layout->push_back(1);  // one output node (for signal/background classification)

   return layout;
}
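
// Worked example (added for illustration): with 4 input variables
// (GetNvar() == 4) and the default fLayerSpec = "N,N-1", a binary
// classification network yields the layout
//
//    { 4, 4, 3, 1 }   // input layer, two hidden layers, one output node
//
// Bias neurons are not part of this vector; BuildLayer() appends one to
// every layer except the output layer.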

////////////////////////////////////////////////////////////////////////////////
/// initialize ANNBase object

void TMVA::MethodANNBase::InitANNBase()
{
   fNetwork            = NULL;
   frgen               = NULL;
   fActivation         = NULL;
   fOutput             = NULL; //zjh
   fIdentity           = NULL;
   fInputCalculator    = NULL;
   fSynapses           = NULL;
   fEstimatorHistTrain = NULL;
   fEstimatorHistTest  = NULL;

   // reset monitoring histogram vectors
   fEpochMonHistS.clear();
   fEpochMonHistB.clear();
   fEpochMonHistW.clear();

   // these will be set in BuildNetwork()
   fInputLayer = NULL;
   fOutputNeurons.clear();

   frgen = new TRandom3(fRandomSeed);

   fSynapses = new TObjArray();
}

////////////////////////////////////////////////////////////////////////////////
/// destructor

TMVA::MethodANNBase::~MethodANNBase()
{
   DeleteNetwork();
}

////////////////////////////////////////////////////////////////////////////////
/// delete/clear network

void TMVA::MethodANNBase::DeleteNetwork()
{
   if (fNetwork != NULL) {
      TObjArray *layer;
      Int_t numLayers = fNetwork->GetEntriesFast();
      for (Int_t i = 0; i < numLayers; i++) {
         layer = (TObjArray*)fNetwork->At(i);
         DeleteNetworkLayer(layer);
      }
      delete fNetwork;
   }

   if (frgen != NULL)            delete frgen;
   if (fActivation != NULL)      delete fActivation;
   if (fOutput != NULL)          delete fOutput;  //zjh
   if (fIdentity != NULL)        delete fIdentity;
   if (fInputCalculator != NULL) delete fInputCalculator;
   if (fSynapses != NULL)        delete fSynapses;

   fNetwork         = NULL;
   frgen            = NULL;
   fActivation      = NULL;
   fOutput          = NULL; //zjh
   fIdentity        = NULL;
   fInputCalculator = NULL;
   fSynapses        = NULL;
}

////////////////////////////////////////////////////////////////////////////////
/// delete a network layer

void TMVA::MethodANNBase::DeleteNetworkLayer( TObjArray*& layer )
{
   TNeuron* neuron;
   Int_t numNeurons = layer->GetEntriesFast();
   for (Int_t i = 0; i < numNeurons; i++) {
      neuron = (TNeuron*)layer->At(i);
      neuron->DeletePreLinks();
      delete neuron;
   }
   delete layer;
}

////////////////////////////////////////////////////////////////////////////////
/// build network given a layout (number of neurons in each layer)
/// and optional weights array

void TMVA::MethodANNBase::BuildNetwork( std::vector<Int_t>* layout, std::vector<Double_t>* weights, Bool_t fromFile )
{
   if      (fEstimatorS == "MSE") fEstimator = kMSE;   //zjh
   else if (fEstimatorS == "CE")  fEstimator = kCE;    //zjh
   else    Log()<<kWARNING<<"fEstimator="<<fEstimator<<"\tfEstimatorS="<<fEstimatorS<<Endl;
   if (fEstimator!=kMSE && fEstimator!=kCE) Log()<<kWARNING<<"Estimator type unspecified \t"<<Endl; //zjh


   Log() << kHEADER << "Building Network. " << Endl;

   DeleteNetwork();
   InitANNBase();

   // set activation and input functions
   TActivationChooser aChooser;
   fActivation = aChooser.CreateActivation(fNeuronType);
   fIdentity   = aChooser.CreateActivation("linear");
   if      (fEstimator==kMSE) fOutput = aChooser.CreateActivation("linear");   //zjh
   else if (fEstimator==kCE)  fOutput = aChooser.CreateActivation("sigmoid");  //zjh
   TNeuronInputChooser iChooser;
   fInputCalculator = iChooser.CreateNeuronInput(fNeuronInputType);

   fNetwork = new TObjArray();
   fRegulatorIdx.clear();
   fRegulators.clear();
   BuildLayers( layout, fromFile );

   // cache input layer and output neuron for fast access
   fInputLayer = (TObjArray*)fNetwork->At(0);
   TObjArray* outputLayer = (TObjArray*)fNetwork->At(fNetwork->GetEntriesFast()-1);
   fOutputNeurons.clear();
   for (Int_t i = 0; i < outputLayer->GetEntries(); i++) {
      fOutputNeurons.push_back( (TNeuron*)outputLayer->At(i) );
   }

   if (weights == NULL) InitWeights();
   else                 ForceWeights(weights);
}

////////////////////////////////////////////////////////////////////////////////
/// build the network layers

void TMVA::MethodANNBase::BuildLayers( std::vector<Int_t>* layout, Bool_t fromFile )
{
   TObjArray* curLayer;
   TObjArray* prevLayer = NULL;

   Int_t numLayers = layout->size();

   for (Int_t i = 0; i < numLayers; i++) {
      curLayer = new TObjArray();
      BuildLayer(layout->at(i), curLayer, prevLayer, i, numLayers, fromFile);
      prevLayer = curLayer;
      fNetwork->Add(curLayer);
   }

   // cache pointers to synapses for fast access, the order matters
   for (Int_t i = 0; i < numLayers; i++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      if (i!=0 && i!=numLayers-1) fRegulators.push_back(0.);  //zjh
      for (Int_t j = 0; j < numNeurons; j++) {
         if (i==0) fRegulators.push_back(0.);  //zjh
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            fSynapses->Add(synapse);
            fRegulatorIdx.push_back(fRegulators.size()-1);  //zjh
         }
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// build a single layer with neurons and synapses connecting this
/// layer to the previous layer

void TMVA::MethodANNBase::BuildLayer( Int_t numNeurons, TObjArray* curLayer,
                                      TObjArray* prevLayer, Int_t layerIndex,
                                      Int_t numLayers, Bool_t fromFile )
{
   TNeuron* neuron;
   for (Int_t j = 0; j < numNeurons; j++) {
      if (fromFile && (layerIndex != numLayers-1) && (j==numNeurons-1)){
         neuron = new TNeuron();
         neuron->SetActivationEqn(fIdentity);
         neuron->SetBiasNeuron();
         neuron->ForceValue(1.0);
         curLayer->Add(neuron);
      }
      else {
         neuron = new TNeuron();
         neuron->SetInputCalculator(fInputCalculator);

         // input layer
         if (layerIndex == 0) {
            neuron->SetActivationEqn(fIdentity);
            neuron->SetInputNeuron();
         }
         else {
            // output layer
            if (layerIndex == numLayers-1) {
               neuron->SetOutputNeuron();
               neuron->SetActivationEqn(fOutput);   //zjh
            }
            // hidden layers
            else neuron->SetActivationEqn(fActivation);
            AddPreLinks(neuron, prevLayer);
         }

         curLayer->Add(neuron);
      }
   }

   // add bias neuron (except to output layer)
   if(!fromFile){
      if (layerIndex != numLayers-1) {
         neuron = new TNeuron();
         neuron->SetActivationEqn(fIdentity);
         neuron->SetBiasNeuron();
         neuron->ForceValue(1.0);
         curLayer->Add(neuron);
      }
   }
}
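
// Added note for illustration: continuing the { 4, 4, 3, 1 } example from
// ParseLayoutString(), the layers built here hold 5, 5, 4 and 1 neurons
// respectively -- each non-output layer gets one extra bias neuron whose
// value is forced to 1.0.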

////////////////////////////////////////////////////////////////////////////////
/// add synapses connecting a neuron to its preceding layer

void TMVA::MethodANNBase::AddPreLinks( TNeuron* neuron, TObjArray* prevLayer )
{
   TSynapse* synapse;
   int numNeurons = prevLayer->GetEntriesFast();
   TNeuron* preNeuron;

   for (Int_t i = 0; i < numNeurons; i++) {
      preNeuron = (TNeuron*)prevLayer->At(i);
      synapse = new TSynapse();
      synapse->SetPreNeuron(preNeuron);
      synapse->SetPostNeuron(neuron);
      preNeuron->AddPostLink(synapse);
      neuron->AddPreLink(synapse);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// initialize the synapse weights randomly

void TMVA::MethodANNBase::InitWeights()
{
   PrintMessage("Initializing weights");

   // init synapse weights
   Int_t numSynapses = fSynapses->GetEntriesFast();
   TSynapse* synapse;
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight(4.0*frgen->Rndm() - 2.0);
   }
}
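
// Added note: TRandom3::Rndm() is uniform in (0, 1), so the initial weights
// set above are drawn uniformly from the interval (-2, 2).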

////////////////////////////////////////////////////////////////////////////////
/// force the synapse weights

void TMVA::MethodANNBase::ForceWeights(std::vector<Double_t>* weights)
{
   PrintMessage("Forcing weights");

   Int_t numSynapses = fSynapses->GetEntriesFast();
   TSynapse* synapse;
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight(weights->at(i));
   }
}

////////////////////////////////////////////////////////////////////////////////
/// force the value for each input neuron

void TMVA::MethodANNBase::ForceNetworkInputs( const Event* ev, Int_t ignoreIndex )
{
   Double_t x;
   TNeuron* neuron;

   for (UInt_t j = 0; j < GetNvar(); j++) {

      x = (j != (UInt_t)ignoreIndex)?ev->GetValue(j):0;

      neuron = GetInputNeuron(j);
      neuron->ForceValue(x);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// calculate input values to each neuron

void TMVA::MethodANNBase::ForceNetworkCalculations()
{
   TObjArray* curLayer;
   TNeuron* neuron;
   Int_t numLayers = fNetwork->GetEntriesFast();
   Int_t numNeurons;

   for (Int_t i = 0; i < numLayers; i++) {
      curLayer = (TObjArray*)fNetwork->At(i);
      numNeurons = curLayer->GetEntriesFast();

      for (Int_t j = 0; j < numNeurons; j++) {
         neuron = (TNeuron*) curLayer->At(j);
         neuron->CalculateValue();
         neuron->CalculateActivationValue();
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print messages, turn off printing by setting verbose and debug flag appropriately

void TMVA::MethodANNBase::PrintMessage( TString message, Bool_t force ) const
{
   if (Verbose() || Debug() || force) Log() << kINFO << message << Endl;
}

////////////////////////////////////////////////////////////////////////////////
/// wait for keyboard input, for debugging

void TMVA::MethodANNBase::WaitForKeyboard()
{
   std::string dummy;
   Log() << kINFO << "***Type anything to continue (q to quit): ";
   std::getline(std::cin, dummy);
   if (dummy == "q" || dummy == "Q") {
      PrintMessage( "quit" );
      delete this;
      exit(0);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print network representation, for debugging

void TMVA::MethodANNBase::PrintNetwork() const
{
   if (!Debug()) return;

   Log() << kINFO << Endl;
   PrintMessage( "Printing network " );
   Log() << kINFO << "-------------------------------------------------------------------" << Endl;

   TObjArray* curLayer;
   Int_t numLayers = fNetwork->GetEntriesFast();

   for (Int_t i = 0; i < numLayers; i++) {

      curLayer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = curLayer->GetEntriesFast();

      Log() << kINFO << "Layer #" << i << " (" << numNeurons << " neurons):" << Endl;
      PrintLayer( curLayer );
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print a single layer, for debugging

void TMVA::MethodANNBase::PrintLayer( TObjArray* layer ) const
{
   Int_t numNeurons = layer->GetEntriesFast();
   TNeuron* neuron;

   for (Int_t j = 0; j < numNeurons; j++) {
      neuron = (TNeuron*) layer->At(j);
      Log() << kINFO << "\tNeuron #" << j << " (LinksIn: " << neuron->NumPreLinks()
            << " , LinksOut: " << neuron->NumPostLinks() << ")" << Endl;
      PrintNeuron( neuron );
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print a neuron, for debugging

void TMVA::MethodANNBase::PrintNeuron( TNeuron* neuron ) const
{
   Log() << kINFO
         << "\t\tValue:\t"     << neuron->GetValue()
         << "\t\tActivation: " << neuron->GetActivationValue()
         << "\t\tDelta: "      << neuron->GetDelta() << Endl;
   Log() << kINFO << "\t\tActivationEquation:\t";
   neuron->PrintActivationEqn();
   Log() << kINFO << "\t\tLinksIn:" << Endl;
   neuron->PrintPreLinks();
   Log() << kINFO << "\t\tLinksOut:" << Endl;
   neuron->PrintPostLinks();
}

////////////////////////////////////////////////////////////////////////////////
/// get the mva value generated by the NN

Double_t TMVA::MethodANNBase::GetMvaValue( Double_t* err, Double_t* errUpper )
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network
   TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );
   neuron = (TNeuron*)outputLayer->At(0);

   // cannot determine error
   NoErrorCalc(err, errUpper);

   return neuron->GetActivationValue();
}

////////////////////////////////////////////////////////////////////////////////
/// get the regression value generated by the NN

const std::vector<Float_t> &TMVA::MethodANNBase::GetRegressionValues()
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network
   TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );

   if (fRegressionReturnVal == NULL) fRegressionReturnVal = new std::vector<Float_t>();
   fRegressionReturnVal->clear();

   Event * evT = new Event(*ev);
   UInt_t ntgts = outputLayer->GetEntriesFast();
   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
      evT->SetTarget(itgt,((TNeuron*)outputLayer->At(itgt))->GetActivationValue());
   }

   const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
      fRegressionReturnVal->push_back( evT2->GetTarget(itgt) );
   }

   delete evT;

   return *fRegressionReturnVal;
}

////////////////////////////////////////////////////////////////////////////////
/// get the multiclass classification values generated by the NN

const std::vector<Float_t> &TMVA::MethodANNBase::GetMulticlassValues()
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network

   if (fMulticlassReturnVal == NULL) fMulticlassReturnVal = new std::vector<Float_t>();
   fMulticlassReturnVal->clear();
   std::vector<Float_t> temp;

   UInt_t nClasses = DataInfo().GetNClasses();
   for (UInt_t icls = 0; icls < nClasses; icls++) {
      temp.push_back(GetOutputNeuron( icls )->GetActivationValue());
   }

   for(UInt_t iClass=0; iClass<nClasses; iClass++){
      Double_t norm = 0.0;
      for(UInt_t j=0;j<nClasses;j++){
         if(iClass!=j)
            norm += exp(temp[j]-temp[iClass]);
      }
      (*fMulticlassReturnVal).push_back(1.0/(1.0+norm));
   }

   return *fMulticlassReturnVal;
}
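
// Added note: the normalisation above is algebraically the softmax function,
//
//    1 / (1 + sum_{j != i} exp(t_j - t_i)) = exp(t_i) / sum_j exp(t_j),
//
// written in terms of activation differences, which is numerically safer
// than exponentiating the raw outputs.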

////////////////////////////////////////////////////////////////////////////////
/// create XML description of ANN classifier

void TMVA::MethodANNBase::AddWeightsXMLTo( void* parent ) const
{
   Int_t numLayers = fNetwork->GetEntriesFast();
   void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
   void* xmlLayout = gTools().xmlengine().NewChild(wght, 0, "Layout");
   gTools().xmlengine().NewAttr(xmlLayout, 0, "NLayers", gTools().StringFromInt(fNetwork->GetEntriesFast()) );
   TString weights = "";
   for (Int_t i = 0; i < numLayers; i++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      void* layerxml = gTools().xmlengine().NewChild(xmlLayout, 0, "Layer");
      gTools().xmlengine().NewAttr(layerxml, 0, "Index",    gTools().StringFromInt(i) );
      gTools().xmlengine().NewAttr(layerxml, 0, "NNeurons", gTools().StringFromInt(numNeurons) );
      for (Int_t j = 0; j < numNeurons; j++) {
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         void* neuronxml = gTools().AddChild(layerxml, "Neuron");
         gTools().AddAttr(neuronxml, "NSynapses", gTools().StringFromInt(numSynapses) );
         if(numSynapses==0) continue;
         std::stringstream s("");
         s.precision( 16 );
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            s << std::scientific << synapse->GetWeight() << " ";
         }
         gTools().AddRawLine( neuronxml, s.str().c_str() );
      }
   }

   // if inverse hessian exists, write inverse hessian to weight file
   if( fInvHessian.GetNcols()>0 ){
      void* xmlInvHessian = gTools().xmlengine().NewChild(wght, 0, "InverseHessian");

      // get the matrix dimensions
      Int_t nElements = fInvHessian.GetNoElements();
      Int_t nRows     = fInvHessian.GetNrows();
      Int_t nCols     = fInvHessian.GetNcols();
      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NElements", gTools().StringFromInt(nElements) );
      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NRows",     gTools().StringFromInt(nRows) );
      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NCols",     gTools().StringFromInt(nCols) );

      // read in the matrix elements
      Double_t* elements = new Double_t[nElements+10];
      fInvHessian.GetMatrix2Array( elements );

      // store the matrix elements row-wise
      Int_t index = 0;
      for( Int_t row = 0; row < nRows; ++row ){
         void* xmlRow = gTools().xmlengine().NewChild(xmlInvHessian, 0, "Row");
         gTools().xmlengine().NewAttr(xmlRow, 0, "Index", gTools().StringFromInt(row) );

         // create the rows
         std::stringstream s("");
         s.precision( 16 );
         for( Int_t col = 0; col < nCols; ++col ){
            s << std::scientific << (*(elements+index)) << " ";
            ++index;
         }
         gTools().xmlengine().AddRawLine( xmlRow, s.str().c_str() );
      }
      delete[] elements;
   }
}
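
// Illustrative sketch (added; values hypothetical and abbreviated) of the
// weight-file fragment produced above:
//
//    <Weights>
//      <Layout NLayers="3">
//        <Layer Index="0" NNeurons="5">
//          <Neuron NSynapses="4">2.1410000000000000e-01 -1.0930000000000000e+00 ...</Neuron>
//          ...
//        </Layer>
//        ...
//      </Layout>
//      <InverseHessian NElements="..." NRows="..." NCols="...">
//        <Row Index="0">...</Row>
//        ...
//      </InverseHessian>
//    </Weights>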


////////////////////////////////////////////////////////////////////////////////
/// read MLP from xml weight file

void TMVA::MethodANNBase::ReadWeightsFromXML( void* wghtnode )
{
   // build the layout first
   Bool_t fromFile = kTRUE;
   std::vector<Int_t>* layout = new std::vector<Int_t>();

   void* xmlLayout = NULL;
   xmlLayout = gTools().GetChild(wghtnode, "Layout");
   if( !xmlLayout )
      xmlLayout = wghtnode;

   UInt_t nLayers;
   gTools().ReadAttr( xmlLayout, "NLayers", nLayers );
   layout->resize( nLayers );

   void* ch = gTools().xmlengine().GetChild(xmlLayout);
   UInt_t index;
   UInt_t nNeurons;
   while (ch) {
      gTools().ReadAttr( ch, "Index",    index    );
      gTools().ReadAttr( ch, "NNeurons", nNeurons );
      layout->at(index) = nNeurons;
      ch = gTools().GetNextChild(ch);
   }

   BuildNetwork( layout, NULL, fromFile );
   // use 'slow' (exact) TanH if processing old weight file to ensure 100% compatible results
   // otherwise use the new default, the 'fast tanh' approximation
   if (GetTrainingTMVAVersionCode() < TMVA_VERSION(4,2,1) && fActivation->GetExpression().Contains("tanh")){
      TActivationTanh* act = dynamic_cast<TActivationTanh*>( fActivation );
      if (act) act->SetSlow();
   }

   // fill the weights of the synapses
   UInt_t nSyn;
   Float_t weight;
   ch = gTools().xmlengine().GetChild(xmlLayout);
   UInt_t iLayer = 0;
   while (ch) {  // layers
      TObjArray* layer = (TObjArray*)fNetwork->At(iLayer);
      gTools().ReadAttr( ch, "Index",    index    );
      gTools().ReadAttr( ch, "NNeurons", nNeurons );

      void* nodeN = gTools().GetChild(ch);
      UInt_t iNeuron = 0;
      while( nodeN ){ // neurons
         TNeuron *neuron = (TNeuron*)layer->At(iNeuron);
         gTools().ReadAttr( nodeN, "NSynapses", nSyn );
         if( nSyn > 0 ){
            const char* content = gTools().GetContent(nodeN);
            std::stringstream s(content);
            for (UInt_t iSyn = 0; iSyn<nSyn; iSyn++) { // synapses

               TSynapse* synapse = neuron->PostLinkAt(iSyn);
               s >> weight;
               //Log() << kWARNING << neuron << " " << weight << Endl;
               synapse->SetWeight(weight);
            }
         }
         nodeN = gTools().GetNextChild(nodeN);
         iNeuron++;
      }
      ch = gTools().GetNextChild(ch);
      iLayer++;
   }

   delete layout;

   void* xmlInvHessian = NULL;
   xmlInvHessian = gTools().GetChild(wghtnode, "InverseHessian");
   if( !xmlInvHessian )
      // no inverse hessian available
      return;

   fUseRegulator = kTRUE;

   Int_t nElements = 0;
   Int_t nRows     = 0;
   Int_t nCols     = 0;
   gTools().ReadAttr( xmlInvHessian, "NElements", nElements );
   gTools().ReadAttr( xmlInvHessian, "NRows",     nRows );
   gTools().ReadAttr( xmlInvHessian, "NCols",     nCols );

   // adjust the matrix dimensions
   fInvHessian.ResizeTo( nRows, nCols );

   // prepare an array to read in the values
   Double_t* elements;
   if (nElements > std::numeric_limits<int>::max()-100){
      Log() << kFATAL << "you tried to read a hessian matrix with " << nElements
            << " elements, --> too large, something went wrong reading the weight file" << Endl;
      return;
   } else {
      elements = new Double_t[nElements+10];
   }

   void* xmlRow = gTools().xmlengine().GetChild(xmlInvHessian);
   Int_t row = 0;
   index = 0;
   while (xmlRow) { // rows
      gTools().ReadAttr( xmlRow, "Index", row );

      const char* content = gTools().xmlengine().GetNodeContent(xmlRow);

      std::stringstream s(content);
      for (Int_t iCol = 0; iCol<nCols; iCol++) { // columns
         s >> (*(elements+index));
         ++index;
      }
      xmlRow = gTools().xmlengine().GetNext(xmlRow);
      ++row;
   }

   fInvHessian.SetMatrixArray( elements );

   delete[] elements;
}

////////////////////////////////////////////////////////////////////////////////
/// destroy/clear the network then read it back in from the weights file

void TMVA::MethodANNBase::ReadWeightsFromStream( std::istream & istr)
{
   // delete network so we can reconstruct network from scratch

   TString dummy;

   // synapse weights
   Double_t weight;
   std::vector<Double_t>* weights = new std::vector<Double_t>();
   istr>> dummy;
   while (istr>> dummy >> weight) weights->push_back(weight); // use w/ slower write-out

   ForceWeights(weights);


   delete weights;
}

////////////////////////////////////////////////////////////////////////////////
/// compute ranking of input variables by summing function of weights

const TMVA::Ranking* TMVA::MethodANNBase::CreateRanking()
{
   // create the ranking object
   fRanking = new Ranking( GetName(), "Importance" );

   TNeuron*  neuron;
   TSynapse* synapse;
   Double_t  importance, avgVal;
   TString varName;

   for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) {

      neuron = GetInputNeuron(ivar);
      Int_t numSynapses = neuron->NumPostLinks();
      importance = 0;
      varName = GetInputVar(ivar); // fix this line

      // figure out average value of variable i
      Double_t meanS, meanB, rmsS, rmsB, xmin, xmax;
      Statistics( TMVA::Types::kTraining, varName,
                  meanS, meanB, rmsS, rmsB, xmin, xmax );

      avgVal = (TMath::Abs(meanS) + TMath::Abs(meanB))/2.0;
      double meanrms = (TMath::Abs(rmsS) + TMath::Abs(rmsB))/2.;
      if (avgVal<meanrms) avgVal = meanrms;
      if (IsNormalised()) avgVal = 0.5*(1 + gTools().NormVariable( avgVal, GetXmin( ivar ), GetXmax( ivar )));

      for (Int_t j = 0; j < numSynapses; j++) {
         synapse = neuron->PostLinkAt(j);
         importance += synapse->GetWeight() * synapse->GetWeight();
      }

      importance *= avgVal * avgVal;

      fRanking->AddRank( Rank( varName, importance ) );
   }

   return fRanking;
}
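
// Added note: the importance assigned to variable i above is
//
//    I_i = a_i^2 * sum_j w_ij^2 ,
//
// where w_ij are the weights of the synapses leaving input neuron i and a_i
// is the larger of the variable's average |mean| and average RMS over signal
// and background (rescaled if the inputs are normalised).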

////////////////////////////////////////////////////////////////////////////////

void TMVA::MethodANNBase::CreateWeightMonitoringHists( const TString& bulkname,
                                                       std::vector<TH1*>* hv ) const
{
   TH2F* hist;
   Int_t numLayers = fNetwork->GetEntriesFast();

   for (Int_t i = 0; i < numLayers-1; i++) {

      TObjArray* layer1 = (TObjArray*)fNetwork->At(i);
      TObjArray* layer2 = (TObjArray*)fNetwork->At(i+1);
      Int_t numNeurons1 = layer1->GetEntriesFast();
      Int_t numNeurons2 = layer2->GetEntriesFast();

      TString name = Form("%s%i%i", bulkname.Data(), i, i+1);
      hist = new TH2F(name + "", name + "",
                      numNeurons1, 0, numNeurons1, numNeurons2, 0, numNeurons2);

      for (Int_t j = 0; j < numNeurons1; j++) {

         TNeuron* neuron = (TNeuron*)layer1->At(j);
         Int_t numSynapses = neuron->NumPostLinks();

         for (Int_t k = 0; k < numSynapses; k++) {

            TSynapse* synapse = neuron->PostLinkAt(k);
            hist->SetBinContent(j+1, k+1, synapse->GetWeight());

         }
      }

      if (hv) hv->push_back( hist );
      else {
         hist->Write();
         delete hist;
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// write histograms to file

void TMVA::MethodANNBase::WriteMonitoringHistosToFile() const
{
   PrintMessage(Form("Write special histos to file: %s", BaseDir()->GetPath()), kTRUE);

   if (fEstimatorHistTrain) fEstimatorHistTrain->Write();
   if (fEstimatorHistTest ) fEstimatorHistTest ->Write();

   // histograms containing weights for architecture plotting (used in macro "network.cxx")
   CreateWeightMonitoringHists( "weights_hist" );

   // now save all the epoch-wise monitoring information
   static std::atomic<int> epochMonitoringDirectoryNumber{0};
   int epochVal = epochMonitoringDirectoryNumber++;
   TDirectory* epochdir = NULL;
   if( epochVal == 0 )
      epochdir = BaseDir()->mkdir( "EpochMonitoring" );
   else
      epochdir = BaseDir()->mkdir( Form("EpochMonitoring_%4d",epochVal) );

   epochdir->cd();
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistS.begin(); it != fEpochMonHistS.end(); ++it) {
      (*it)->Write();
      delete (*it);
   }
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistB.begin(); it != fEpochMonHistB.end(); ++it) {
      (*it)->Write();
      delete (*it);
   }
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistW.begin(); it != fEpochMonHistW.end(); ++it) {
      (*it)->Write();
      delete (*it);
   }
   BaseDir()->cd();
}

////////////////////////////////////////////////////////////////////////////////
/// write specific classifier response

void TMVA::MethodANNBase::MakeClassSpecific( std::ostream& fout, const TString& className ) const
{
   Int_t numLayers = fNetwork->GetEntries();

   fout << std::endl;
   fout << "   double ActivationFnc(double x) const;" << std::endl;
   fout << "   double OutputActivationFnc(double x) const;" << std::endl; //zjh
   fout << std::endl;
   int numNodesFrom = -1;
   for (Int_t lIdx = 0; lIdx < numLayers; lIdx++) {
      int numNodesTo = ((TObjArray*)fNetwork->At(lIdx))->GetEntries();
      if (numNodesFrom<0) { numNodesFrom=numNodesTo; continue; }
      fout << "   double fWeightMatrix" << lIdx-1 << "to" << lIdx << "[" << numNodesTo << "][" << numNodesFrom << "];";
      fout << "   // weight matrix from layer " << lIdx-1 << " to " << lIdx << std::endl;
      numNodesFrom = numNodesTo;
   }
   fout << std::endl;
   fout << "};" << std::endl;

   fout << std::endl;

   fout << "inline void " << className << "::Initialize()" << std::endl;
   fout << "{" << std::endl;
   fout << "   // build network structure" << std::endl;

   for (Int_t i = 0; i < numLayers-1; i++) {
      fout << "   // weight matrix from layer " << i << " to " << i+1 << std::endl;
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      for (Int_t j = 0; j < numNeurons; j++) {
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            fout << "   fWeightMatrix" << i << "to" << i+1 << "[" << k << "][" << j << "] = " << synapse->GetWeight() << ";" << std::endl;
         }
      }
   }

   fout << "}" << std::endl;
   fout << std::endl;

   // writing of the GetMvaValue__ method
   fout << "inline double " << className << "::GetMvaValue__( const std::vector<double>& inputValues ) const" << std::endl;
   fout << "{" << std::endl;
   fout << "   if (inputValues.size() != (unsigned int)" << ((TObjArray *)fNetwork->At(0))->GetEntries() - 1 << ") {"
        << std::endl;
   fout << "      std::cout << \"Input vector needs to be of size \" << "
        << ((TObjArray *)fNetwork->At(0))->GetEntries() - 1 << " << std::endl;" << std::endl;
   fout << "      return 0;" << std::endl;
   fout << "   }" << std::endl;
   fout << std::endl;
   for (Int_t lIdx = 1; lIdx < numLayers; lIdx++) {
      TObjArray *layer = (TObjArray *)fNetwork->At(lIdx);
      int numNodes = layer->GetEntries();
      fout << "   std::array<double, " << numNodes << "> fWeights" << lIdx << " {{}};" << std::endl;
   }
   for (Int_t lIdx = 1; lIdx < numLayers - 1; lIdx++) {
      fout << "   fWeights" << lIdx << ".back() = 1.;" << std::endl;
   }
   fout << std::endl;
   for (Int_t i = 0; i < numLayers - 1; i++) {
      fout << "   // layer " << i << " to " << i + 1 << std::endl;
      if (i + 1 == numLayers - 1) {
         fout << "   for (int o=0; o<" << ((TObjArray *)fNetwork->At(i + 1))->GetEntries() << "; o++) {" << std::endl;
      } else {
         fout << "   for (int o=0; o<" << ((TObjArray *)fNetwork->At(i + 1))->GetEntries() - 1 << "; o++) {"
              << std::endl;
      }
      if (0 == i) {
         fout << "      std::array<double, " << ((TObjArray *)fNetwork->At(i))->GetEntries()
              << "> buffer; // no need to initialise" << std::endl;
         fout << "      for (int i = 0; i<" << ((TObjArray *)fNetwork->At(i))->GetEntries() << " - 1; i++) {"
              << std::endl;
         fout << "         buffer[i] = fWeightMatrix" << i << "to" << i + 1 << "[o][i] * inputValues[i];" << std::endl;
         fout << "      } // loop over i" << std::endl;
         fout << "      buffer.back() = fWeightMatrix" << i << "to" << i + 1 << "[o]["
              << ((TObjArray *)fNetwork->At(i))->GetEntries() - 1 << "];" << std::endl;
      } else {
         fout << "      std::array<double, " << ((TObjArray *)fNetwork->At(i))->GetEntries()
              << "> buffer; // no need to initialise" << std::endl;
         fout << "      for (int i=0; i<" << ((TObjArray *)fNetwork->At(i))->GetEntries() << "; i++) {" << std::endl;
         fout << "         buffer[i] = fWeightMatrix" << i << "to" << i + 1 << "[o][i] * fWeights" << i << "[i];"
              << std::endl;
         fout << "      } // loop over i" << std::endl;
      }
      fout << "      for (int i=0; i<" << ((TObjArray *)fNetwork->At(i))->GetEntries() << "; i++) {" << std::endl;
      if (fNeuronInputType == "sum") {
         fout << "         fWeights" << i + 1 << "[o] += buffer[i];" << std::endl;
      } else if (fNeuronInputType == "sqsum") {
         fout << "         fWeights" << i + 1 << "[o] += buffer[i]*buffer[i];" << std::endl;
      } else { // fNeuronInputType == TNeuronInputChooser::kAbsSum
         fout << "         fWeights" << i + 1 << "[o] += fabs(buffer[i]);" << std::endl;
      }
      fout << "      } // loop over i" << std::endl;
      fout << "   } // loop over o" << std::endl;
      if (i + 1 == numLayers - 1) {
         fout << "   for (int o=0; o<" << ((TObjArray *)fNetwork->At(i + 1))->GetEntries() << "; o++) {" << std::endl;
      } else {
         fout << "   for (int o=0; o<" << ((TObjArray *)fNetwork->At(i + 1))->GetEntries() - 1 << "; o++) {"
              << std::endl;
      }
      if (i+1 != numLayers-1) // in the last layer no activation function is applied
         fout << "      fWeights" << i + 1 << "[o] = ActivationFnc(fWeights" << i + 1 << "[o]);" << std::endl;
      else
         fout << "      fWeights" << i + 1 << "[o] = OutputActivationFnc(fWeights" << i + 1 << "[o]);"
              << std::endl; // zjh
      fout << "   } // loop over o" << std::endl;
   }
   fout << std::endl;
   fout << "   return fWeights" << numLayers - 1 << "[0];" << std::endl;
   fout << "}" << std::endl;

   fout << std::endl;
   TString fncName = className+"::ActivationFnc";
   fActivation->MakeFunction(fout, fncName);
   fncName = className+"::OutputActivationFnc";  //zjh
   fOutput->MakeFunction(fout, fncName); //zjh

   fout << std::endl;
   fout << "// Clean up" << std::endl;
   fout << "inline void " << className << "::Clear()" << std::endl;
   fout << "{" << std::endl;
   fout << "}" << std::endl;
}

////////////////////////////////////////////////////////////////////////////////
/// who the hell makes such strange Debug flags that even use "global pointers"..

Bool_t TMVA::MethodANNBase::Debug() const
{
   return fgDEBUG;
}