TMVAClassification_MLPBNN.class.C
// Class: ReadMLPBNN
// Automatically generated by MethodBase::MakeClass
//

/* configuration options =====================================================

#GEN -*-*-*-*-*-*-*-*-*-*-*- general info -*-*-*-*-*-*-*-*-*-*-*-

Method          : MLP::MLPBNN
TMVA Release    : 4.2.1 [262657]
ROOT Release    : 6.16/01 [397313]
Creator         : sftnight
Date            : Sun Dec 19 22:13:35 2021
Host            : Linux root-ubuntu-2004-3 5.4.0-73-generic #82-Ubuntu SMP Wed Apr 14 17:39:42 UTC 2021 x86_64 x86_64 x86_64 GNU/Linux
Dir             : /home/sftnight/build/workspace/root-makedoc-v616/rootspi/rdoc/src/v6-16-00-patches/documentation/doxygen
Training events : 2000
Analysis type   : [Classification]


#OPT -*-*-*-*-*-*-*-*-*-*-*-*- options -*-*-*-*-*-*-*-*-*-*-*-*-

# Set by User:
NCycles: "60" [Number of training cycles]
HiddenLayers: "N+5" [Specification of hidden layer architecture]
NeuronType: "tanh" [Neuron activation function type]
V: "False" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
VarTransform: "N" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes; if no class indication is given, 'All' is assumed)"]
H: "True" [Print method-specific help message]
TrainingMethod: "BFGS" [Train with back-propagation (BP), the BFGS algorithm (BFGS), or a genetic algorithm (GA - slower and worse)]
TestRate: "5" [Test for overtraining performed at each #th epoch]
UseRegulator: "True" [Use regulator to avoid over-training]
# Default:
RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
EstimatorType: "CE" [MSE (Mean Square Estimator) for Gaussian likelihood or CE (Cross-Entropy) for Bernoulli likelihood]
NeuronInputType: "sum" [Neuron input function type]
VerbosityLevel: "Default" [Verbosity level]
CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
LearningRate: "2.000000e-02" [ANN learning rate parameter]
DecayRate: "1.000000e-02" [Decay rate for learning parameter]
EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
Sampling: "1.000000e+00" [Only a fraction 'Sampling' of (randomly selected) events is trained in each epoch]
SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' fraction of epochs; afterwards, all events are taken for training]
SamplingImportance: "1.000000e+00" [The sampling weights of events in epochs with a worse estimator than before are multiplied by SamplingImportance; otherwise they are divided]
SamplingTraining: "True" [The training sample is sampled]
SamplingTesting: "False" [The testing sample is sampled]
ResetStep: "50" [How often BFGS should reset history]
Tau: "3.000000e+00" [LineSearch "size step"]
BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
BatchSize: "-1" [Batch size: number of events/batch, only set if in batch mode; -1 for BatchSize=number_of_events]
ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
UpdateLimit: "10000" [Maximum number of regulator updates]
CalculateErrors: "False" [Calculate the inverse Hessian matrix at the end of the training, to be able to calculate the uncertainties of an MVA value]
WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
##


#VAR -*-*-*-*-*-*-*-*-*-*-*-* variables *-*-*-*-*-*-*-*-*-*-*-*-

NVar 4
var1+var2    myvar1    myvar1    myvar1               'F'    [-9.33803939819,7.69307804108]
var1-var2    myvar2    myvar2    Expression 2         'F'    [-3.25508260727,4.02912044525]
var3         var3      var3      Variable 3    units  'F'    [-5.2777428627,4.64297914505]
var4         var4      var4      Variable 4    units  'F'    [-5.6007027626,4.67435789108]
NSpec 2
var1*2       spec1     spec1     Spectator 1   units  'F'    [-9.91655540466,8.7030172348]
var1*3       spec2     spec2     Spectator 2   units  'F'    [-14.874833107,13.0545253754]


============================================================================ */
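
/* The "Set by User" options above match the booking call used in the standard
   TMVA classification tutorial. A minimal sketch of how such a method would be
   booked (assuming `factory` and `dataloader` have been created as in
   TMVAClassification.C):

      factory->BookMethod( dataloader, TMVA::Types::kMLP, "MLPBNN",
                           "H:!V:NeuronType=tanh:VarTransform=N:NCycles=60:"
                           "HiddenLayers=N+5:TestRate=5:TrainingMethod=BFGS:"
                           "UseRegulator" );
*/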

#include <array>
#include <vector>
#include <cmath>
#include <string>
#include <iostream>

#ifndef IClassifierReader__def
#define IClassifierReader__def

class IClassifierReader {

 public:

   // constructor
   IClassifierReader() : fStatusIsClean( true ) {}
   virtual ~IClassifierReader() {}

   // return classifier response
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // returns classifier status
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   bool fStatusIsClean;
};

#endif

class ReadMLPBNN : public IClassifierReader {

 public:

   // constructor
   ReadMLPBNN( std::vector<std::string>& theInputVars )
      : IClassifierReader(),
        fClassName( "ReadMLPBNN" ),
        fNvars( 4 )
   {
      // the training input variables
      const char* inputVars[] = { "var1+var2", "var1-var2", "var3", "var4" };

      // sanity checks
      if (theInputVars.empty()) {
         std::cout << "Problem in class \"" << fClassName << "\": empty input vector" << std::endl;
         fStatusIsClean = false;
      }

      if (theInputVars.size() != fNvars) {
         std::cout << "Problem in class \"" << fClassName << "\": mismatch in number of input values: "
                   << theInputVars.size() << " != " << fNvars << std::endl;
         fStatusIsClean = false;
      }

      // validate input variables
      for (size_t ivar = 0; ivar < theInputVars.size(); ivar++) {
         if (theInputVars[ivar] != inputVars[ivar]) {
            std::cout << "Problem in class \"" << fClassName << "\": mismatch in input variable names" << std::endl
                      << " for variable [" << ivar << "]: " << theInputVars[ivar].c_str() << " != " << inputVars[ivar] << std::endl;
            fStatusIsClean = false;
         }
      }

      // initialize min and max vectors (for normalisation)
      fVmin[0] = -1;
      fVmax[0] = 1;
      fVmin[1] = -1;
      fVmax[1] = 1;
      fVmin[2] = -1;
      fVmax[2] = 1;
      fVmin[3] = -1;
      fVmax[3] = 0.99999988079071;

      // initialize input variable types
      fType[0] = 'F';
      fType[1] = 'F';
      fType[2] = 'F';
      fType[3] = 'F';

      // initialize constants
      Initialize();

      // initialize transformation
      InitTransform();
   }

   // destructor
   virtual ~ReadMLPBNN() {
      Clear(); // method-specific
   }

   // the classifier response
   // "inputValues" is a vector of input values in the same order as the
   // variables given to the constructor
   double GetMvaValue( const std::vector<double>& inputValues ) const override;

 private:

   // method-specific destructor
   void Clear();

   // input variable transformation

   double fOff_1[3][4];
   double fScal_1[3][4];
   void InitTransform_1();
   void Transform_1( std::vector<double> & iv, int sigOrBgd ) const;
   void InitTransform();
   void Transform( std::vector<double> & iv, int sigOrBgd ) const;

   // common member variables
   const char* fClassName;

   const size_t fNvars;
   size_t GetNvar() const { return fNvars; }
   char GetType( int ivar ) const { return fType[ivar]; }

   // normalisation of input variables
   double fVmin[4];
   double fVmax[4];
   double NormVariable( double x, double xmin, double xmax ) const {
      // normalise to output range: [-1, 1]
      return 2*(x - xmin)/(xmax - xmin) - 1.0;
   }

   // type of input variable: 'F' or 'I'
   char fType[4];

   // initialize internal variables
   void Initialize();
   double GetMvaValue__( const std::vector<double>& inputValues ) const;

   // private members (method specific)

   double ActivationFnc(double x) const;
   double OutputActivationFnc(double x) const;

   double fWeightMatrix0to1[10][5];   // weight matrix from layer 0 to 1
   double fWeightMatrix1to2[1][10];   // weight matrix from layer 1 to 2

};

inline void ReadMLPBNN::Initialize()
{
   // build network structure
   // weight matrix from layer 0 to 1
   fWeightMatrix0to1[0][0] = 0.499339155868432;
   fWeightMatrix0to1[1][0] = 1.97323370224732;
   fWeightMatrix0to1[2][0] = 0.835850497820873;
   fWeightMatrix0to1[3][0] = 0.370715139038071;
   fWeightMatrix0to1[4][0] = -3.3244780544242;
   fWeightMatrix0to1[5][0] = -1.00515616054487;
   fWeightMatrix0to1[6][0] = -0.903517469691334;
   fWeightMatrix0to1[7][0] = 1.68095300796643;
   fWeightMatrix0to1[8][0] = -1.8086188219696;
   fWeightMatrix0to1[0][1] = -0.469787533689036;
   fWeightMatrix0to1[1][1] = -1.47308437821555;
   fWeightMatrix0to1[2][1] = -0.548985376762871;
   fWeightMatrix0to1[3][1] = -1.08352747767343;
   fWeightMatrix0to1[4][1] = -0.706728846639731;
   fWeightMatrix0to1[5][1] = -0.973391622880094;
   fWeightMatrix0to1[6][1] = 0.659572707267351;
   fWeightMatrix0to1[7][1] = -0.31695950236766;
   fWeightMatrix0to1[8][1] = 2.4093761098387;
   fWeightMatrix0to1[0][2] = 0.355913509480398;
   fWeightMatrix0to1[1][2] = 1.40361366583309;
   fWeightMatrix0to1[2][2] = -0.210743959789661;
   fWeightMatrix0to1[3][2] = -0.978127205874705;
   fWeightMatrix0to1[4][2] = -1.01949963347685;
   fWeightMatrix0to1[5][2] = 0.897826904860834;
   fWeightMatrix0to1[6][2] = -1.25600410498056;
   fWeightMatrix0to1[7][2] = -0.743241454883446;
   fWeightMatrix0to1[8][2] = 1.48931411037645;
   fWeightMatrix0to1[0][3] = -2.17450079985913;
   fWeightMatrix0to1[1][3] = -1.60207885420351;
   fWeightMatrix0to1[2][3] = 0.552988361084977;
   fWeightMatrix0to1[3][3] = 2.28075263985257;
   fWeightMatrix0to1[4][3] = 4.00452196921293;
   fWeightMatrix0to1[5][3] = 1.33661120213445;
   fWeightMatrix0to1[6][3] = 0.0806551453627355;
   fWeightMatrix0to1[7][3] = -0.523552944740624;
   fWeightMatrix0to1[8][3] = 0.925130649598083;
   fWeightMatrix0to1[0][4] = -0.565154618007088;
   fWeightMatrix0to1[1][4] = 2.36601716651037;
   fWeightMatrix0to1[2][4] = -1.05353132940242;
   fWeightMatrix0to1[3][4] = -0.163502849122647;
   fWeightMatrix0to1[4][4] = -0.194079160790969;
   fWeightMatrix0to1[5][4] = 0.98449705780042;
   fWeightMatrix0to1[6][4] = 1.9250180795564;
   fWeightMatrix0to1[7][4] = 1.61703868436564;
   fWeightMatrix0to1[8][4] = -0.686657299656178;
   // weight matrix from layer 1 to 2
   fWeightMatrix1to2[0][0] = -2.23742003371891;
   fWeightMatrix1to2[0][1] = 0.277464222426375;
   fWeightMatrix1to2[0][2] = -0.203363715034144;
   fWeightMatrix1to2[0][3] = 1.57205142891781;
   fWeightMatrix1to2[0][4] = 5.94689352294752;
   fWeightMatrix1to2[0][5] = 1.15566920637399;
   fWeightMatrix1to2[0][6] = 1.26453893993009;
   fWeightMatrix1to2[0][7] = -1.34996651290595;
   fWeightMatrix1to2[0][8] = 1.53068569815131;
   fWeightMatrix1to2[0][9] = -1.7128873155142;
}
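
// Layout of the network encoded above: 4 input variables plus a bias term
// feed 9 hidden neurons ("N+5" with N = 4 inputs); column index 4 of
// fWeightMatrix0to1 holds the bias weights, and row 9 is unused. The hidden
// layer plus its own bias node (fWeights1[9] = 1 in GetMvaValue__) feeds the
// single output neuron through fWeightMatrix1to2.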

inline double ReadMLPBNN::GetMvaValue__( const std::vector<double>& inputValues ) const
{
   if (inputValues.size() != (unsigned int)4) {
      std::cout << "Input vector needs to be of size " << 4 << std::endl;
      return 0;
   }

   std::array<double, 10> fWeights1 {{}};
   std::array<double, 1> fWeights2 {{}};
   fWeights1.back() = 1.; // bias node of the hidden layer

   // layer 0 to 1
   for (int o=0; o<9; o++) {
      std::array<double, 5> buffer; // no need to initialise
      for (int i=0; i<5 - 1; i++) {
         buffer[i] = fWeightMatrix0to1[o][i] * inputValues[i];
      } // loop over i
      buffer.back() = fWeightMatrix0to1[o][4]; // bias weight
      for (int i=0; i<5; i++) {
         fWeights1[o] += buffer[i];
      } // loop over i
   } // loop over o
   for (int o=0; o<9; o++) {
      fWeights1[o] = ActivationFnc(fWeights1[o]);
   } // loop over o
   // layer 1 to 2
   for (int o=0; o<1; o++) {
      std::array<double, 10> buffer; // no need to initialise
      for (int i=0; i<10; i++) {
         buffer[i] = fWeightMatrix1to2[o][i] * fWeights1[i];
      } // loop over i
      for (int i=0; i<10; i++) {
         fWeights2[o] += buffer[i];
      } // loop over i
   } // loop over o
   for (int o=0; o<1; o++) {
      fWeights2[o] = OutputActivationFnc(fWeights2[o]);
   } // loop over o

   return fWeights2[0];
}
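
// Written out, the response computed above is
//    MVA(x) = sigmoid( sum_j W2[0][j] * h_j ),   j = 0..9,
// with h_j = tanh( sum_i W1[j][i]*x_i + W1[j][4] ) for j < 9 and h_9 = 1
// (the hidden-layer bias), where x is the normalised input vector.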

double ReadMLPBNN::ActivationFnc(double x) const {
   // hyperbolic tan
   return tanh(x);
}
double ReadMLPBNN::OutputActivationFnc(double x) const {
   // sigmoid
   return 1.0/(1.0+exp(-x));
}

// Clean up
inline void ReadMLPBNN::Clear()
{
}
inline double ReadMLPBNN::GetMvaValue( const std::vector<double>& inputValues ) const
{
   // classifier response value
   double retval = 0;

   // classifier response, sanity check first
   if (!IsStatusClean()) {
      std::cout << "Problem in class \"" << fClassName << "\": cannot return classifier response"
                << " because status is dirty" << std::endl;
   }
   else {
      std::vector<double> iV(inputValues);
      Transform( iV, -1 );
      retval = GetMvaValue__( iV );
   }

   return retval;
}

//_______________________________________________________________________
inline void ReadMLPBNN::InitTransform_1()
{
   double fMin_1[3][4];
   double fMax_1[3][4];
   // Normalization transformation, initialisation
   fMin_1[0][0] = -4.33593845367;
   fMax_1[0][0] = 6.3994679451;
   fScal_1[0][0] = 2.0/(fMax_1[0][0]-fMin_1[0][0]);
   fOff_1[0][0] = fMin_1[0][0]*fScal_1[0][0]+1.;
   fMin_1[1][0] = -9.33803939819;
   fMax_1[1][0] = 7.69307804108;
   fScal_1[1][0] = 2.0/(fMax_1[1][0]-fMin_1[1][0]);
   fOff_1[1][0] = fMin_1[1][0]*fScal_1[1][0]+1.;
   fMin_1[2][0] = -9.33803939819;
   fMax_1[2][0] = 7.69307804108;
   fScal_1[2][0] = 2.0/(fMax_1[2][0]-fMin_1[2][0]);
   fOff_1[2][0] = fMin_1[2][0]*fScal_1[2][0]+1.;
   fMin_1[0][1] = -3.20988440514;
   fMax_1[0][1] = 4.02912044525;
   fScal_1[0][1] = 2.0/(fMax_1[0][1]-fMin_1[0][1]);
   fOff_1[0][1] = fMin_1[0][1]*fScal_1[0][1]+1.;
   fMin_1[1][1] = -3.25508260727;
   fMax_1[1][1] = 3.36500358582;
   fScal_1[1][1] = 2.0/(fMax_1[1][1]-fMin_1[1][1]);
   fOff_1[1][1] = fMin_1[1][1]*fScal_1[1][1]+1.;
   fMin_1[2][1] = -3.25508260727;
   fMax_1[2][1] = 4.02912044525;
   fScal_1[2][1] = 2.0/(fMax_1[2][1]-fMin_1[2][1]);
   fOff_1[2][1] = fMin_1[2][1]*fScal_1[2][1]+1.;
   fMin_1[0][2] = -2.60635733604;
   fMax_1[0][2] = 3.86989831924;
   fScal_1[0][2] = 2.0/(fMax_1[0][2]-fMin_1[0][2]);
   fOff_1[0][2] = fMin_1[0][2]*fScal_1[0][2]+1.;
   fMin_1[1][2] = -5.2777428627;
   fMax_1[1][2] = 4.64297914505;
   fScal_1[1][2] = 2.0/(fMax_1[1][2]-fMin_1[1][2]);
   fOff_1[1][2] = fMin_1[1][2]*fScal_1[1][2]+1.;
   fMin_1[2][2] = -5.2777428627;
   fMax_1[2][2] = 4.64297914505;
   fScal_1[2][2] = 2.0/(fMax_1[2][2]-fMin_1[2][2]);
   fOff_1[2][2] = fMin_1[2][2]*fScal_1[2][2]+1.;
   fMin_1[0][3] = -2.1695792675;
   fMax_1[0][3] = 4.5351858139;
   fScal_1[0][3] = 2.0/(fMax_1[0][3]-fMin_1[0][3]);
   fOff_1[0][3] = fMin_1[0][3]*fScal_1[0][3]+1.;
   fMin_1[1][3] = -5.6007027626;
   fMax_1[1][3] = 4.67435789108;
   fScal_1[1][3] = 2.0/(fMax_1[1][3]-fMin_1[1][3]);
   fOff_1[1][3] = fMin_1[1][3]*fScal_1[1][3]+1.;
   fMin_1[2][3] = -5.6007027626;
   fMax_1[2][3] = 4.67435789108;
   fScal_1[2][3] = 2.0/(fMax_1[2][3]-fMin_1[2][3]);
   fOff_1[2][3] = fMin_1[2][3]*fScal_1[2][3]+1.;
}

//_______________________________________________________________________
inline void ReadMLPBNN::Transform_1( std::vector<double>& iv, int cls) const
{
   // Normalization transformation
   if (cls < 0 || cls > 2) {
      cls = 2; // out-of-range class index: fall back to the transformation for all classes
   }
   const int nVar = 4;

   // get indices of used variables

   // define the indices of the variables which are transformed by this transformation
   static std::vector<int> indicesGet;
   static std::vector<int> indicesPut;

   if ( indicesGet.empty() ) {
      indicesGet.reserve(fNvars);
      indicesGet.push_back( 0);
      indicesGet.push_back( 1);
      indicesGet.push_back( 2);
      indicesGet.push_back( 3);
   }
   if ( indicesPut.empty() ) {
      indicesPut.reserve(fNvars);
      indicesPut.push_back( 0);
      indicesPut.push_back( 1);
      indicesPut.push_back( 2);
      indicesPut.push_back( 3);
   }

   static std::vector<double> dv;
   dv.resize(nVar);
   for (int ivar=0; ivar<nVar; ivar++) dv[ivar] = iv[indicesGet.at(ivar)];
   for (int ivar=0; ivar<4; ivar++) {
      double offset = fOff_1[cls][ivar];
      double scale  = fScal_1[cls][ivar];
      iv[indicesPut.at(ivar)] = scale*dv[ivar]-offset;
   }
}
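
// Note that Transform_1 reproduces NormVariable: with
//    scale = 2/(max - min)   and   offset = min*scale + 1,
// scale*x - offset = 2*(x - min)/(max - min) - 1, so each variable is mapped
// linearly from its training range [min, max] onto [-1, 1].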

//_______________________________________________________________________
inline void ReadMLPBNN::InitTransform()
{
   InitTransform_1();
}

//_______________________________________________________________________
inline void ReadMLPBNN::Transform( std::vector<double>& iv, int sigOrBgd ) const
{
   Transform_1( iv, sigOrBgd );
}
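
/* example usage ==============================================================

   A minimal sketch of how a standalone reader generated by
   MethodBase::MakeClass is typically driven; the event values below are
   hypothetical and would in practice come from an analysis event loop:

   #include "TMVAClassification_MLPBNN.class.C"

   int main() {
      std::vector<std::string> vars = { "var1+var2", "var1-var2", "var3", "var4" };
      ReadMLPBNN reader( vars );
      if (!reader.IsStatusClean()) return 1;

      std::vector<double> event = { 0.5, -1.2, 0.3, 1.1 }; // hypothetical values
      double mva = reader.GetMvaValue( event );            // sigmoid output in (0,1)
      std::cout << "MLPBNN response: " << mva << std::endl;
      return 0;
   }

============================================================================ */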