TMVAMulticlass_MLP.class.C
// Class: ReadMLP
// Automatically generated by MethodBase::MakeClass
//

/* configuration options =====================================================

#GEN -*-*-*-*-*-*-*-*-*-*-*- general info -*-*-*-*-*-*-*-*-*-*-*-

Method         : MLP::MLP
TMVA Release   : 4.2.1 [262657]
ROOT Release   : 6.16/01 [397313]
Creator        : sftnight
Date           : Sun Dec 19 22:15:26 2021
Host           : Linux root-ubuntu-2004-3 5.4.0-73-generic #82-Ubuntu SMP Wed Apr 14 17:39:42 UTC 2021 x86_64 x86_64 x86_64 GNU/Linux
Dir            : /home/sftnight/build/workspace/root-makedoc-v616/rootspi/rdoc/src/v6-16-00-patches/documentation/doxygen
Training events: 4000
Analysis type  : [Classification]


#OPT -*-*-*-*-*-*-*-*-*-*-*-*- options -*-*-*-*-*-*-*-*-*-*-*-*-

# Set by User:
NCycles: "1000" [Number of training cycles]
HiddenLayers: "N+5,5" [Specification of hidden layer architecture]
NeuronType: "tanh" [Neuron activation function type]
EstimatorType: "MSE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE (Cross-Entropy) for Bernoulli Likelihood]
V: "False" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
H: "False" [Print method-specific help message]
TestRate: "5" [Test for overtraining performed at each #th epochs]
# Default:
RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
NeuronInputType: "sum" [Neuron input function type]
VerbosityLevel: "Default" [Verbosity level]
VarTransform: "None" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
TrainingMethod: "BP" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
LearningRate: "2.000000e-02" [ANN learning rate parameter]
DecayRate: "1.000000e-02" [Decay rate for learning parameter]
EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs, afterwards, all events are taken for training]
SamplingImportance: "1.000000e+00" [The sampling weights of events in epochs which are successful (worse estimator than before) are multiplied by SamplingImportance; otherwise they are divided.]
SamplingTraining: "True" [The training sample is sampled]
SamplingTesting: "False" [The testing sample is sampled]
ResetStep: "50" [How often BFGS should reset history]
Tau: "3.000000e+00" [LineSearch "size step"]
BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
BatchSize: "-1" [Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events]
ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
UseRegulator: "False" [Use regulator to avoid over-training]
UpdateLimit: "10000" [Maximum times of regulator update]
CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
##


#VAR -*-*-*-*-*-*-*-*-*-*-*-* variables *-*-*-*-*-*-*-*-*-*-*-*-

NVar 4
var1   var1   var1   var1               'F'   [-3.65916013718,3.26447582245]
var2   var2   var2   Variable 2         'F'   [-3.68905711174,3.78774046898]
var3   var3   var3   Variable 3 units   'F'   [-4.57268333435,4.56402540207]
var4   var4   var4   Variable 4 units   'F'   [-4.84856987,5.04116535187]
NSpec 0


============================================================================ */

#include <array>
#include <vector>
#include <cmath>
#include <string>
#include <iostream>

#ifndef IClassifierReader__def
#define IClassifierReader__def

class IClassifierReader {

 public:

   // constructor
   IClassifierReader() : fStatusIsClean( true ) {}
   virtual ~IClassifierReader() {}

   // return classifier response
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // returns classifier status
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   bool fStatusIsClean;
};

#endif

class ReadMLP : public IClassifierReader {

 public:

   // constructor
   ReadMLP( std::vector<std::string>& theInputVars )
      : IClassifierReader(),
        fClassName( "ReadMLP" ),
        fNvars( 4 )
   {
      // the training input variables
      const char* inputVars[] = { "var1", "var2", "var3", "var4" };

      // sanity checks
      if (theInputVars.empty()) {
         std::cout << "Problem in class \"" << fClassName << "\": empty input vector" << std::endl;
         fStatusIsClean = false;
      }

      if (theInputVars.size() != fNvars) {
         std::cout << "Problem in class \"" << fClassName << "\": mismatch in number of input values: "
                   << theInputVars.size() << " != " << fNvars << std::endl;
         fStatusIsClean = false;
      }

      // validate input variables
      for (size_t ivar = 0; ivar < theInputVars.size(); ivar++) {
         if (theInputVars[ivar] != inputVars[ivar]) {
            std::cout << "Problem in class \"" << fClassName << "\": mismatch in input variable names" << std::endl
                      << " for variable [" << ivar << "]: " << theInputVars[ivar].c_str() << " != " << inputVars[ivar] << std::endl;
            fStatusIsClean = false;
         }
      }

      // initialize min and max vectors (for normalisation)
      fVmin[0] = 0;
      fVmax[0] = 0;
      fVmin[1] = 0;
      fVmax[1] = 0;
      fVmin[2] = 0;
      fVmax[2] = 0;
      fVmin[3] = 0;
      fVmax[3] = 0;

      // initialize input variable types
      fType[0] = 'F';
      fType[1] = 'F';
      fType[2] = 'F';
      fType[3] = 'F';

      // initialize constants
      Initialize();
   }

   // destructor
   virtual ~ReadMLP() {
      Clear(); // method-specific
   }

   // the classifier response
   // "inputValues" is a vector of input values in the same order as the
   // variables given to the constructor
   double GetMvaValue( const std::vector<double>& inputValues ) const override;

 private:

   // method-specific destructor
   void Clear();

   // common member variables
   const char* fClassName;

   const size_t fNvars;
   size_t GetNvar() const { return fNvars; }
   char GetType( int ivar ) const { return fType[ivar]; }

   // normalisation of input variables
   double fVmin[4];
   double fVmax[4];
   double NormVariable( double x, double xmin, double xmax ) const {
      // normalise to output range: [-1, 1]
      return 2*(x - xmin)/(xmax - xmin) - 1.0;
   }
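   // Note: with VarTransform set to "None" this model leaves fVmin/fVmax at
   // zero and NormVariable() is never called; the helper is presumably kept
   // for readers generated with a normalisation transform.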

   // type of input variable: 'F' or 'I'
   char fType[4];

   // initialize internal variables
   void Initialize();
   double GetMvaValue__( const std::vector<double>& inputValues ) const;

   // private members (method specific)

   double ActivationFnc(double x) const;
   double OutputActivationFnc(double x) const;

   double fWeightMatrix0to1[10][5]; // weight matrix from layer 0 to 1
   double fWeightMatrix1to2[6][10]; // weight matrix from layer 1 to 2
   double fWeightMatrix2to3[4][6];  // weight matrix from layer 2 to 3
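   // Layout (as used below): row = neuron of the target layer, column = neuron
   // of the source layer, with the last column holding the bias weight. With
   // HiddenLayers = "N+5,5" and N = 4 input variables, the network is
   // 4 inputs -> 9 hidden -> 5 hidden -> 4 outputs (one per class).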

};

inline void ReadMLP::Initialize()
{
   // build network structure
   // weight matrix from layer 0 to 1
   fWeightMatrix0to1[0][0] = -1.06651200319108;
   fWeightMatrix0to1[1][0] = 2.78386988043118;
   fWeightMatrix0to1[2][0] = 0.101393252285154;
   fWeightMatrix0to1[3][0] = 0.206941798604133;
   fWeightMatrix0to1[4][0] = -1.78286501698958;
   fWeightMatrix0to1[5][0] = -0.860777618534828;
   fWeightMatrix0to1[6][0] = -0.808727112289036;
   fWeightMatrix0to1[7][0] = 2.47804262221377;
   fWeightMatrix0to1[8][0] = -1.13551735547915;
   fWeightMatrix0to1[0][1] = 3.71378925654866;
   fWeightMatrix0to1[1][1] = 1.69096355947333;
   fWeightMatrix0to1[2][1] = 0.237366503241686;
   fWeightMatrix0to1[3][1] = -0.73602253713693;
   fWeightMatrix0to1[4][1] = -1.4854832241276;
   fWeightMatrix0to1[5][1] = -0.965663960944631;
   fWeightMatrix0to1[6][1] = 1.27201233522517;
   fWeightMatrix0to1[7][1] = -0.0651979297580546;
   fWeightMatrix0to1[8][1] = 4.60539585604408;
   fWeightMatrix0to1[0][2] = 0.142204729367398;
   fWeightMatrix0to1[1][2] = 0.977147297405069;
   fWeightMatrix0to1[2][2] = -1.22212905685236;
   fWeightMatrix0to1[3][2] = -0.123352194790325;
   fWeightMatrix0to1[4][2] = -2.02701841995523;
   fWeightMatrix0to1[5][2] = -0.00825727752816287;
   fWeightMatrix0to1[6][2] = -2.80282340746091;
   fWeightMatrix0to1[7][2] = -0.0256599583030388;
   fWeightMatrix0to1[8][2] = -0.202155755792556;
   fWeightMatrix0to1[0][3] = -0.442817815050983;
   fWeightMatrix0to1[1][3] = -4.4536829927998;
   fWeightMatrix0to1[2][3] = 0.231276547713158;
   fWeightMatrix0to1[3][3] = 2.18864663038851;
   fWeightMatrix0to1[4][3] = 4.08552885807348;
   fWeightMatrix0to1[5][3] = 1.70260974665176;
   fWeightMatrix0to1[6][3] = 1.55486472026845;
   fWeightMatrix0to1[7][3] = 0.365736245687583;
   fWeightMatrix0to1[8][3] = 0.508488311462387;
   fWeightMatrix0to1[0][4] = -6.88388268719374;
   fWeightMatrix0to1[1][4] = 1.09728226906791;
   fWeightMatrix0to1[2][4] = -1.03660907192257;
   fWeightMatrix0to1[3][4] = 0.628591791670253;
   fWeightMatrix0to1[4][4] = 1.6733138429079;
   fWeightMatrix0to1[5][4] = 2.88267349959512;
   fWeightMatrix0to1[6][4] = 3.78423644920161;
   fWeightMatrix0to1[7][4] = -1.88203499450257;
   fWeightMatrix0to1[8][4] = -5.36735871461378;
   // weight matrix from layer 1 to 2
   fWeightMatrix1to2[0][0] = -0.357235796862591;
   fWeightMatrix1to2[1][0] = 0.04505062489752;
   fWeightMatrix1to2[2][0] = 0.340588167356101;
   fWeightMatrix1to2[3][0] = 3.9929498015394;
   fWeightMatrix1to2[4][0] = 0.145228923116718;
   fWeightMatrix1to2[0][1] = 2.79393964714356;
   fWeightMatrix1to2[1][1] = 0.215732000789057;
   fWeightMatrix1to2[2][1] = 0.23033176153333;
   fWeightMatrix1to2[3][1] = 0.185230972693792;
   fWeightMatrix1to2[4][1] = -2.09502080886405;
   fWeightMatrix1to2[0][2] = -0.0189434307875217;
   fWeightMatrix1to2[1][2] = -1.03322674362896;
   fWeightMatrix1to2[2][2] = -1.07708655007349;
   fWeightMatrix1to2[3][2] = 0.0851539991212902;
   fWeightMatrix1to2[4][2] = 0.149434494803098;
   fWeightMatrix1to2[0][3] = -0.271010033517566;
   fWeightMatrix1to2[1][3] = -1.55390366610449;
   fWeightMatrix1to2[2][3] = -0.0669792697717976;
   fWeightMatrix1to2[3][3] = -0.282630749551357;
   fWeightMatrix1to2[4][3] = 0.343249727100533;
   fWeightMatrix1to2[0][4] = 2.70532126867034;
   fWeightMatrix1to2[1][4] = 0.272681859582698;
   fWeightMatrix1to2[2][4] = 0.040338893188297;
   fWeightMatrix1to2[3][4] = 0.529662776529901;
   fWeightMatrix1to2[4][4] = 0.584154232222749;
   fWeightMatrix1to2[0][5] = 0.669218600470071;
   fWeightMatrix1to2[1][5] = -1.2688687650503;
   fWeightMatrix1to2[2][5] = -0.803695836169736;
   fWeightMatrix1to2[3][5] = 2.21566550678552;
   fWeightMatrix1to2[4][5] = 1.35717620272584;
   fWeightMatrix1to2[0][6] = 0.209308698854902;
   fWeightMatrix1to2[1][6] = -0.731317432299243;
   fWeightMatrix1to2[2][6] = -0.506532607488316;
   fWeightMatrix1to2[3][6] = 1.70474804097025;
   fWeightMatrix1to2[4][6] = 2.0272958402778;
   fWeightMatrix1to2[0][7] = 0.134850207060653;
   fWeightMatrix1to2[1][7] = 0.807221505342792;
   fWeightMatrix1to2[2][7] = 0.541274616894995;
   fWeightMatrix1to2[3][7] = -3.11772539853194;
   fWeightMatrix1to2[4][7] = -0.106183680509714;
   fWeightMatrix1to2[0][8] = 0.0189265853772227;
   fWeightMatrix1to2[1][8] = -0.167053790176189;
   fWeightMatrix1to2[2][8] = 0.217742419049963;
   fWeightMatrix1to2[3][8] = -3.75105316235857;
   fWeightMatrix1to2[4][8] = -0.0393298810581988;
   fWeightMatrix1to2[0][9] = -1.45552886865421;
   fWeightMatrix1to2[1][9] = -0.29335486135367;
   fWeightMatrix1to2[2][9] = 1.11555186966183;
   fWeightMatrix1to2[3][9] = -2.40365415835108;
   fWeightMatrix1to2[4][9] = -1.70261245590261;
   // weight matrix from layer 2 to 3
   fWeightMatrix2to3[0][0] = -1.4794134225943;
   fWeightMatrix2to3[1][0] = 1.11172320132498;
   fWeightMatrix2to3[2][0] = -1.05853789432097;
   fWeightMatrix2to3[3][0] = -0.811113655311863;
   fWeightMatrix2to3[0][1] = -1.57415343464894;
   fWeightMatrix2to3[1][1] = -2.00617289268192;
   fWeightMatrix2to3[2][1] = -1.73396745628971;
   fWeightMatrix2to3[3][1] = 2.54079329268742;
   fWeightMatrix2to3[0][2] = 1.26121184490424;
   fWeightMatrix2to3[1][2] = 0.520765319749485;
   fWeightMatrix2to3[2][2] = 2.33123364578584;
   fWeightMatrix2to3[3][2] = -2.89527287902075;
   fWeightMatrix2to3[0][3] = 0.66272014779784;
   fWeightMatrix2to3[1][3] = 0.564421555704318;
   fWeightMatrix2to3[2][3] = 1.16864581662824;
   fWeightMatrix2to3[3][3] = -4.68276776259688;
   fWeightMatrix2to3[0][4] = 3.13744173280683;
   fWeightMatrix2to3[1][4] = -0.428900582056675;
   fWeightMatrix2to3[2][4] = -2.79045078130764;
   fWeightMatrix2to3[3][4] = -0.315304266179115;
   fWeightMatrix2to3[0][5] = 0.0539164770048504;
   fWeightMatrix2to3[1][5] = -0.194351274022359;
   fWeightMatrix2to3[2][5] = -0.470818380105623;
   fWeightMatrix2to3[3][5] = 2.63756733325277;
}

inline double ReadMLP::GetMvaValue__( const std::vector<double>& inputValues ) const
{
   if (inputValues.size() != 4u) {
      std::cout << "Input vector needs to be of size " << 4 << std::endl;
      return 0;
   }

   std::array<double, 10> fWeights1 {{}};
   std::array<double, 6> fWeights2 {{}};
   std::array<double, 4> fWeights3 {{}};
   fWeights1.back() = 1.;
   fWeights2.back() = 1.;
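   // the last entry of fWeights1/fWeights2 acts as the bias node of its layer:
   // it is fixed at 1 so that the last column of each weight matrix (the bias
   // weight) is added to every neuron's input sum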

   // layer 0 to 1
   for (int o=0; o<9; o++) {
      std::array<double, 5> buffer; // no need to initialise
      for (int i=0; i<4; i++) {
         buffer[i] = fWeightMatrix0to1[o][i] * inputValues[i];
      } // loop over i
      buffer.back() = fWeightMatrix0to1[o][4];
      for (int i=0; i<5; i++) {
         fWeights1[o] += buffer[i];
      } // loop over i
   } // loop over o
   for (int o=0; o<9; o++) {
      fWeights1[o] = ActivationFnc(fWeights1[o]);
   } // loop over o
   // layer 1 to 2
   for (int o=0; o<5; o++) {
      std::array<double, 10> buffer; // no need to initialise
      for (int i=0; i<10; i++) {
         buffer[i] = fWeightMatrix1to2[o][i] * fWeights1[i];
      } // loop over i
      for (int i=0; i<10; i++) {
         fWeights2[o] += buffer[i];
      } // loop over i
   } // loop over o
   for (int o=0; o<5; o++) {
      fWeights2[o] = ActivationFnc(fWeights2[o]);
   } // loop over o
   // layer 2 to 3
   for (int o=0; o<4; o++) {
      std::array<double, 6> buffer; // no need to initialise
      for (int i=0; i<6; i++) {
         buffer[i] = fWeightMatrix2to3[o][i] * fWeights2[i];
      } // loop over i
      for (int i=0; i<6; i++) {
         fWeights3[o] += buffer[i];
      } // loop over i
   } // loop over o
   for (int o=0; o<4; o++) {
      fWeights3[o] = OutputActivationFnc(fWeights3[o]);
   } // loop over o

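   // only the response of the first output node (class 0) is returned here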
   return fWeights3[0];
}

double ReadMLP::ActivationFnc(double x) const {
   // hyperbolic tan
   return std::tanh(x);
}
double ReadMLP::OutputActivationFnc(double x) const {
   // identity
   return x;
}

// Clean up
inline void ReadMLP::Clear()
{
}
inline double ReadMLP::GetMvaValue( const std::vector<double>& inputValues ) const
{
   // classifier response value
   double retval = 0;

   // classifier response, sanity check first
   if (!IsStatusClean()) {
      std::cout << "Problem in class \"" << fClassName << "\": cannot return classifier response"
                << " because status is dirty" << std::endl;
      retval = 0;
   }
   else {
      retval = GetMvaValue__( inputValues );
   }

   return retval;
}
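
// ----------------------------------------------------------------------------
// Minimal usage sketch (illustrative, not part of the generated file): the
// variable names must match the training variables above, and the event
// values are made-up placeholders. The guard keeps the file usable as a
// plain class definition; build with -DREADMLP_EXAMPLE to try it.
#ifdef READMLP_EXAMPLE
int main()
{
   // variable names, in the same order as given at training time
   std::vector<std::string> vars = { "var1", "var2", "var3", "var4" };
   ReadMLP reader( vars );

   // one hypothetical event: values for var1..var4
   std::vector<double> event = { 0.5, -1.2, 0.3, 2.0 };

   if (reader.IsStatusClean())
      std::cout << "MLP response (class 0): " << reader.GetMvaValue( event ) << std::endl;

   return 0;
}
#endif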