MethodCFMlpANN_Utils.cxx
// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
 * Package: TMVA                                                                  *
 * Class  : TMVA::MethodCFMlpANN_utils                                            *
 *                                                                                *
 *                                                                                *
 * Reference for the original FORTRAN version "mlpl3.F":                          *
 *      Authors  : J. Proriol and contributions from ALEPH-Clermont-Ferrand       *
 *                 Team members                                                   *
 *      Copyright: Laboratoire Physique Corpusculaire                             *
 *                 Universite de Blaise Pascal, IN2P3/CNRS                        *
 *                                                                                *
 * Modifications by present authors:                                              *
 *      use dynamical data tables (not for all of them, but for the big ones)     *
 *                                                                                *
 * Description:                                                                   *
 *      Utility routine translated from original mlpl3.F FORTRAN routine          *
 *                                                                                *
 *      MultiLayerPerceptron : Training code                                      *
 *                                                                                *
 *        NTRAIN: Nb of events used during the learning                           *
 *        NTEST:  Nb of events used for the test                                  *
 *        TIN:    Input variables                                                 *
 *        TOUT:   type of the event                                               *
 *                                                                                *
 * ------------------------------------------------------------------------------ *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
 *      Xavier Prudent  <prudent@lapp.in2p3.fr>  - LAPP, France                   *
 *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
 *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
 *                                                                                *
 * Copyright (c) 2005:                                                            *
 *      CERN, Switzerland                                                         *
 *      U. of Victoria, Canada                                                    *
 *      MPI-K Heidelberg, Germany                                                 *
 *      LAPP, Annecy, France                                                      *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without             *
 * modification, are permitted according to the terms listed in LICENSE           *
 * (see tmva/doc/LICENSE)                                                         *
 **********************************************************************************/

/*! \class TMVA::MethodCFMlpANN_Utils
\ingroup TMVA

Implementation of the Clermont-Ferrand artificial neural network

Reference for the original FORTRAN version "mlpl3.F":
  - Authors  : J. Proriol and contributions from ALEPH-Clermont-Ferrand
               Team members
  - Copyright: Laboratoire Physique Corpusculaire
               Universite de Blaise Pascal, IN2P3/CNRS
*/

#include <string>
#include <iostream>
#include <cstdlib>

#include "TMath.h"
#include "TString.h"

#include "TMVA/MethodCFMlpANN_Utils.h"
#include "TMVA/MsgLogger.h"
#include "TMVA/Timer.h"
#include "TMVA/Types.h"

using std::cout;
using std::endl;

const Int_t TMVA::MethodCFMlpANN_Utils::fg_max_nVar_ = max_nVar_;
const Int_t TMVA::MethodCFMlpANN_Utils::fg_max_nNodes_ = max_nNodes_;
const char* const TMVA::MethodCFMlpANN_Utils::fg_MethodName = "--- CFMlpANN ";

////////////////////////////////////////////////////////////////////////////////
/// default constructor

TMVA::MethodCFMlpANN_Utils::MethodCFMlpANN_Utils():fg_100(100),
fg_0(0),
fg_999(999)
{
   Int_t i(0);
   for(i=0; i<max_nVar_;++i) fVarn_1.xmin[i] = 0;
   fCost_1.ancout = 0;
   fCost_1.ieps = 0;
   fCost_1.tolcou = 0;

   for(i=0; i<max_nNodes_;++i) fDel_1.coef[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fDel_1.del[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fDel_1.delta[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fDel_1.delw[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fDel_1.delww[i] = 0;
   fDel_1.demin = 0;
   fDel_1.demax = 0;
   fDel_1.idde = 0;
   for(i=0; i<max_nLayers_;++i) fDel_1.temp[i] = 0;

   for(i=0; i<max_nNodes_;++i) fNeur_1.cut[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.deltaww[i] = 0;
   for(i=0; i<max_nLayers_;++i) fNeur_1.neuron[i] = 0;
   for(i=0; i<max_nNodes_;++i) fNeur_1.o[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fNeur_1.w[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.ww[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.x[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.y[i] = 0;

   fParam_1.eeps = 0;
   fParam_1.epsmin = 0;
   fParam_1.epsmax = 0;
   fParam_1.eta = 0;
   fParam_1.ichoi = 0;
   fParam_1.itest = 0;
   fParam_1.layerm = 0;
   fParam_1.lclass = 0;
   fParam_1.nblearn = 0;
   fParam_1.ndiv = 0;
   fParam_1.ndivis = 0;
   fParam_1.nevl = 0;
   fParam_1.nevt = 0;
   fParam_1.nunap = 0;
   fParam_1.nunilec = 0;
   fParam_1.nunishort = 0;
   fParam_1.nunisor = 0;
   fParam_1.nvar = 0;

   fVarn_1.iclass = 0;
   for(i=0; i<max_Events_;++i) fVarn_1.mclass[i] = 0;
   for(i=0; i<max_Events_;++i) fVarn_1.nclass[i] = 0;
   for(i=0; i<max_nVar_;++i) fVarn_1.xmax[i] = 0;

   fLogger = 0;
}

////////////////////////////////////////////////////////////////////////////////
/// Destructor.

TMVA::MethodCFMlpANN_Utils::~MethodCFMlpANN_Utils()
{
}

////////////////////////////////////////////////////////////////////////////////

void TMVA::MethodCFMlpANN_Utils::Train_nn( Double_t *tin2, Double_t *tout2, Int_t *ntrain,
                                           Int_t *ntest, Int_t *nvar2, Int_t *nlayer,
                                           Int_t *nodes, Int_t *ncycle )
{
   // training interface - called from MethodCFMlpANN class object

   // sanity checks
   if (*ntrain + *ntest > max_Events_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of training + testing" \
              " events exceeds hardcoded maximum - reset to maximum allowed number");
      *ntrain = *ntrain*(max_Events_/(*ntrain + *ntest));
      *ntest  = *ntest *(max_Events_/(*ntrain + *ntest));
   }
   if (*nvar2 > max_nVar_) {
      printf( "*** CFMlpANN_f2c: ERROR in Train_nn: number of variables" \
              " exceeds hardcoded maximum ==> abort");
      std::exit(1);
   }
   if (*nlayer > max_nLayers_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of layers" \
              " exceeds hardcoded maximum - reset to maximum allowed number");
      *nlayer = max_nLayers_;
   }
   if (*nodes > max_nNodes_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of nodes" \
              " exceeds hardcoded maximum - reset to maximum allowed number");
      *nodes = max_nNodes_;
   }

   // create dynamic data tables (AH)
   fVarn2_1.Create( *ntrain + *ntest, *nvar2 );
   fVarn3_1.Create( *ntrain + *ntest, *nvar2 );

   // Int_t imax;
   char det[20];

   Entree_new(nvar2, det, ntrain, ntest, nlayer, nodes, ncycle, (Int_t)20);
   if (fNeur_1.neuron[fParam_1.layerm - 1] == 1) {
      // imax = 2;
      fParam_1.lclass = 2;
   }
   else {
      // imax = fNeur_1.neuron[fParam_1.layerm - 1] << 1;
      fParam_1.lclass = fNeur_1.neuron[fParam_1.layerm - 1];
   }
   fParam_1.nvar = fNeur_1.neuron[0];
   TestNN();
   Innit(det, tout2, tin2, (Int_t)20);

   // delete data tables
   fVarn2_1.Delete();
   fVarn3_1.Delete();
}
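
// Illustrative sketch (not part of the original file): how a caller - in TMVA
// the MethodCFMlpANN class - is expected to drive Train_nn(). The names and
// sizes below are hypothetical, and the caller must be a concrete subclass of
// MethodCFMlpANN_Utils, since DataInterface() delivers the events during the
// training. TIN holds the input variables, TOUT the type (class) of each
// event, as described in the file header. (Requires <vector>.)
#if 0
   Int_t ntrain = 1000, ntest = 500, nvar = 4, nlayer = 3, ncycle = 100;
   Int_t nodes[3] = { 4, 10, 2 };                       // last layer: 2 classes
   std::vector<Double_t> tin((ntrain + ntest) * nvar);  // input variables
   std::vector<Double_t> tout(ntrain + ntest);          // type of each event
   // ... fill tin and tout, then:
   net.Train_nn(tin.data(), tout.data(), &ntrain, &ntest, &nvar,
                &nlayer, nodes, &ncycle);               // 'net': concrete subclass
#endif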

////////////////////////////////////////////////////////////////////////////////

void TMVA::MethodCFMlpANN_Utils::Entree_new( Int_t *, char * /*det*/, Int_t *ntrain,
                                             Int_t *ntest, Int_t *numlayer, Int_t *nodes,
                                             Int_t *numcycle, Int_t /*det_len*/)
{
   // first initialisation of ANN
   Int_t i__1;

   Int_t rewrite, i__, j, ncoef;
   Int_t ntemp, num, retrain;

   /* NTRAIN: Nb of events used during the learning */
   /* NTEST: Nb of events used for the test */
   /* TIN: Input variables */
   /* TOUT: type of the event */

   fCost_1.ancout = 1e30;

   /* .............. HardCoded Values .................... */
   retrain = 0;
   rewrite = 1000;
   for (i__ = 1; i__ <= max_nNodes_; ++i__) {
      fDel_1.coef[i__ - 1] = (Float_t)0.;
   }
   for (i__ = 1; i__ <= max_nLayers_; ++i__) {
      fDel_1.temp[i__ - 1] = (Float_t)0.;
   }
   fParam_1.layerm = *numlayer;
   if (fParam_1.layerm > max_nLayers_) {
      printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
             fParam_1.layerm, max_nLayers_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   fParam_1.nevl = *ntrain;
   fParam_1.nevt = *ntest;
   fParam_1.nblearn = *numcycle;
   fVarn_1.iclass = 2;
   fParam_1.nunilec = 10;
   fParam_1.epsmin = 1e-10;
   fParam_1.epsmax = 1e-4;
   fParam_1.eta = .5;
   fCost_1.tolcou = 1e-6;
   fCost_1.ieps = 2;
   fParam_1.nunisor = 30;
   fParam_1.nunishort = 48;
   fParam_1.nunap = 40;

   ULog() << kINFO << "Total number of events for training: " << fParam_1.nevl << Endl;
   ULog() << kINFO << "Total number of training cycles    : " << fParam_1.nblearn << Endl;
   if (fParam_1.nevl > max_Events_) {
      printf("Error: number of learning events exceeds maximum: %i, %i ==> abort",
             fParam_1.nevl, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.nevt > max_Events_) {
      printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
             fParam_1.nevt, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      num = nodes[j-1];
      if (num < 2) {
         num = 2;
      }
      if (j == fParam_1.layerm && num != 2) {
         num = 2;
      }
      fNeur_1.neuron[j - 1] = num;
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      ULog() << kINFO << "Number of neurons for layer " << j << ": " << fNeur_1.neuron[j - 1] << Endl;
   }
   if (fNeur_1.neuron[fParam_1.layerm - 1] != 2) {
      printf("Error: wrong number of classes at output layer: %i != 2 ==> abort\n",
             fNeur_1.neuron[fParam_1.layerm - 1]);
      Arret("stop");
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (j = 1; j <= i__1; ++j) {
      fDel_1.coef[j - 1] = 1.;
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      fDel_1.temp[j - 1] = 1.;
   }
   fParam_1.ichoi = retrain;
   fParam_1.ndivis = rewrite;
   fDel_1.idde = 1;
   if (! (fParam_1.ichoi == 0 || fParam_1.ichoi == 1)) {
      printf( "Big troubles !!! \n" );
      Arret("new training or continued one !");
   }
   if (fParam_1.ichoi == 0) {
      ULog() << kINFO << "New training will be performed" << Endl;
   }
   else {
      printf("%s: New training will be continued from a weight file\n", fg_MethodName);
   }
   ncoef = 0;
   ntemp = 0;
   for (i__ = 1; i__ <= max_nNodes_; ++i__) {
      if (fDel_1.coef[i__ - 1] != (Float_t)0.) {
         ++ncoef;
      }
   }
   for (i__ = 1; i__ <= max_nLayers_; ++i__) {
      if (fDel_1.temp[i__ - 1] != (Float_t)0.) {
         ++ntemp;
      }
   }
   if (ncoef != fNeur_1.neuron[fParam_1.layerm - 1]) {
      Arret("entree error code 1: needs to be reported");
   }
   if (ntemp != fParam_1.layerm) {
      Arret("entree error code 2: needs to be reported");
   }
}

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////
/// initialise the weights and biases with random values in (-0.2, 0.2)

void TMVA::MethodCFMlpANN_Utils::Wini()
{
   Int_t i__1, i__2, i__3;
   Int_t i__, j;
   Int_t layer;

   i__1 = fParam_1.layerm;
   for (layer = 2; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer - 2];
      for (i__ = 1; i__ <= i__2; ++i__) {
         i__3 = fNeur_1.neuron[layer - 1];
         for (j = 1; j <= i__3; ++j) {
            w_ref(layer, j, i__) = (Sen3a() * 2. - 1.) * .2;
            ww_ref(layer, j) = (Sen3a() * 2. - 1.) * .2;
         }
      }
   }
}
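
// Since Sen3a() returns a uniform deviate in (0,1), the expression
// (Sen3a() * 2. - 1.) * .2 above draws each initial weight and bias
// uniformly from the interval (-0.2, 0.2).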

#undef ww_ref
#undef w_ref

#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////
/// forward propagation of one training event through the network

void TMVA::MethodCFMlpANN_Utils::En_avant( Int_t *ievent )
{
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j;
   Int_t layer;

   i__1 = fNeur_1.neuron[0];
   for (i__ = 1; i__ <= i__1; ++i__) {
      y_ref(1, i__) = xeev_ref(*ievent, i__);
   }
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         x_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            x_ref(layer + 1, j) = ( x_ref(layer + 1, j) + y_ref(layer, i__)
                                    * w_ref(layer + 1, j, i__) );
         }
         x_ref(layer + 1, j) = x_ref(layer + 1, j) + ww_ref(layer + 1, j);
         i__3 = layer + 1;
         Foncf(&i__3, &x_ref(layer + 1, j), &f);
         y_ref(layer + 1, j) = f;
      }
   }
}
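
// In conventional notation, the loops above compute for each layer l
//    x(l+1,j) = sum_i w(l+1,j,i) * y(l,i) + ww(l+1,j)
//    y(l+1,j) = f( x(l+1,j) )
// where f is the sigmoid implemented in Foncf() and y(1,*) holds the
// normalised input variables of event *ievent.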

#undef ww_ref
#undef y_ref
#undef x_ref
#undef w_ref
#undef xeev_ref

#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)

////////////////////////////////////////////////////////////////////////////////
/// read the training events through DataInterface(), determine the
/// per-variable minima and maxima, and normalise all inputs to [-1, +1]

void TMVA::MethodCFMlpANN_Utils::Leclearn( Int_t *ktest, Double_t *tout2, Double_t *tin2 )
{
   Int_t i__1, i__2;

   Int_t i__, j, k, l;
   Int_t nocla[max_nNodes_], ikend;
   Double_t xpg[max_nVar_];

   *ktest = 0;
   i__1 = fParam_1.lclass;
   for (k = 1; k <= i__1; ++k) {
      nocla[k - 1] = 0;
   }
   i__1 = fParam_1.nvar;
   for (i__ = 1; i__ <= i__1; ++i__) {
      fVarn_1.xmin[i__ - 1] = 1e30;
      fVarn_1.xmax[i__ - 1] = -fVarn_1.xmin[i__ - 1];
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      DataInterface(tout2, tin2, &fg_100, &fg_0, &fParam_1.nevl, &fParam_1.nvar,
                    xpg, &fVarn_1.nclass[i__ - 1], &ikend);
      if (ikend == -1) {
         break;
      }

      CollectVar(&fParam_1.nvar, &fVarn_1.nclass[i__ - 1], xpg);

      i__2 = fParam_1.nvar;
      for (j = 1; j <= i__2; ++j) {
         xeev_ref(i__, j) = xpg[j - 1];
      }
      if (fVarn_1.iclass == 1) {
         i__2 = fParam_1.lclass;
         for (k = 1; k <= i__2; ++k) {
            if (fVarn_1.nclass[i__ - 1] == k) {
               ++nocla[k - 1];
            }
         }
      }
      i__2 = fParam_1.nvar;
      for (k = 1; k <= i__2; ++k) {
         if (xeev_ref(i__, k) < fVarn_1.xmin[k - 1]) {
            fVarn_1.xmin[k - 1] = xeev_ref(i__, k);
         }
         if (xeev_ref(i__, k) > fVarn_1.xmax[k - 1]) {
            fVarn_1.xmax[k - 1] = xeev_ref(i__, k);
         }
      }
   }

   if (fVarn_1.iclass == 1) {
      i__2 = fParam_1.lclass;
      for (k = 1; k <= i__2; ++k) {
         i__1 = fParam_1.lclass;
         for (l = 1; l <= i__1; ++l) {
            if (nocla[k - 1] != nocla[l - 1]) {
               *ktest = 1;
            }
         }
      }
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      i__2 = fParam_1.nvar;
      for (l = 1; l <= i__2; ++l) {
         if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (Float_t)0.) {
            xeev_ref(i__, l) = (Float_t)0.;
         }
         else {
            xeev_ref(i__, l) = xeev_ref(i__, l) - (fVarn_1.xmax[l - 1] +
                                                   fVarn_1.xmin[l - 1]) / 2.;
            xeev_ref(i__, l) = xeev_ref(i__, l) / ((fVarn_1.xmax[l - 1] -
                                                    fVarn_1.xmin[l - 1]) / 2.);
         }
      }
   }
}
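
// Illustrative sketch (not part of the original file): the transformation in
// the last loop above, written as a standalone helper with hypothetical names.
// Variables whose training extrema are both zero are treated as dummies.
#if 0
Double_t Normalise( Double_t x, Double_t xmin, Double_t xmax )
{
   if (xmax == 0. && xmin == 0.) return 0.;
   const Double_t centre   = (xmax + xmin) / 2.;
   const Double_t halfSpan = (xmax - xmin) / 2.;
   return (x - centre) / halfSpan;  // maps [xmin, xmax] linearly onto [-1, +1]
}
#endif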

#undef xeev_ref

#define delw_ref(a_1,a_2,a_3) fDel_1.delw[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define delww_ref(a_1,a_2) fDel_1.delww[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define del_ref(a_1,a_2) fDel_1.del[(a_2)*max_nLayers_ + a_1 - 7]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////
/// back-propagation of the error for one event: compute the deltas from the
/// output layer backwards and update all weights and biases, using learning
/// rate fParam_1.eeps and momentum fParam_1.eta

void TMVA::MethodCFMlpANN_Utils::En_arriere( Int_t *ievent )
{
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j, k, l;
   Double_t df, uu;

   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      if (fVarn_1.nclass[*ievent - 1] == i__) {
         fNeur_1.o[i__ - 1] = 1.;
      }
      else {
         fNeur_1.o[i__ - 1] = -1.;
      }
   }
   l = fParam_1.layerm;
   i__1 = fNeur_1.neuron[l - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      f = y_ref(l, i__);
      df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
      del_ref(l, i__) = df * (fNeur_1.o[i__ - 1] - y_ref(l, i__)) *
                        fDel_1.coef[i__ - 1];
      delww_ref(l, i__) = fParam_1.eeps * del_ref(l, i__);
      i__2 = fNeur_1.neuron[l - 2];
      for (j = 1; j <= i__2; ++j) {
         delw_ref(l, i__, j) = fParam_1.eeps * del_ref(l, i__) * y_ref(l - 1, j);
         /* L20: */
      }
   }
   for (l = fParam_1.layerm - 1; l >= 2; --l) {
      i__2 = fNeur_1.neuron[l - 1];
      for (i__ = 1; i__ <= i__2; ++i__) {
         uu = 0.;
         i__1 = fNeur_1.neuron[l];
         for (k = 1; k <= i__1; ++k) {
            uu += w_ref(l + 1, k, i__) * del_ref(l + 1, k);
         }
         Foncf(&l, &x_ref(l, i__), &f);
         df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
         del_ref(l, i__) = df * uu;
         delww_ref(l, i__) = fParam_1.eeps * del_ref(l, i__);
         i__1 = fNeur_1.neuron[l - 2];
         for (j = 1; j <= i__1; ++j) {
            delw_ref(l, i__, j) = fParam_1.eeps * del_ref(l, i__) * y_ref(l - 1, j);
         }
      }
   }
   i__1 = fParam_1.layerm;
   for (l = 2; l <= i__1; ++l) {
      i__2 = fNeur_1.neuron[l - 1];
      for (i__ = 1; i__ <= i__2; ++i__) {
         deltaww_ref(l, i__) = delww_ref(l, i__) + fParam_1.eta * deltaww_ref(l, i__);
         ww_ref(l, i__) = ww_ref(l, i__) + deltaww_ref(l, i__);
         i__3 = fNeur_1.neuron[l - 2];
         for (j = 1; j <= i__3; ++j) {
            delta_ref(l, i__, j) = delw_ref(l, i__, j) + fParam_1.eta * delta_ref(l, i__, j);
            w_ref(l, i__, j) = w_ref(l, i__, j) + delta_ref(l, i__, j);
         }
      }
   }
}
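
// The scheme above is gradient descent with momentum, in conventional
// notation (eps = fParam_1.eeps, eta = fParam_1.eta, T = fDel_1.temp[l-1]):
//    output layer : del(L,i) = f'(x) * (o_i - y(L,i)) * coef_i
//    hidden layers: del(l,i) = f'(x) * sum_k w(l+1,k,i) * del(l+1,k)
//    weight update: Dw(t)    = eps * del(l,i) * y(l-1,j) + eta * Dw(t-1)
// where f'(x) = (1+f)(1-f)/(2T) is the derivative of the sigmoid in Foncf().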

#undef deltaww_ref
#undef del_ref
#undef ww_ref
#undef delww_ref
#undef delta_ref
#undef y_ref
#undef x_ref
#undef w_ref
#undef delw_ref

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////

void TMVA::MethodCFMlpANN_Utils::Out( Int_t *iii, Int_t *maxcycle )
{
   // write weights to file

   if (*iii == *maxcycle) {
      // now in MethodCFMlpANN.cxx
   }
}

#undef ww_ref
#undef w_ref

#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////

void TMVA::MethodCFMlpANN_Utils::Innit( char *det, Double_t *tout2, Double_t *tin2, Int_t /*det_len*/ )
{
   // Initialization
   Int_t i__1, i__2, i__3;

   Int_t i__, j;
   Int_t nevod, layer, ktest, i1, nrest;
   Int_t ievent(0);
   Int_t kkk;
   Double_t xxx = 0.0, yyy = 0.0;

   Leclearn(&ktest, tout2, tin2);
   Lecev2(&ktest, tout2, tin2);
   if (ktest == 1) {
      printf( " .... strange to be here (1) ... \n");
      std::exit(1);
   }
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         deltaww_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            delta_ref(layer + 1, j, i__) = 0.;
         }
      }
   }
   if (fParam_1.ichoi == 1) {
      Inl();
   }
   else {
      Wini();
   }
   kkk = 0;
   i__3 = fParam_1.nblearn;
   Timer timer( i__3, "CFMlpANN" );
   Int_t num = i__3/100;

   for (i1 = 1; i1 <= i__3; ++i1) {

      if ( ( num>0 && (i1-1)%num == 0) || (i1 == i__3) ) timer.DrawProgressBar( i1-1 );

      i__2 = fParam_1.nevl;
      for (i__ = 1; i__ <= i__2; ++i__) {
         ++kkk;
         if (fCost_1.ieps == 2) {
            fParam_1.eeps = Fdecroi(&kkk);
         }
         if (fCost_1.ieps == 1) {
            fParam_1.eeps = fParam_1.epsmin;
         }
         Bool_t doCont = kTRUE;
         if (fVarn_1.iclass == 2) {
            ievent = (Int_t) ((Double_t) fParam_1.nevl * Sen3a());
            if (ievent == 0) {
               doCont = kFALSE;
            }
         }
         if (doCont) {
            if (fVarn_1.iclass == 1) {
               nevod = fParam_1.nevl / fParam_1.lclass;
               nrest = i__ % fParam_1.lclass;
               fParam_1.ndiv = i__ / fParam_1.lclass;
               if (nrest != 0) {
                  ievent = fParam_1.ndiv + 1 + (fParam_1.lclass - nrest) * nevod;
               }
               else {
                  ievent = fParam_1.ndiv;
               }
            }
            En_avant(&ievent);
            En_arriere(&ievent);
         }
      }
      yyy = 0.;
      if (i1 % fParam_1.ndivis == 0 || i1 == 1 || i1 == fParam_1.nblearn) {
         Cout(&i1, &xxx);
         Cout2(&i1, &yyy);
         GraphNN(&i1, &xxx, &yyy, det, (Int_t)20);
         Out(&i1, &fParam_1.nblearn);
      }
      if (xxx < fCost_1.tolcou) {
         GraphNN(&fParam_1.nblearn, &xxx, &yyy, det, (Int_t)20);
         Out(&fParam_1.nblearn, &fParam_1.nblearn);
         break;
      }
   }
}
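
// One training "cycle" in the loop above performs nevl weight updates: with
// fVarn_1.iclass == 2 (the setting made in Entree_new) the event is drawn at
// random via Sen3a(), the learning rate comes from Fdecroi() when
// fCost_1.ieps == 2, and every ndivis cycles the training and test costs are
// recomputed (Cout/Cout2); training stops early once the cost drops below
// fCost_1.tolcou.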

#undef deltaww_ref
#undef delta_ref

////////////////////////////////////////////////////////////////////////////////
/// sanity checks of the network configuration against the hardcoded limits

void TMVA::MethodCFMlpANN_Utils::TestNN()
{
   Int_t i__1;

   Int_t i__;
   Int_t ktest;

   ktest = 0;
   if (fParam_1.layerm > max_nLayers_) {
      ktest = 1;
      printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
             fParam_1.layerm, max_nLayers_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.nevl > max_Events_) {
      ktest = 1;
      printf("Error: number of training events exceeds maximum: %i, %i ==> abort",
             fParam_1.nevl, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.nevt > max_Events_) {
      printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
             fParam_1.nevt, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.lclass < fNeur_1.neuron[fParam_1.layerm - 1]) {
      ktest = 1;
      printf("Error: wrong number of classes at output layer: %i != %i ==> abort\n",
             fNeur_1.neuron[fParam_1.layerm - 1], fParam_1.lclass);
      Arret("problem needs to be reported ");
   }
   if (fParam_1.nvar > max_nVar_) {
      ktest = 1;
      printf("Error: number of variables exceeds maximum: %i, %i ==> abort",
             fParam_1.nvar, fg_max_nVar_ );
      Arret("modification of mlpl3_param_lim.inc is needed");
   }
   i__1 = fParam_1.layerm;
   for (i__ = 1; i__ <= i__1; ++i__) {
      if (fNeur_1.neuron[i__ - 1] > max_nNodes_) {
         ktest = 1;
         printf("Error: number of neurons at layer exceeds maximum: %i, %i ==> abort",
                i__, fg_max_nNodes_ );
      }
   }
   if (ktest == 1) {
      printf( " .... strange to be here (2) ... \n");
      std::exit(1);
   }
}

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////
/// cost on the training sample: mean squared deviation of the network
/// output from the target values

void TMVA::MethodCFMlpANN_Utils::Cout( Int_t * /*i1*/, Double_t *xxx )
{
   Int_t i__1, i__2;
   Double_t d__1;

   Double_t c__;
   Int_t i__, j;

   c__ = 0.;
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         if (fVarn_1.nclass[i__ - 1] == j) {
            fNeur_1.o[j - 1] = 1.;
         }
         else {
            fNeur_1.o[j - 1] = -1.;
         }
         // Computing 2nd power
         d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
         c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
      }
   }
   c__ /= (Double_t) (fParam_1.nevl * fParam_1.lclass) * 2.;
   *xxx = c__;
   fCost_1.ancout = c__;
}
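
// The value returned in *xxx (and cached in fCost_1.ancout) is the cost
// minimised by the training:
//    E = sum_{events} sum_j coef_j * ( y(layerm,j) - o_j )^2 / (2*nevl*lclass)
// with targets o_j = +1 for the true class of the event and -1 otherwise.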

#undef y_ref

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////
/// prepare the restart of a training from a weight file (fParam_1.ichoi == 1);
/// skeleton of the weight-file reading of the original FORTRAN code, the
/// file I/O itself was removed in the C++ translation

void TMVA::MethodCFMlpANN_Utils::Inl()
{
   Int_t i__1, i__2;

   Int_t jmax, k, layer, kk, nq, nr;

   i__1 = fParam_1.nvar;
   i__1 = fParam_1.layerm;
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      nq = fNeur_1.neuron[layer] / 10;
      nr = fNeur_1.neuron[layer] - nq * 10;
      if (nr == 0) {
         kk = nq;
      }
      else {
         kk = nq + 1;
      }
      i__2 = kk;
      for (k = 1; k <= i__2; ++k) {
         // jmin = k * 10 - 9;
         jmax = k * 10;
         if (fNeur_1.neuron[layer] < jmax) {
            jmax = fNeur_1.neuron[layer];
         }
         // i__3 = fNeur_1.neuron[layer - 1];
      }
   }
}

#undef ww_ref
#undef w_ref

////////////////////////////////////////////////////////////////////////////////
/// linearly decreasing learning rate

Double_t TMVA::MethodCFMlpANN_Utils::Fdecroi( Int_t *i__ )
{
   Double_t ret_val;

   Double_t aaa, bbb;

   aaa = (fParam_1.epsmin - fParam_1.epsmax) / (Double_t) (fParam_1.nblearn *
                                                           fParam_1.nevl - 1);
   bbb = fParam_1.epsmax - aaa;
   ret_val = aaa * (Double_t) (*i__) + bbb;
   return ret_val;
}
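
// Fdecroi(k) falls linearly from epsmax at the first weight update (k = 1)
// to epsmin at the last one (k = nblearn*nevl); it supplies the decaying
// learning rate used when fCost_1.ieps == 2 (the default set in Entree_new).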

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////
/// for each output neuron, compute the mean response for events of the
/// matching class and for all others, and place the discrimination cut
/// halfway between the two means

void TMVA::MethodCFMlpANN_Utils::GraphNN( Int_t *ilearn, Double_t * /*xxx*/,
                                          Double_t * /*yyy*/, char * /*det*/, Int_t /*det_len*/ )
{
   Int_t i__1, i__2;

   Double_t xmok[max_nNodes_];
   // Float_t xpaw;
   Double_t xmko[max_nNodes_];
   Int_t i__, j;
   Int_t ix;
   // Int_t jjj;
   // Float_t vbn[10];
   Int_t nko[max_nNodes_], nok[max_nNodes_];

   // for (i__ = 1; i__ <= 10; ++i__) {
   //    vbn[i__ - 1] = (Float_t)0.;
   // }
   if (*ilearn == 1) {
      // AH: removed output
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      nok[i__ - 1] = 0;
      nko[i__ - 1] = 0;
      xmok[i__ - 1] = 0.;
      xmko[i__ - 1] = 0.;
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         // xpaw = (Float_t) y_ref(fParam_1.layerm, j);
         if (fVarn_1.nclass[i__ - 1] == j) {
            ++nok[j - 1];
            xmok[j - 1] += y_ref(fParam_1.layerm, j);
         }
         else {
            ++nko[j - 1];
            xmko[j - 1] += y_ref(fParam_1.layerm, j);
            // jjj = j + fNeur_1.neuron[fParam_1.layerm - 1];
         }
         // if (j <= 9) {
         //    vbn[j - 1] = xpaw;
         // }
      }
      // vbn[9] = (Float_t) fVarn_1.nclass[i__ - 1];
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (j = 1; j <= i__1; ++j) {
      xmok[j - 1] /= (Double_t) nok[j - 1];
      xmko[j - 1] /= (Double_t) nko[j - 1];
      fNeur_1.cut[j - 1] = (xmok[j - 1] + xmko[j - 1]) / 2.;
   }
   ix = fNeur_1.neuron[fParam_1.layerm - 1];
   i__1 = ix;
}

#undef y_ref

////////////////////////////////////////////////////////////////////////////////
/// portable uniform random number generator in (0,1), following
/// K.D. Senne, J. Stochastics, Vol. 1, No. 3 (1974), pp. 215-238

Double_t TMVA::MethodCFMlpANN_Utils::Sen3a( void )
{
   // Initialized data
   Int_t m12 = 4096;
   Double_t f1 = 2.44140625e-4;
   Double_t f2 = 5.96046448e-8;
   Double_t f3 = 1.45519152e-11;
   Int_t j1 = 3823;
   Int_t j2 = 4006;
   Int_t j3 = 2903;
   static Int_t fg_i1 = 3823;
   static Int_t fg_i2 = 4006;
   static Int_t fg_i3 = 2903;

   Double_t ret_val;
   Int_t k3, l3, k2, l2, k1, l1;

   // reference: /k.d.senne/j. stochastics/ vol 1,no 3 (1974),pp.215-38
   k3 = fg_i3 * j3;
   l3 = k3 / m12;
   k2 = fg_i2 * j3 + fg_i3 * j2 + l3;
   l2 = k2 / m12;
   k1 = fg_i1 * j3 + fg_i2 * j2 + fg_i3 * j1 + l2;
   l1 = k1 / m12;
   fg_i1 = k1 - l1 * m12;
   fg_i2 = k2 - l2 * m12;
   fg_i3 = k3 - l3 * m12;
   ret_val = f1 * (Double_t) fg_i1 + f2 * (Float_t) fg_i2 + f3 * (Double_t) fg_i3;

   return ret_val;
}
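
// Sen3a() is a combined multiplicative congruential generator operating on
// three 12-bit words (m12 = 4096 = 2^12); the factors f1, f2 and f3 equal
// 4096^-1, 4096^-2 and 4096^-3 and reassemble the three words into a
// Double_t in (0,1). The static state (fg_i1, fg_i2, fg_i3) makes the
// sequence deterministic across calls.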

////////////////////////////////////////////////////////////////////////////////

void TMVA::MethodCFMlpANN_Utils::Foncf( Int_t *i__, Double_t *u, Double_t *f )
{
   // [needs to be checked]
   Double_t yy;

   if (*u / fDel_1.temp[*i__ - 1] > 170.) {
      *f = .99999999989999999;
   }
   else if (*u / fDel_1.temp[*i__ - 1] < -170.) {
      *f = -.99999999989999999;
   }
   else {
      yy = TMath::Exp(-(*u) / fDel_1.temp[*i__ - 1]);
      *f = (1. - yy) / (yy + 1.);
   }
}
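
// Note: (1 - exp(-u/T)) / (1 + exp(-u/T)) = tanh( u/(2T) ), so Foncf()
// evaluates a temperature-scaled tanh with T = fDel_1.temp[*i__ - 1],
// clamped near +/-1 for |u/T| > 170 to protect TMath::Exp() from overflow.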

#undef w_ref

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////
/// cost on the test sample, computed as in Cout() but with En_avant2()

void TMVA::MethodCFMlpANN_Utils::Cout2( Int_t * /*i1*/, Double_t *yyy )
{
   Int_t i__1, i__2;
   Double_t d__1;

   Double_t c__;
   Int_t i__, j;

   c__ = 0.;
   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant2(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         if (fVarn_1.mclass[i__ - 1] == j) {
            fNeur_1.o[j - 1] = 1.;
         }
         else {
            fNeur_1.o[j - 1] = -1.;
         }
         /* Computing 2nd power */
         d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
         c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
      }
   }
   c__ /= (Double_t) (fParam_1.nevt * fParam_1.lclass) * 2.;
   *yyy = c__;
}

#undef y_ref

#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)

////////////////////////////////////////////////////////////////////////////////
/// read the test events and normalise them with the extrema obtained from
/// the training sample

void TMVA::MethodCFMlpANN_Utils::Lecev2( Int_t *ktest, Double_t *tout2, Double_t *tin2 )
{
   Int_t i__1, i__2;

   Int_t i__, j, l;
   // Int_t mocla[max_nNodes_];
   Int_t ikend;
   Double_t xpg[max_nVar_];

   /* NTRAIN: Nb of events used during the learning */
   /* NTEST: Nb of events used for the test */
   /* TIN: Input variables */
   /* TOUT: type of the event */

   *ktest = 0;
   i__1 = fParam_1.lclass;
   // for (k = 1; k <= i__1; ++k) {
   //    mocla[k - 1] = 0;
   // }
   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      DataInterface(tout2, tin2, &fg_999, &fg_0, &fParam_1.nevt, &fParam_1.nvar,
                    xpg, &fVarn_1.mclass[i__ - 1], &ikend);

      if (ikend == -1) {
         break;
      }

      i__2 = fParam_1.nvar;
      for (j = 1; j <= i__2; ++j) {
         xx_ref(i__, j) = xpg[j - 1];
      }
   }

   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      i__2 = fParam_1.nvar;
      for (l = 1; l <= i__2; ++l) {
         if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (Float_t)0.) {
            xx_ref(i__, l) = (Float_t)0.;
         }
         else {
            xx_ref(i__, l) = xx_ref(i__, l) - (fVarn_1.xmax[l - 1] +
                                               fVarn_1.xmin[l - 1]) / 2.;
            xx_ref(i__, l) = xx_ref(i__, l) / ((fVarn_1.xmax[l - 1] -
                                                fVarn_1.xmin[l - 1]) / 2.);
         }
      }
   }
}

#undef xx_ref

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)

////////////////////////////////////////////////////////////////////////////////
/// forward propagation of one test event through the network; identical to
/// En_avant() but reads the event from the test-sample table (xx_ref)

void TMVA::MethodCFMlpANN_Utils::En_avant2( Int_t *ievent )
{
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j;
   Int_t layer;

   i__1 = fNeur_1.neuron[0];
   for (i__ = 1; i__ <= i__1; ++i__) {
      y_ref(1, i__) = xx_ref(*ievent, i__);
   }
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         x_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            x_ref(layer + 1, j) = x_ref(layer + 1, j) + y_ref(layer, i__)
                                  * w_ref(layer + 1, j, i__);
         }
         x_ref(layer + 1, j) = x_ref(layer + 1, j) + ww_ref(layer + 1, j);
         i__3 = layer + 1;
         Foncf(&i__3, &x_ref(layer + 1, j), &f);
         y_ref(layer + 1, j) = f;
         /* L2: */
      }
   }
}

#undef xx_ref
#undef ww_ref
#undef y_ref
#undef x_ref
#undef w_ref

////////////////////////////////////////////////////////////////////////////////

void TMVA::MethodCFMlpANN_Utils::Arret( const char* mot )
{
   // fatal error occurred: stop execution
   printf("%s: %s",fg_MethodName, mot);
   std::exit(1);
}

////////////////////////////////////////////////////////////////////////////////
/// collect the input variables of one event (body disabled in the C++
/// translation; kept for reference)

void TMVA::MethodCFMlpANN_Utils::CollectVar( Int_t * /*nvar*/, Int_t * /*class__*/, Double_t * /*xpg*/ )
{
   // Int_t i__1;

   // Int_t i__;
   // Float_t x[201];

   // // Parameter adjustments
   // --xpg;

   // for (i__ = 1; i__ <= 201; ++i__) {
   //    x[i__ - 1] = 0.0;
   // }
   // x[0] = (Float_t) (*class__);
   // i__1 = *nvar;
   // for (i__ = 1; i__ <= i__1; ++i__) {
   //    x[i__] = (Float_t) xpg[i__];
   // }
}