MethodCFMlpANN_Utils.cxx
// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis
 * Package: TMVA
 * Class  : TMVA::MethodCFMlpANN_Utils
 * Web    : http://tmva.sourceforge.net
 *
 * Reference for the original FORTRAN version "mlpl3.F":
 *      Authors  : J. Proriol and contributions from ALEPH-Clermont-Ferrand
 *                 team members
 *      Copyright: Laboratoire Physique Corpusculaire
 *                 Universite de Blaise Pascal, IN2P3/CNRS
 *
 * Modifications by present authors:
 *      use dynamical data tables (not for all of them, but for the big ones)
 *
 * Description:
 *      Utility routine translated from the original mlpl3.F FORTRAN routine
 *
 *      MultiLayerPerceptron : Training code
 *
 *        NTRAIN: number of events used during the learning
 *        NTEST : number of events used for the test
 *        TIN   : input variables
 *        TOUT  : type of the event
 *
 * ----------------------------------------------------------------------------
 *
 * Authors (alphabetical):
 *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland
 *      Xavier Prudent  <prudent@lapp.in2p3.fr>  - LAPP, France
 *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany
 *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada
 *
 * Copyright (c) 2005:
 *      CERN, Switzerland
 *      U. of Victoria, Canada
 *      MPI-K Heidelberg, Germany
 *      LAPP, Annecy, France
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted according to the terms listed in LICENSE
 * (http://tmva.sourceforge.net/LICENSE)
 **********************************************************************************/

/*! \class TMVA::MethodCFMlpANN_Utils
\ingroup TMVA

Implementation of the Clermont-Ferrand artificial neural network.

Reference for the original FORTRAN version "mlpl3.F":
 - Authors  : J. Proriol and contributions from ALEPH-Clermont-Ferrand
              team members
 - Copyright: Laboratoire Physique Corpusculaire
              Universite de Blaise Pascal, IN2P3/CNRS
*/

#include <string>
#include <iostream>
#include <cstdlib>

#include "TMath.h"
#include "TString.h"

#include "TMVA/MethodCFMlpANN_Utils.h"
#include "TMVA/MsgLogger.h"
#include "TMVA/Timer.h"
#include "TMVA/Types.h"

using std::cout;
using std::endl;

ClassImp(TMVA::MethodCFMlpANN_Utils);

const Int_t       TMVA::MethodCFMlpANN_Utils::fg_max_nVar_   = max_nVar_;
const Int_t       TMVA::MethodCFMlpANN_Utils::fg_max_nNodes_ = max_nNodes_;
const char* const TMVA::MethodCFMlpANN_Utils::fg_MethodName  = "--- CFMlpANN ";

////////////////////////////////////////////////////////////////////////////////
/// default constructor

TMVA::MethodCFMlpANN_Utils::MethodCFMlpANN_Utils():
fg_100(100),
fg_0(0),
fg_999(999)
{
   Int_t i(0);
   for(i=0; i<max_nVar_;++i) fVarn_1.xmin[i] = 0;
   fCost_1.ancout = 0;
   fCost_1.ieps = 0;
   fCost_1.tolcou = 0;

   for(i=0; i<max_nNodes_;++i) fDel_1.coef[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fDel_1.del[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fDel_1.delta[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fDel_1.delw[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fDel_1.delww[i] = 0;
   fDel_1.demin = 0;
   fDel_1.demax = 0;
   fDel_1.idde = 0;
   for(i=0; i<max_nLayers_;++i) fDel_1.temp[i] = 0;

   for(i=0; i<max_nNodes_;++i) fNeur_1.cut[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.deltaww[i] = 0;
   for(i=0; i<max_nLayers_;++i) fNeur_1.neuron[i] = 0;
   for(i=0; i<max_nNodes_;++i) fNeur_1.o[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fNeur_1.w[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.ww[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.x[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.y[i] = 0;

   fParam_1.eeps = 0;
   fParam_1.epsmin = 0;
   fParam_1.epsmax = 0;
   fParam_1.eta = 0;
   fParam_1.ichoi = 0;
   fParam_1.itest = 0;
   fParam_1.layerm = 0;
   fParam_1.lclass = 0;
   fParam_1.nblearn = 0;
   fParam_1.ndiv = 0;
   fParam_1.ndivis = 0;
   fParam_1.nevl = 0;
   fParam_1.nevt = 0;
   fParam_1.nunap = 0;
   fParam_1.nunilec = 0;
   fParam_1.nunishort = 0;
   fParam_1.nunisor = 0;
   fParam_1.nvar = 0;

   fVarn_1.iclass = 0;
   for(i=0; i<max_Events_;++i) fVarn_1.mclass[i] = 0;
   for(i=0; i<max_Events_;++i) fVarn_1.nclass[i] = 0;
   for(i=0; i<max_nVar_;++i) fVarn_1.xmax[i] = 0;

   fLogger = 0;
}

////////////////////////////////////////////////////////////////////////////////
/// Destructor.

TMVA::MethodCFMlpANN_Utils::~MethodCFMlpANN_Utils()
{
}

////////////////////////////////////////////////////////////////////////////////
/// Training interface - called from the MethodCFMlpANN class object.

void TMVA::MethodCFMlpANN_Utils::Train_nn( Double_t *tin2, Double_t *tout2, Int_t *ntrain,
                                           Int_t *ntest, Int_t *nvar2, Int_t *nlayer,
                                           Int_t *nodes, Int_t *ncycle )
{
   // sanity checks
   if (*ntrain + *ntest > max_Events_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of training + testing" \
              " events exceeds hardcoded maximum - reset to maximum allowed number");
      // rescale in floating point; the original integer ratio
      // max_Events_/(*ntrain + *ntest) truncates to zero here
      Double_t ratio = (Double_t)max_Events_ / (*ntrain + *ntest);
      *ntrain = (Int_t)(*ntrain * ratio);
      *ntest  = (Int_t)(*ntest  * ratio);
   }
   if (*nvar2 > max_nVar_) {
      printf( "*** CFMlpANN_f2c: ERROR in Train_nn: number of variables" \
              " exceeds hardcoded maximum ==> abort");
      std::exit(1);
   }
   if (*nlayer > max_nLayers_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of layers" \
              " exceeds hardcoded maximum - reset to maximum allowed number");
      *nlayer = max_nLayers_;
   }
   if (*nodes > max_nNodes_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of nodes" \
              " exceeds hardcoded maximum - reset to maximum allowed number");
      *nodes = max_nNodes_;
   }

   // create dynamic data tables (AH)
   fVarn2_1.Create( *ntrain + *ntest, *nvar2 );
   fVarn3_1.Create( *ntrain + *ntest, *nvar2 );

   // Int_t imax;
   char det[20];

   Entree_new(nvar2, det, ntrain, ntest, nlayer, nodes, ncycle, (Int_t)20);
   if (fNeur_1.neuron[fParam_1.layerm - 1] == 1) {
      // imax = 2;
      fParam_1.lclass = 2;
   }
   else {
      // imax = fNeur_1.neuron[fParam_1.layerm - 1] << 1;
      fParam_1.lclass = fNeur_1.neuron[fParam_1.layerm - 1];
   }
   fParam_1.nvar = fNeur_1.neuron[0];
   TestNN();
   Innit(det, tout2, tin2, (Int_t)20);

   // delete data tables
   fVarn2_1.Delete();
   fVarn3_1.Delete();
}

////////////////////////////////////////////////////////////////////////////////
/// First initialisation of the ANN.

void TMVA::MethodCFMlpANN_Utils::Entree_new( Int_t *, char * /*det*/, Int_t *ntrain,
                                             Int_t *ntest, Int_t *numlayer, Int_t *nodes,
                                             Int_t *numcycle, Int_t /*det_len*/ )
{
   Int_t i__1;

   Int_t rewrite, i__, j, ncoef;
   Int_t ntemp, num, retrain;

   /* NTRAIN: Nb of events used during the learning */
   /* NTEST:  Nb of events used for the test */
   /* TIN:    Input variables */
   /* TOUT:   type of the event */

   fCost_1.ancout = 1e30;

   /* .............. HardCoded Values .................... */
   retrain = 0;
   rewrite = 1000;
   for (i__ = 1; i__ <= max_nNodes_; ++i__) {
      fDel_1.coef[i__ - 1] = (Float_t)0.;
   }
   for (i__ = 1; i__ <= max_nLayers_; ++i__) {
      fDel_1.temp[i__ - 1] = (Float_t)0.;
   }
   fParam_1.layerm = *numlayer;
   if (fParam_1.layerm > max_nLayers_) {
      printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
             fParam_1.layerm, max_nLayers_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   fParam_1.nevl = *ntrain;
   fParam_1.nevt = *ntest;
   fParam_1.nblearn = *numcycle;
   fVarn_1.iclass = 2;
   fParam_1.nunilec = 10;
   fParam_1.epsmin = 1e-10;
   fParam_1.epsmax = 1e-4;
   fParam_1.eta = .5;
   fCost_1.tolcou = 1e-6;
   fCost_1.ieps = 2;
   fParam_1.nunisor = 30;
   fParam_1.nunishort = 48;
   fParam_1.nunap = 40;
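   // meaning of the hardcoded steering values above:
   //   epsmin/epsmax : bounds of the learning rate "eeps"
   //   eta           : momentum term used in the weight update (En_arriere)
   //   tolcou        : tolerance on the cost; training stops once the cost
   //                   drops below it (see Innit)
   //   ieps = 2      : use the decreasing learning-rate schedule of Fdecroi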

   ULog() << kINFO << "Total number of events for training: " << fParam_1.nevl << Endl;
   ULog() << kINFO << "Total number of training cycles    : " << fParam_1.nblearn << Endl;
   if (fParam_1.nevl > max_Events_) {
      printf("Error: number of learning events exceeds maximum: %i, %i ==> abort",
             fParam_1.nevl, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.nevt > max_Events_) {
      printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
             fParam_1.nevt, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      num = nodes[j-1];
      if (num < 2) {
         num = 2;
      }
      if (j == fParam_1.layerm && num != 2) {
         num = 2;
      }
      fNeur_1.neuron[j - 1] = num;
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      ULog() << kINFO << "Number of neurons in layer " << j << ": " << fNeur_1.neuron[j - 1] << Endl;
   }
   if (fNeur_1.neuron[fParam_1.layerm - 1] != 2) {
      printf("Error: wrong number of classes at output layer: %i != 2 ==> abort\n",
             fNeur_1.neuron[fParam_1.layerm - 1]);
      Arret("stop");
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (j = 1; j <= i__1; ++j) {
      fDel_1.coef[j - 1] = 1.;
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      fDel_1.temp[j - 1] = 1.;
   }
   fParam_1.ichoi = retrain;
   fParam_1.ndivis = rewrite;
   fDel_1.idde = 1;
   if (! (fParam_1.ichoi == 0 || fParam_1.ichoi == 1)) {
      printf( "Big troubles !!! \n" );
      Arret("new training or continued one !");
   }
   if (fParam_1.ichoi == 0) {
      ULog() << kINFO << "New training will be performed" << Endl;
   }
   else {
      printf("%s: New training will be continued from a weight file\n", fg_MethodName);
   }
   ncoef = 0;
   ntemp = 0;
   for (i__ = 1; i__ <= max_nNodes_; ++i__) {
      if (fDel_1.coef[i__ - 1] != (Float_t)0.) {
         ++ncoef;
      }
   }
   for (i__ = 1; i__ <= max_nLayers_; ++i__) {
      if (fDel_1.temp[i__ - 1] != (Float_t)0.) {
         ++ntemp;
      }
   }
   if (ncoef != fNeur_1.neuron[fParam_1.layerm - 1]) {
      Arret("entree error code 1: needs to be reported");
   }
   if (ntemp != fParam_1.layerm) {
      Arret("entree error code 2: needs to be reported");
   }
}

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////
/// Initialise the weights and thresholds with random values.

void TMVA::MethodCFMlpANN_Utils::Wini()
{
   Int_t i__1, i__2, i__3;
   Int_t i__, j;
   Int_t layer;

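   // initialise all weights (w) and thresholds (ww) of layers 2..layerm with
   // uniform random values in [-0.2, 0.2]: Sen3a() is uniform in [0, 1), so
   // (Sen3a()*2 - 1)*0.2 maps it onto that interval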
   i__1 = fParam_1.layerm;
   for (layer = 2; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer - 2];
      for (i__ = 1; i__ <= i__2; ++i__) {
         i__3 = fNeur_1.neuron[layer - 1];
         for (j = 1; j <= i__3; ++j) {
            w_ref(layer, j, i__) = (Sen3a() * 2. - 1.) * .2;
            ww_ref(layer, j) = (Sen3a() * 2. - 1.) * .2;
         }
      }
   }
}

#undef ww_ref
#undef w_ref

#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////
/// Forward propagation of one training event through the network.

void TMVA::MethodCFMlpANN_Utils::En_avant( Int_t *ievent )
{
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j;
   Int_t layer;

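   // forward propagation: copy event *ievent from the normalised training
   // table into the input layer, then for each subsequent layer compute
   //    x(l+1,j) = sum_i w(l+1,j,i) * y(l,i) + ww(l+1,j)
   // and pass it through the transfer function: y(l+1,j) = f(x(l+1,j))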
   i__1 = fNeur_1.neuron[0];
   for (i__ = 1; i__ <= i__1; ++i__) {
      y_ref(1, i__) = xeev_ref(*ievent, i__);
   }
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         x_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            x_ref(layer + 1, j) = x_ref(layer + 1, j) + y_ref(layer, i__) * w_ref(layer + 1, j, i__);
         }
         x_ref(layer + 1, j) = x_ref(layer + 1, j) + ww_ref(layer + 1, j);
         i__3 = layer + 1;
         Foncf(&i__3, &x_ref(layer + 1, j), &f);
         y_ref(layer + 1, j) = f;
      }
   }
}

#undef ww_ref
#undef y_ref
#undef x_ref
#undef w_ref
#undef xeev_ref

#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)

////////////////////////////////////////////////////////////////////////////////
/// Read the training events, determine the variable ranges and normalise
/// the inputs.

void TMVA::MethodCFMlpANN_Utils::Leclearn( Int_t *ktest, Double_t *tout2, Double_t *tin2 )
{
   Int_t i__1, i__2;

   Int_t i__, j, k, l;
   Int_t nocla[max_nNodes_], ikend;
   Double_t xpg[max_nVar_];

   *ktest = 0;
   i__1 = fParam_1.lclass;
   for (k = 1; k <= i__1; ++k) {
      nocla[k - 1] = 0;
   }
   i__1 = fParam_1.nvar;
   for (i__ = 1; i__ <= i__1; ++i__) {
      fVarn_1.xmin[i__ - 1] = 1e30;
      fVarn_1.xmax[i__ - 1] = -fVarn_1.xmin[i__ - 1];
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      DataInterface(tout2, tin2, &fg_100, &fg_0, &fParam_1.nevl, &fParam_1.nvar,
                    xpg, &fVarn_1.nclass[i__ - 1], &ikend);
      if (ikend == -1) {
         break;
      }

      CollectVar(&fParam_1.nvar, &fVarn_1.nclass[i__ - 1], xpg);

      i__2 = fParam_1.nvar;
      for (j = 1; j <= i__2; ++j) {
         xeev_ref(i__, j) = xpg[j - 1];
      }
      if (fVarn_1.iclass == 1) {
         i__2 = fParam_1.lclass;
         for (k = 1; k <= i__2; ++k) {
            if (fVarn_1.nclass[i__ - 1] == k) {
               ++nocla[k - 1];
            }
         }
      }
      i__2 = fParam_1.nvar;
      for (k = 1; k <= i__2; ++k) {
         if (xeev_ref(i__, k) < fVarn_1.xmin[k - 1]) {
            fVarn_1.xmin[k - 1] = xeev_ref(i__, k);
         }
         if (xeev_ref(i__, k) > fVarn_1.xmax[k - 1]) {
            fVarn_1.xmax[k - 1] = xeev_ref(i__, k);
         }
      }
   }

   if (fVarn_1.iclass == 1) {
      i__2 = fParam_1.lclass;
      for (k = 1; k <= i__2; ++k) {
         i__1 = fParam_1.lclass;
         for (l = 1; l <= i__1; ++l) {
            if (nocla[k - 1] != nocla[l - 1]) {
               *ktest = 1;
            }
         }
      }
   }
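   // centre and rescale each input variable onto [-1, 1]:
   //    x' = (x - (xmax + xmin)/2) / ((xmax - xmin)/2)
   // variables that are identically zero over the sample are left at zero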
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      i__2 = fParam_1.nvar;
      for (l = 1; l <= i__2; ++l) {
         if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (Float_t)0.) {
            xeev_ref(i__, l) = (Float_t)0.;
         }
         else {
            xeev_ref(i__, l) = xeev_ref(i__, l) - (fVarn_1.xmax[l - 1] + fVarn_1.xmin[l - 1]) / 2.;
            xeev_ref(i__, l) = xeev_ref(i__, l) / ((fVarn_1.xmax[l - 1] - fVarn_1.xmin[l - 1]) / 2.);
         }
      }
   }
}

#undef xeev_ref

#define delw_ref(a_1,a_2,a_3) fDel_1.delw[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define delww_ref(a_1,a_2) fDel_1.delww[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define del_ref(a_1,a_2) fDel_1.del[(a_2)*max_nLayers_ + a_1 - 7]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////
/// Backpropagation of the error for one training event.

void TMVA::MethodCFMlpANN_Utils::En_arriere( Int_t *ievent )
{
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j, k, l;
   Double_t df, uu;

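   // backpropagation for event *ievent: the targets are o = +1 for the
   // event's own class and -1 otherwise; with the transfer function of
   // Foncf the derivative is df/dx = (1 + f)(1 - f) / (2*temp), so the
   // output-layer gradient is del = df*(o - y)*coef, hidden gradients are
   // obtained by propagating del backwards through the weights, and the
   // update combines the learning rate eeps with a momentum term eta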
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      if (fVarn_1.nclass[*ievent - 1] == i__) {
         fNeur_1.o[i__ - 1] = 1.;
      }
      else {
         fNeur_1.o[i__ - 1] = -1.;
      }
   }
   l = fParam_1.layerm;
   i__1 = fNeur_1.neuron[l - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      f = y_ref(l, i__);
      df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
      del_ref(l, i__) = df * (fNeur_1.o[i__ - 1] - y_ref(l, i__)) * fDel_1.coef[i__ - 1];
      delww_ref(l, i__) = fParam_1.eeps * del_ref(l, i__);
      i__2 = fNeur_1.neuron[l - 2];
      for (j = 1; j <= i__2; ++j) {
         delw_ref(l, i__, j) = fParam_1.eeps * del_ref(l, i__) * y_ref(l - 1, j);
      }
   }
   for (l = fParam_1.layerm - 1; l >= 2; --l) {
      i__2 = fNeur_1.neuron[l - 1];
      for (i__ = 1; i__ <= i__2; ++i__) {
         uu = 0.;
         i__1 = fNeur_1.neuron[l];
         for (k = 1; k <= i__1; ++k) {
            uu += w_ref(l + 1, k, i__) * del_ref(l + 1, k);
         }
         Foncf(&l, &x_ref(l, i__), &f);
         df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
         del_ref(l, i__) = df * uu;
         delww_ref(l, i__) = fParam_1.eeps * del_ref(l, i__);
         i__1 = fNeur_1.neuron[l - 2];
         for (j = 1; j <= i__1; ++j) {
            delw_ref(l, i__, j) = fParam_1.eeps * del_ref(l, i__) * y_ref(l - 1, j);
         }
      }
   }
   i__1 = fParam_1.layerm;
   for (l = 2; l <= i__1; ++l) {
      i__2 = fNeur_1.neuron[l - 1];
      for (i__ = 1; i__ <= i__2; ++i__) {
         deltaww_ref(l, i__) = delww_ref(l, i__) + fParam_1.eta * deltaww_ref(l, i__);
         ww_ref(l, i__) = ww_ref(l, i__) + deltaww_ref(l, i__);
         i__3 = fNeur_1.neuron[l - 2];
         for (j = 1; j <= i__3; ++j) {
            delta_ref(l, i__, j) = delw_ref(l, i__, j) + fParam_1.eta * delta_ref(l, i__, j);
            w_ref(l, i__, j) = w_ref(l, i__, j) + delta_ref(l, i__, j);
         }
      }
   }
}

#undef deltaww_ref
#undef del_ref
#undef ww_ref
#undef delww_ref
#undef delta_ref
#undef y_ref
#undef x_ref
#undef w_ref
#undef delw_ref

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////
/// Write weights to file.

void TMVA::MethodCFMlpANN_Utils::Out( Int_t *iii, Int_t *maxcycle )
{
   // write weights to file

   if (*iii == *maxcycle) {
      // now in MethodCFMlpANN.cxx
   }
}

#undef ww_ref
#undef w_ref

#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////
/// Initialisation and main training loop.

void TMVA::MethodCFMlpANN_Utils::Innit( char *det, Double_t *tout2, Double_t *tin2, Int_t /*det_len*/ )
{
   // initialization
   Int_t i__1, i__2, i__3;

   Int_t i__, j;
   Int_t nevod, layer, ktest, i1, nrest;
   Int_t ievent(0);
   Int_t kkk;
   Double_t xxx = 0.0, yyy = 0.0;

   Leclearn(&ktest, tout2, tin2);
   Lecev2(&ktest, tout2, tin2);
   if (ktest == 1) {
      printf( " .... strange to be here (1) ... \n");
      std::exit(1);
   }
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         deltaww_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            delta_ref(layer + 1, j, i__) = 0.;
         }
      }
   }
   if (fParam_1.ichoi == 1) {
      Inl();
   }
   else {
      Wini();
   }
   kkk = 0;
   i__3 = fParam_1.nblearn;
   Timer timer( i__3, "CFMlpANN" );
   Int_t num = i__3/100;

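   // epoch loop: in random mode (iclass == 2) the event is drawn uniformly
   // with Sen3a(); in ordered mode (iclass == 1) events alternate between
   // the classes; every ndivis cycles the cost is evaluated, and the run
   // stops early once it falls below the tolerance tolcou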
   for (i1 = 1; i1 <= i__3; ++i1) {

      if ( ( num>0 && (i1-1)%num == 0) || (i1 == i__3) ) timer.DrawProgressBar( i1-1 );

      i__2 = fParam_1.nevl;
      for (i__ = 1; i__ <= i__2; ++i__) {
         ++kkk;
         if (fCost_1.ieps == 2) {
            fParam_1.eeps = Fdecroi(&kkk);
         }
         if (fCost_1.ieps == 1) {
            fParam_1.eeps = fParam_1.epsmin;
         }
         Bool_t doCont = kTRUE;
         if (fVarn_1.iclass == 2) {
            ievent = (Int_t) ((Double_t) fParam_1.nevl * Sen3a());
            if (ievent == 0) {
               doCont = kFALSE;
            }
         }
         if (doCont) {
            if (fVarn_1.iclass == 1) {
               nevod = fParam_1.nevl / fParam_1.lclass;
               nrest = i__ % fParam_1.lclass;
               fParam_1.ndiv = i__ / fParam_1.lclass;
               if (nrest != 0) {
                  ievent = fParam_1.ndiv + 1 + (fParam_1.lclass - nrest) * nevod;
               }
               else {
                  ievent = fParam_1.ndiv;
               }
            }
            En_avant(&ievent);
            En_arriere(&ievent);
         }
      }
      yyy = 0.;
      if (i1 % fParam_1.ndivis == 0 || i1 == 1 || i1 == fParam_1.nblearn) {
         Cout(&i1, &xxx);
         Cout2(&i1, &yyy);
         GraphNN(&i1, &xxx, &yyy, det, (Int_t)20);
         Out(&i1, &fParam_1.nblearn);
      }
      if (xxx < fCost_1.tolcou) {
         GraphNN(&fParam_1.nblearn, &xxx, &yyy, det, (Int_t)20);
         Out(&fParam_1.nblearn, &fParam_1.nblearn);
         break;
      }
   }
}

#undef deltaww_ref
#undef delta_ref

////////////////////////////////////////////////////////////////////////////////
/// Sanity checks of the network parameters against the hardcoded maxima.

void TMVA::MethodCFMlpANN_Utils::TestNN()
{
   Int_t i__1;

   Int_t i__;
   Int_t ktest;

   ktest = 0;
   if (fParam_1.layerm > max_nLayers_) {
      ktest = 1;
      printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
             fParam_1.layerm, max_nLayers_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.nevl > max_Events_) {
      ktest = 1;
      printf("Error: number of training events exceeds maximum: %i, %i ==> abort",
             fParam_1.nevl, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.nevt > max_Events_) {
      printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
             fParam_1.nevt, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.lclass < fNeur_1.neuron[fParam_1.layerm - 1]) {
      ktest = 1;
      printf("Error: wrong number of classes at output layer: %i != %i ==> abort\n",
             fNeur_1.neuron[fParam_1.layerm - 1], fParam_1.lclass);
      Arret("problem needs to be reported ");
   }
   if (fParam_1.nvar > max_nVar_) {
      ktest = 1;
      printf("Error: number of variables exceeds maximum: %i, %i ==> abort",
             fParam_1.nvar, fg_max_nVar_ );
      Arret("modification of mlpl3_param_lim.inc is needed");
   }
   i__1 = fParam_1.layerm;
   for (i__ = 1; i__ <= i__1; ++i__) {
      if (fNeur_1.neuron[i__ - 1] > max_nNodes_) {
         ktest = 1;
         printf("Error: number of neurons at layer exceeds maximum: %i, %i ==> abort",
                i__, fg_max_nNodes_ );
      }
   }
   if (ktest == 1) {
      printf( " .... strange to be here (2) ... \n");
      std::exit(1);
   }
}

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////
/// Compute the quadratic cost on the training sample.

void TMVA::MethodCFMlpANN_Utils::Cout( Int_t * /*i1*/, Double_t *xxx )
{
   Int_t i__1, i__2;
   Double_t d__1;

   Double_t c__;
   Int_t i__, j;

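   // quadratic cost over the training sample, with targets o = +/-1:
   //    C = sum_events sum_j coef_j * (y_j - o_j)^2 / (2 * nevl * lclass)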
   c__ = 0.;
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         if (fVarn_1.nclass[i__ - 1] == j) {
            fNeur_1.o[j - 1] = 1.;
         }
         else {
            fNeur_1.o[j - 1] = -1.;
         }
         // computing 2nd power
         d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
         c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
      }
   }
   c__ /= (Double_t) (fParam_1.nevl * fParam_1.lclass) * 2.;
   *xxx = c__;
   fCost_1.ancout = c__;
}

#undef y_ref

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////
/// Weight bookkeeping for training continued from a weight file; the body
/// is a remnant of the original FORTRAN printout and has no effect.

void TMVA::MethodCFMlpANN_Utils::Inl()
{
   Int_t i__1, i__2;

   Int_t jmax, k, layer, kk, nq, nr;

   i__1 = fParam_1.nvar;
   i__1 = fParam_1.layerm;
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      nq = fNeur_1.neuron[layer] / 10;
      nr = fNeur_1.neuron[layer] - nq * 10;
      if (nr == 0) {
         kk = nq;
      }
      else {
         kk = nq + 1;
      }
      i__2 = kk;
      for (k = 1; k <= i__2; ++k) {
         // jmin = k * 10 - 9;
         jmax = k * 10;
         if (fNeur_1.neuron[layer] < jmax) {
            jmax = fNeur_1.neuron[layer];
         }
         // i__3 = fNeur_1.neuron[layer - 1];
      }
   }
}

#undef ww_ref
#undef w_ref

////////////////////////////////////////////////////////////////////////////////
/// Linearly decreasing learning-rate schedule.

Double_t TMVA::MethodCFMlpANN_Utils::Fdecroi( Int_t *i__ )
{
   Double_t ret_val;

   Double_t aaa, bbb;

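   // the learning rate decreases linearly with the update counter *i__,
   // from epsmax at the first update to epsmin at the last one
   // (nblearn*nevl updates in total):  eps(i) = aaa*i + bbb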
   aaa = (fParam_1.epsmin - fParam_1.epsmax) / (Double_t) (fParam_1.nblearn * fParam_1.nevl - 1);
   bbb = fParam_1.epsmax - aaa;
   ret_val = aaa * (Double_t) (*i__) + bbb;
   return ret_val;
}

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////
/// Compute the mean network response for each class and place the decision
/// cut halfway between them.

void TMVA::MethodCFMlpANN_Utils::GraphNN( Int_t *ilearn, Double_t * /*xxx*/,
                                          Double_t * /*yyy*/, char * /*det*/, Int_t /*det_len*/ )
{
   Int_t i__1, i__2;

   Double_t xmok[max_nNodes_];
   // Float_t xpaw;
   Double_t xmko[max_nNodes_];
   Int_t i__, j;
   Int_t ix;
   // Int_t jjj;
   // Float_t vbn[10];
   Int_t nko[max_nNodes_], nok[max_nNodes_];

   // for (i__ = 1; i__ <= 10; ++i__) {
   //    vbn[i__ - 1] = (Float_t)0.;
   // }
   if (*ilearn == 1) {
      // AH: removed output
   }
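   // for each output neuron accumulate the mean response for events of the
   // matching class (xmok) and for events of the other classes (xmko); the
   // decision cut is then placed halfway between the two means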
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      nok[i__ - 1] = 0;
      nko[i__ - 1] = 0;
      xmok[i__ - 1] = 0.;
      xmko[i__ - 1] = 0.;
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         // xpaw = (Float_t) y_ref(fParam_1.layerm, j);
         if (fVarn_1.nclass[i__ - 1] == j) {
            ++nok[j - 1];
            xmok[j - 1] += y_ref(fParam_1.layerm, j);
         }
         else {
            ++nko[j - 1];
            xmko[j - 1] += y_ref(fParam_1.layerm, j);
            // jjj = j + fNeur_1.neuron[fParam_1.layerm - 1];
         }
         // if (j <= 9) {
         //    vbn[j - 1] = xpaw;
         // }
      }
      // vbn[9] = (Float_t) fVarn_1.nclass[i__ - 1];
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (j = 1; j <= i__1; ++j) {
      xmok[j - 1] /= (Double_t) nok[j - 1];
      xmko[j - 1] /= (Double_t) nko[j - 1];
      fNeur_1.cut[j - 1] = (xmok[j - 1] + xmko[j - 1]) / 2.;
   }
   ix = fNeur_1.neuron[fParam_1.layerm - 1];
   i__1 = ix;
}

#undef y_ref

////////////////////////////////////////////////////////////////////////////////
/// Portable random-number generator, uniform in [0, 1).

Double_t TMVA::MethodCFMlpANN_Utils::Sen3a( void )
{
   // initialized data
   Int_t m12 = 4096;
   Double_t f1 = 2.44140625e-4;
   Double_t f2 = 5.96046448e-8;
   Double_t f3 = 1.45519152e-11;
   Int_t j1 = 3823;
   Int_t j2 = 4006;
   Int_t j3 = 2903;
   static Int_t fg_i1 = 3823;
   static Int_t fg_i2 = 4006;
   static Int_t fg_i3 = 2903;

   Double_t ret_val;
   Int_t k3, l3, k2, l2, k1, l1;

   // reference: K. D. Senne, J. Stochastics, Vol. 1, No. 3 (1974), pp. 215-38
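   // triple-word multiplicative congruential generator working on 12-bit
   // words (m12 = 2^12): the state (fg_i1, fg_i2, fg_i3) represents one
   // 36-bit integer, and f1, f2, f3 = 2^-12, 2^-24, 2^-36 convert it to a
   // uniform deviate in [0, 1)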
   k3 = fg_i3 * j3;
   l3 = k3 / m12;
   k2 = fg_i2 * j3 + fg_i3 * j2 + l3;
   l2 = k2 / m12;
   k1 = fg_i1 * j3 + fg_i2 * j2 + fg_i3 * j1 + l2;
   l1 = k1 / m12;
   fg_i1 = k1 - l1 * m12;
   fg_i2 = k2 - l2 * m12;
   fg_i3 = k3 - l3 * m12;
   ret_val = f1 * (Double_t) fg_i1 + f2 * (Double_t) fg_i2 + f3 * (Double_t) fg_i3;

   return ret_val;
}

////////////////////////////////////////////////////////////////////////////////
/// Transfer (activation) function.

void TMVA::MethodCFMlpANN_Utils::Foncf( Int_t *i__, Double_t *u, Double_t *f )
{
   // [needs to be checked]
   Double_t yy;

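   // transfer function f(u) = (1 - exp(-u/T)) / (1 + exp(-u/T)) = tanh(u/(2T))
   // with layer temperature T = fDel_1.temp[*i__ - 1]; the argument is
   // clamped at |u/T| = 170 to avoid overflow in the exponential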
   if (*u / fDel_1.temp[*i__ - 1] > 170.) {
      *f = .99999999989999999;
   }
   else if (*u / fDel_1.temp[*i__ - 1] < -170.) {
      *f = -.99999999989999999;
   }
   else {
      yy = TMath::Exp(-(*u) / fDel_1.temp[*i__ - 1]);
      *f = (1. - yy) / (yy + 1.);
   }
}

#undef w_ref

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

////////////////////////////////////////////////////////////////////////////////
/// Compute the quadratic cost on the test sample.

void TMVA::MethodCFMlpANN_Utils::Cout2( Int_t * /*i1*/, Double_t *yyy )
{
   Int_t i__1, i__2;
   Double_t d__1;

   Double_t c__;
   Int_t i__, j;

   c__ = 0.;
   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant2(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         if (fVarn_1.mclass[i__ - 1] == j) {
            fNeur_1.o[j - 1] = 1.;
         }
         else {
            fNeur_1.o[j - 1] = -1.;
         }
         // computing 2nd power
         d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
         c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
      }
   }
   c__ /= (Double_t) (fParam_1.nevt * fParam_1.lclass) * 2.;
   *yyy = c__;
}

#undef y_ref

#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)

////////////////////////////////////////////////////////////////////////////////
/// Read the test events and normalise them with the ranges determined from
/// the training sample.

void TMVA::MethodCFMlpANN_Utils::Lecev2( Int_t *ktest, Double_t *tout2, Double_t *tin2 )
{
   Int_t i__1, i__2;

   Int_t i__, j, l;
   // Int_t mocla[max_nNodes_];
   Int_t ikend;
   Double_t xpg[max_nVar_];

   /* NTRAIN: Nb of events used during the learning */
   /* NTEST:  Nb of events used for the test */
   /* TIN:    Input variables */
   /* TOUT:   type of the event */

   *ktest = 0;
   i__1 = fParam_1.lclass;
   // for (k = 1; k <= i__1; ++k) {
   //    mocla[k - 1] = 0;
   // }
   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      DataInterface(tout2, tin2, &fg_999, &fg_0, &fParam_1.nevt, &fParam_1.nvar,
                    xpg, &fVarn_1.mclass[i__ - 1], &ikend);

      if (ikend == -1) {
         break;
      }

      i__2 = fParam_1.nvar;
      for (j = 1; j <= i__2; ++j) {
         xx_ref(i__, j) = xpg[j - 1];
      }
   }

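   // apply to the test sample the same [-1, 1] rescaling as in Leclearn,
   // reusing the xmin/xmax ranges determined from the training sample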
   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      i__2 = fParam_1.nvar;
      for (l = 1; l <= i__2; ++l) {
         if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (Float_t)0.) {
            xx_ref(i__, l) = (Float_t)0.;
         }
         else {
            xx_ref(i__, l) = xx_ref(i__, l) - (fVarn_1.xmax[l - 1] + fVarn_1.xmin[l - 1]) / 2.;
            xx_ref(i__, l) = xx_ref(i__, l) / ((fVarn_1.xmax[l - 1] - fVarn_1.xmin[l - 1]) / 2.);
         }
      }
   }
}

#undef xx_ref

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)

////////////////////////////////////////////////////////////////////////////////
/// Forward propagation of one test event through the network.

void TMVA::MethodCFMlpANN_Utils::En_avant2( Int_t *ievent )
{
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j;
   Int_t layer;

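   // identical forward propagation to En_avant, but reading the event from
   // the normalised test-sample table (xx_ref / fVarn3_1)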
   i__1 = fNeur_1.neuron[0];
   for (i__ = 1; i__ <= i__1; ++i__) {
      y_ref(1, i__) = xx_ref(*ievent, i__);
   }
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         x_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            x_ref(layer + 1, j) = x_ref(layer + 1, j) + y_ref(layer, i__) * w_ref(layer + 1, j, i__);
         }
         x_ref(layer + 1, j) = x_ref(layer + 1, j) + ww_ref(layer + 1, j);
         i__3 = layer + 1;
         Foncf(&i__3, &x_ref(layer + 1, j), &f);
         y_ref(layer + 1, j) = f;
      }
   }
}

#undef xx_ref
#undef ww_ref
#undef y_ref
#undef x_ref
#undef w_ref

////////////////////////////////////////////////////////////////////////////////
/// Fatal error occurred: print the message and stop execution.

void TMVA::MethodCFMlpANN_Utils::Arret( const char* mot )
{
   printf("%s: %s\n", fg_MethodName, mot);
   std::exit(1);
}

////////////////////////////////////////////////////////////////////////////////
/// Collect the input variables (the body is disabled).

void TMVA::MethodCFMlpANN_Utils::CollectVar( Int_t * /*nvar*/, Int_t * /*class__*/, Double_t * /*xpg*/ )
{
   // Int_t i__1;

   // Int_t i__;
   // Float_t x[201];

   // // Parameter adjustments
   // --xpg;

   // for (i__ = 1; i__ <= 201; ++i__) {
   //    x[i__ - 1] = 0.0;
   // }
   // x[0] = (Float_t) (*class__);
   // i__1 = *nvar;
   // for (i__ = 1; i__ <= i__1; ++i__) {
   //    x[i__] = (Float_t) xpg[i__];
   // }
}