MethodCFMlpANN_Utils.cxx
// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis
 * Package: TMVA
 * Class  : TMVA::MethodCFMlpANN_utils
 * Web    : http://tmva.sourceforge.net
 *
 * Reference for the original FORTRAN version "mlpl3.F":
 *      Authors  : J. Proriol and contributions from ALEPH-Clermont-Ferrand
 *                 Team members
 *      Copyright: Laboratoire Physique Corpusculaire
 *                 Universite de Blaise Pascal, IN2P3/CNRS
 *
 * Modifications by present authors:
 *      use dynamical data tables (not for all of them, but for the big ones)
 *
 * Description:
 *      Utility routine translated from the original mlpl3.F FORTRAN routine
 *
 *      MultiLayerPerceptron : Training code
 *
 *        NTRAIN: Nb of events used during the learning
 *        NTEST : Nb of events used for the test
 *        TIN   : Input variables
 *        TOUT  : type of the event
 *
 * --------------------------------------------------------------------------------
 *
 * Authors (alphabetical):
 *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland
 *      Xavier Prudent  <prudent@lapp.in2p3.fr>  - LAPP, France
 *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany
 *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada
 *
 * Copyright (c) 2005:
 *      CERN, Switzerland
 *      U. of Victoria, Canada
 *      MPI-K Heidelberg, Germany
 *      LAPP, Annecy, France
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted according to the terms listed in LICENSE
 * (http://tmva.sourceforge.net/LICENSE)
 **********************************************************************************/

//_______________________________________________________________________
//
// Implementation of the Clermont-Ferrand artificial neural network
//
// Reference for the original FORTRAN version "mlpl3.F":
// Authors  : J. Proriol and contributions from ALEPH-Clermont-Ferrand
//            Team members
// Copyright: Laboratoire Physique Corpusculaire
//            Universite de Blaise Pascal, IN2P3/CNRS
//_______________________________________________________________________

#include <string>
#include <iostream>
#include <cstdlib>

#include "TMath.h"
#include "TString.h"

#include "TMVA/MethodCFMlpANN_Utils.h"
#include "TMVA/MsgLogger.h"
#include "TMVA/Timer.h"
#include "TMVA/Types.h"

using std::cout;
using std::endl;

ClassImp(TMVA::MethodCFMlpANN_Utils)

Int_t             TMVA::MethodCFMlpANN_Utils::fg_100         = 100;
Int_t             TMVA::MethodCFMlpANN_Utils::fg_0           = 0;
const Int_t       TMVA::MethodCFMlpANN_Utils::fg_max_nVar_   = max_nVar_;
const Int_t       TMVA::MethodCFMlpANN_Utils::fg_max_nNodes_ = max_nNodes_;
Int_t             TMVA::MethodCFMlpANN_Utils::fg_999         = 999;
const char* const TMVA::MethodCFMlpANN_Utils::fg_MethodName  = "--- CFMlpANN ";

TMVA::MethodCFMlpANN_Utils::MethodCFMlpANN_Utils()
{
   // default constructor
   Int_t i(0);
   for(i=0; i<max_nVar_;++i) fVarn_1.xmin[i] = 0;
   fCost_1.ancout = 0;
   fCost_1.ieps = 0;
   fCost_1.tolcou = 0;

   for(i=0; i<max_nNodes_;++i) fDel_1.coef[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fDel_1.del[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fDel_1.delta[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fDel_1.delw[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fDel_1.delww[i] = 0;
   fDel_1.demin = 0;
   fDel_1.demax = 0;
   fDel_1.idde = 0;
   for(i=0; i<max_nLayers_;++i) fDel_1.temp[i] = 0;

   for(i=0; i<max_nNodes_;++i) fNeur_1.cut[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.deltaww[i] = 0;
   for(i=0; i<max_nLayers_;++i) fNeur_1.neuron[i] = 0;
   for(i=0; i<max_nNodes_;++i) fNeur_1.o[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fNeur_1.w[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.ww[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.x[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.y[i] = 0;

   fParam_1.eeps = 0;
   fParam_1.epsmin = 0;
   fParam_1.epsmax = 0;
   fParam_1.eta = 0;
   fParam_1.ichoi = 0;
   fParam_1.itest = 0;
   fParam_1.layerm = 0;
   fParam_1.lclass = 0;
   fParam_1.nblearn = 0;
   fParam_1.ndiv = 0;
   fParam_1.ndivis = 0;
   fParam_1.nevl = 0;
   fParam_1.nevt = 0;
   fParam_1.nunap = 0;
   fParam_1.nunilec = 0;
   fParam_1.nunishort = 0;
   fParam_1.nunisor = 0;
   fParam_1.nvar = 0;

   fVarn_1.iclass = 0;
   for(i=0; i<max_Events_;++i) fVarn_1.mclass[i] = 0;
   for(i=0; i<max_Events_;++i) fVarn_1.nclass[i] = 0;
   for(i=0; i<max_nVar_;++i) fVarn_1.xmax[i] = 0;

   fLogger = 0;
}

TMVA::MethodCFMlpANN_Utils::~MethodCFMlpANN_Utils()
{
   // destructor
}

void TMVA::MethodCFMlpANN_Utils::Train_nn( Double_t *tin2, Double_t *tout2, Int_t *ntrain,
                                           Int_t *ntest, Int_t *nvar2, Int_t *nlayer,
                                           Int_t *nodes, Int_t *ncycle )
{
   // training interface - called from the MethodCFMlpANN class object

   // sanity checks
   if (*ntrain + *ntest > max_Events_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of training + testing" \
              " events exceeds hardcoded maximum - reset to maximum allowed number\n");
      // scale both samples by the same floating-point factor; the original
      // integer division truncated to zero and wiped out the sample sizes
      Double_t scale = Double_t(max_Events_)/(*ntrain + *ntest);
      *ntrain = Int_t(*ntrain*scale);
      *ntest  = Int_t(*ntest *scale);
   }
   if (*nvar2 > max_nVar_) {
      printf( "*** CFMlpANN_f2c: ERROR in Train_nn: number of variables" \
              " exceeds hardcoded maximum ==> abort\n");
      std::exit(1);
   }
   if (*nlayer > max_nLayers_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of layers" \
              " exceeds hardcoded maximum - reset to maximum allowed number\n");
      *nlayer = max_nLayers_;
   }
   if (*nodes > max_nNodes_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of nodes" \
              " exceeds hardcoded maximum - reset to maximum allowed number\n");
      *nodes = max_nNodes_;
   }

   // create dynamic data tables (AH)
   fVarn2_1.Create( *ntrain + *ntest, *nvar2 );
   fVarn3_1.Create( *ntrain + *ntest, *nvar2 );

   // Int_t imax;
   char det[20];

   Entree_new(nvar2, det, ntrain, ntest, nlayer, nodes, ncycle, (Int_t)20);
   if (fNeur_1.neuron[fParam_1.layerm - 1] == 1) {
      // imax = 2;
      fParam_1.lclass = 2;
   }
   else {
      // imax = fNeur_1.neuron[fParam_1.layerm - 1] << 1;
      fParam_1.lclass = fNeur_1.neuron[fParam_1.layerm - 1];
   }
   fParam_1.nvar = fNeur_1.neuron[0];
   TestNN();
   Innit(det, tout2, tin2, (Int_t)20);

   // delete data tables
   fVarn2_1.Delete();
   fVarn3_1.Delete();
}
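
// Rough usage sketch (hypothetical, for illustration only; in TMVA this
// entry point is invoked by MethodCFMlpANN::Train(), which fills tin2/tout2
// beforehand and serves the events through the DataInterface callback):
//
//    Int_t ntrain = 1000, ntest = 500, nvar = 4;   // hypothetical sizes
//    Int_t nlayer = 3, nodes = 10, ncycle = 100;   // network/training choices
//    ann.Train_nn( tin2, tout2, &ntrain, &ntest, &nvar, &nlayer, &nodes, &ncycle );
//
// All arguments are passed by pointer (f2c heritage); ntrain, ntest, nlayer
// and nodes may be clipped to the hard-coded maxima on return.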

void TMVA::MethodCFMlpANN_Utils::Entree_new( Int_t *, char * /*det*/, Int_t *ntrain,
                                             Int_t *ntest, Int_t *numlayer, Int_t *nodes,
                                             Int_t *numcycle, Int_t /*det_len*/)
{
   // first initialisation of the ANN
   Int_t i__1;

   Int_t rewrite, i__, j, ncoef;
   Int_t ntemp, num, retrain;

   /* NTRAIN: Nb of events used during the learning */
   /* NTEST : Nb of events used for the test         */
   /* TIN   : Input variables                        */
   /* TOUT  : type of the event                      */

   fCost_1.ancout = 1e30;

   /* .............. HardCoded Values .................... */
   retrain = 0;
   rewrite = 1000;
   for (i__ = 1; i__ <= max_nNodes_; ++i__) {
      fDel_1.coef[i__ - 1] = (Float_t)0.;
   }
   for (i__ = 1; i__ <= max_nLayers_; ++i__) {
      fDel_1.temp[i__ - 1] = (Float_t)0.;
   }
   fParam_1.layerm = *numlayer;
   if (fParam_1.layerm > max_nLayers_) {
      printf("Error: number of layers exceeds maximum: %i, %i ==> abort\n",
             fParam_1.layerm, max_nLayers_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   fParam_1.nevl = *ntrain;
   fParam_1.nevt = *ntest;
   fParam_1.nblearn = *numcycle;
   fVarn_1.iclass = 2;
   fParam_1.nunilec = 10;
   fParam_1.epsmin = 1e-10;
   fParam_1.epsmax = 1e-4;
   fParam_1.eta = .5;
   fCost_1.tolcou = 1e-6;
   fCost_1.ieps = 2;
   fParam_1.nunisor = 30;
   fParam_1.nunishort = 48;
   fParam_1.nunap = 40;

   ULog() << kINFO << "Total number of events for training: " << fParam_1.nevl << Endl;
   ULog() << kINFO << "Total number of training cycles    : " << fParam_1.nblearn << Endl;
   if (fParam_1.nevl > max_Events_) {
      printf("Error: number of learning events exceeds maximum: %i, %i ==> abort\n",
             fParam_1.nevl, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.nevt > max_Events_) {
      printf("Error: number of testing events exceeds maximum: %i, %i ==> abort\n",
             fParam_1.nevt, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      num = nodes[j-1];
      if (num < 2) {
         num = 2;
      }
      if (j == fParam_1.layerm && num != 2) {
         num = 2;
      }
      fNeur_1.neuron[j - 1] = num;
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      ULog() << kINFO << "Number of neurons for layer(" << j << "): " << fNeur_1.neuron[j - 1] << Endl;
   }
   if (fNeur_1.neuron[fParam_1.layerm - 1] != 2) {
      printf("Error: wrong number of classes at output layer: %i != 2 ==> abort\n",
             fNeur_1.neuron[fParam_1.layerm - 1]);
      Arret("stop");
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (j = 1; j <= i__1; ++j) {
      fDel_1.coef[j - 1] = 1.;
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      fDel_1.temp[j - 1] = 1.;
   }
   fParam_1.ichoi = retrain;
   fParam_1.ndivis = rewrite;
   fDel_1.idde = 1;
   if (! (fParam_1.ichoi == 0 || fParam_1.ichoi == 1)) {
      printf( "Big troubles !!! \n" );
      Arret("new training or continued one !");
   }
   if (fParam_1.ichoi == 0) {
      ULog() << kINFO << "New training will be performed" << Endl;
   }
   else {
      printf("%s: New training will be continued from a weight file\n", fg_MethodName);
   }
   ncoef = 0;
   ntemp = 0;
   for (i__ = 1; i__ <= max_nNodes_; ++i__) {
      if (fDel_1.coef[i__ - 1] != (Float_t)0.) {
         ++ncoef;
      }
   }
   for (i__ = 1; i__ <= max_nLayers_; ++i__) {
      if (fDel_1.temp[i__ - 1] != (Float_t)0.) {
         ++ntemp;
      }
   }
   if (ncoef != fNeur_1.neuron[fParam_1.layerm - 1]) {
      Arret("entree error code 1: needs to be reported");
   }
   if (ntemp != fParam_1.layerm) {
      Arret("entree error code 2: needs to be reported");
   }
}
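
// Note on the layer setup in Entree_new above: every layer is forced to
// contain at least two neurons, and the last (output) layer exactly two -
// one node per class (signal and background); the printf/Arret check after
// the loop enforces this invariant.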

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Wini()
{
   // initialise the weights and bias weights with small random values
   Int_t i__1, i__2, i__3;
   Int_t i__, j;
   Int_t layer;

   i__1 = fParam_1.layerm;
   for (layer = 2; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer - 2];
      for (i__ = 1; i__ <= i__2; ++i__) {
         i__3 = fNeur_1.neuron[layer - 1];
         for (j = 1; j <= i__3; ++j) {
            w_ref(layer, j, i__) = (Sen3a() * 2. - 1.) * .2;
            ww_ref(layer, j) = (Sen3a() * 2. - 1.) * .2;
         }
      }
   }
}
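
// The initial weights above are drawn uniformly in [-0.2, 0.2]: Sen3a()
// returns a uniform deviate in [0, 1), so (Sen3a()*2 - 1)*0.2 maps it onto
// a small symmetric interval around zero.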

#undef ww_ref
#undef w_ref

#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::En_avant( Int_t *ievent )
{
   // forward propagation of one training event through the network
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j;
   Int_t layer;

   i__1 = fNeur_1.neuron[0];
   for (i__ = 1; i__ <= i__1; ++i__) {
      y_ref(1, i__) = xeev_ref(*ievent, i__);
   }
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         x_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            x_ref(layer + 1, j) = x_ref(layer + 1, j) + y_ref(layer, i__) * w_ref(layer + 1, j, i__);
         }
         x_ref(layer + 1, j) = x_ref(layer + 1, j) + ww_ref(layer + 1, j);
         i__3 = layer + 1;
         Foncf(&i__3, &x_ref(layer + 1, j), &f);
         y_ref(layer + 1, j) = f;
      }
   }
}
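
// En_avant implements the usual forward pass: for each node j of layer l+1
// the activation is
//
//    x(l+1, j) = sum_i w(l+1, j, i) * y(l, i) + ww(l+1, j)
//    y(l+1, j) = f( x(l+1, j) )
//
// where ww holds the bias weights and f is the transfer function evaluated
// by Foncf() further below.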

#undef ww_ref
#undef y_ref
#undef x_ref
#undef w_ref
#undef xeev_ref

#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)

void TMVA::MethodCFMlpANN_Utils::Leclearn( Int_t *ktest, Double_t *tout2, Double_t *tin2 )
{
   // read the training events, find the variable ranges and normalise
   // all inputs to the interval [-1, 1]
   Int_t i__1, i__2;

   Int_t i__, j, k, l;
   Int_t nocla[max_nNodes_], ikend;
   Double_t xpg[max_nVar_];

   *ktest = 0;
   i__1 = fParam_1.lclass;
   for (k = 1; k <= i__1; ++k) {
      nocla[k - 1] = 0;
   }
   i__1 = fParam_1.nvar;
   for (i__ = 1; i__ <= i__1; ++i__) {
      fVarn_1.xmin[i__ - 1] = 1e30;
      fVarn_1.xmax[i__ - 1] = -fVarn_1.xmin[i__ - 1];
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      DataInterface(tout2, tin2, &fg_100, &fg_0, &fParam_1.nevl, &fParam_1.nvar,
                    xpg, &fVarn_1.nclass[i__ - 1], &ikend);
      if (ikend == -1) {
         break;
      }

      CollectVar(&fParam_1.nvar, &fVarn_1.nclass[i__ - 1], xpg);

      i__2 = fParam_1.nvar;
      for (j = 1; j <= i__2; ++j) {
         xeev_ref(i__, j) = xpg[j - 1];
      }
      if (fVarn_1.iclass == 1) {
         i__2 = fParam_1.lclass;
         for (k = 1; k <= i__2; ++k) {
            if (fVarn_1.nclass[i__ - 1] == k) {
               ++nocla[k - 1];
            }
         }
      }
      i__2 = fParam_1.nvar;
      for (k = 1; k <= i__2; ++k) {
         if (xeev_ref(i__, k) < fVarn_1.xmin[k - 1]) {
            fVarn_1.xmin[k - 1] = xeev_ref(i__, k);
         }
         if (xeev_ref(i__, k) > fVarn_1.xmax[k - 1]) {
            fVarn_1.xmax[k - 1] = xeev_ref(i__, k);
         }
      }
   }

   if (fVarn_1.iclass == 1) {
      i__2 = fParam_1.lclass;
      for (k = 1; k <= i__2; ++k) {
         i__1 = fParam_1.lclass;
         for (l = 1; l <= i__1; ++l) {
            if (nocla[k - 1] != nocla[l - 1]) {
               *ktest = 1;
            }
         }
      }
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      i__2 = fParam_1.nvar;
      for (l = 1; l <= i__2; ++l) {
         if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (Float_t)0.) {
            xeev_ref(i__, l) = (Float_t)0.;
         }
         else {
            xeev_ref(i__, l) = xeev_ref(i__, l) - (fVarn_1.xmax[l - 1] + fVarn_1.xmin[l - 1]) / 2.;
            xeev_ref(i__, l) = xeev_ref(i__, l) / ((fVarn_1.xmax[l - 1] - fVarn_1.xmin[l - 1]) / 2.);
         }
      }
   }
}
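
// The normalisation at the end of Leclearn maps each input variable
// linearly onto [-1, 1], using the extrema found in the training sample:
//
//    x' = ( x - (xmax + xmin)/2 ) / ( (xmax - xmin)/2 )
//
// Variables with xmin = xmax = 0 are set to zero to avoid a division by
// zero; the same transformation is applied to the test sample in Lecev2.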

#undef xeev_ref

#define delw_ref(a_1,a_2,a_3) fDel_1.delw[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define delww_ref(a_1,a_2) fDel_1.delww[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define del_ref(a_1,a_2) fDel_1.del[(a_2)*max_nLayers_ + a_1 - 7]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::En_arriere( Int_t *ievent )
{
   // backward propagation of the output error for one event
   // (gradient descent with momentum term)
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j, k, l;
   Double_t df, uu;

   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      if (fVarn_1.nclass[*ievent - 1] == i__) {
         fNeur_1.o[i__ - 1] = 1.;
      }
      else {
         fNeur_1.o[i__ - 1] = -1.;
      }
   }
   l = fParam_1.layerm;
   i__1 = fNeur_1.neuron[l - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      f = y_ref(l, i__);
      df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
      del_ref(l, i__) = df * (fNeur_1.o[i__ - 1] - y_ref(l, i__)) * fDel_1.coef[i__ - 1];
      delww_ref(l, i__) = fParam_1.eeps * del_ref(l, i__);
      i__2 = fNeur_1.neuron[l - 2];
      for (j = 1; j <= i__2; ++j) {
         delw_ref(l, i__, j) = fParam_1.eeps * del_ref(l, i__) * y_ref(l - 1, j);
      }
   }
   for (l = fParam_1.layerm - 1; l >= 2; --l) {
      i__2 = fNeur_1.neuron[l - 1];
      for (i__ = 1; i__ <= i__2; ++i__) {
         uu = 0.;
         i__1 = fNeur_1.neuron[l];
         for (k = 1; k <= i__1; ++k) {
            uu += w_ref(l + 1, k, i__) * del_ref(l + 1, k);
         }
         Foncf(&l, &x_ref(l, i__), &f);
         df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
         del_ref(l, i__) = df * uu;
         delww_ref(l, i__) = fParam_1.eeps * del_ref(l, i__);
         i__1 = fNeur_1.neuron[l - 2];
         for (j = 1; j <= i__1; ++j) {
            delw_ref(l, i__, j) = fParam_1.eeps * del_ref(l, i__) * y_ref(l - 1, j);
         }
      }
   }
   i__1 = fParam_1.layerm;
   for (l = 2; l <= i__1; ++l) {
      i__2 = fNeur_1.neuron[l - 1];
      for (i__ = 1; i__ <= i__2; ++i__) {
         deltaww_ref(l, i__) = delww_ref(l, i__) + fParam_1.eta * deltaww_ref(l, i__);
         ww_ref(l, i__) = ww_ref(l, i__) + deltaww_ref(l, i__);
         i__3 = fNeur_1.neuron[l - 2];
         for (j = 1; j <= i__3; ++j) {
            delta_ref(l, i__, j) = delw_ref(l, i__, j) + fParam_1.eta * delta_ref(l, i__, j);
            w_ref(l, i__, j) = w_ref(l, i__, j) + delta_ref(l, i__, j);
         }
      }
   }
}
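
// En_arriere is standard back-propagation with a momentum term. For the
// output layer L the error term is
//
//    del(L, i) = f'(x) * ( o(i) - y(L, i) ) * coef(i)
//
// with f'(x) = (1 + f)(1 - f) / (2 * temp) for the transfer function used
// in Foncf(). Hidden-layer terms follow from
//
//    del(l, i) = f'(x) * sum_k w(l+1, k, i) * del(l+1, k)
//
// and each weight update combines the gradient step (learning rate eeps)
// with a memory of the previous update (momentum eta):
//
//    delta(t) = eeps * del * y + eta * delta(t-1),   w += delta(t)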

#undef deltaww_ref
#undef del_ref
#undef ww_ref
#undef delww_ref
#undef delta_ref
#undef y_ref
#undef x_ref
#undef w_ref
#undef delw_ref

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Out( Int_t *iii, Int_t *maxcycle )
{
   // write weights to file

   if (*iii == *maxcycle) {
      // now in MethodCFMlpANN.cxx
   }
}

#undef ww_ref
#undef w_ref

#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Innit( char *det, Double_t *tout2, Double_t *tin2, Int_t )
{
   // initialisation followed by the main training loop
   Int_t i__1, i__2, i__3;

   Int_t i__, j;
   Int_t nevod, layer, ktest, i1, nrest;
   Int_t ievent(0);
   Int_t kkk;
   Double_t xxx = 0.0, yyy = 0.0;

   Leclearn(&ktest, tout2, tin2);
   Lecev2(&ktest, tout2, tin2);
   if (ktest == 1) {
      printf( " .... strange to be here (1) ... \n");
      std::exit(1);
   }
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         deltaww_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            delta_ref(layer + 1, j, i__) = 0.;
         }
      }
   }
   if (fParam_1.ichoi == 1) {
      Inl();
   }
   else {
      Wini();
   }
   kkk = 0;
   i__3 = fParam_1.nblearn;
   Timer timer( i__3, "CFMlpANN" );
   Int_t num = i__3/100;

   for (i1 = 1; i1 <= i__3; ++i1) {

      if ( ( num>0 && (i1-1)%num == 0) || (i1 == i__3) ) timer.DrawProgressBar( i1-1 );

      i__2 = fParam_1.nevl;
      for (i__ = 1; i__ <= i__2; ++i__) {
         ++kkk;
         if (fCost_1.ieps == 2) {
            fParam_1.eeps = Fdecroi(&kkk);
         }
         if (fCost_1.ieps == 1) {
            fParam_1.eeps = fParam_1.epsmin;
         }
         Bool_t doCont = kTRUE;
         if (fVarn_1.iclass == 2) {
            ievent = (Int_t) ((Double_t) fParam_1.nevl * Sen3a());
            if (ievent == 0) {
               doCont = kFALSE;
            }
         }
         if (doCont) {
            if (fVarn_1.iclass == 1) {
               nevod = fParam_1.nevl / fParam_1.lclass;
               nrest = i__ % fParam_1.lclass;
               fParam_1.ndiv = i__ / fParam_1.lclass;
               if (nrest != 0) {
                  ievent = fParam_1.ndiv + 1 + (fParam_1.lclass - nrest) * nevod;
               }
               else {
                  ievent = fParam_1.ndiv;
               }
            }
            En_avant(&ievent);
            En_arriere(&ievent);
         }
      }
      yyy = 0.;
      if (i1 % fParam_1.ndivis == 0 || i1 == 1 || i1 == fParam_1.nblearn) {
         Cout(&i1, &xxx);
         Cout2(&i1, &yyy);
         GraphNN(&i1, &xxx, &yyy, det, (Int_t)20);
         Out(&i1, &fParam_1.nblearn);
      }
      if (xxx < fCost_1.tolcou) {
         GraphNN(&fParam_1.nblearn, &xxx, &yyy, det, (Int_t)20);
         Out(&fParam_1.nblearn, &fParam_1.nblearn);
         break;
      }
   }
}
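
// Summary of the training loop above: each of the nblearn cycles sweeps once
// over the training sample. With iclass == 2 the event to learn from is
// drawn at random via Sen3a() (a draw of 0 is skipped); with iclass == 1
// events are picked so that the classes alternate. Each pick is followed by
// a forward pass (En_avant) and a weight update (En_arriere). Every ndivis
// cycles the training and test costs are recomputed, and the loop terminates
// early once the training cost drops below tolcou.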

#undef deltaww_ref
#undef delta_ref

void TMVA::MethodCFMlpANN_Utils::TestNN()
{
   // check the network configuration against the hard-coded limits
   Int_t i__1;

   Int_t i__;
   Int_t ktest;

   ktest = 0;
   if (fParam_1.layerm > max_nLayers_) {
      ktest = 1;
      printf("Error: number of layers exceeds maximum: %i, %i ==> abort\n",
             fParam_1.layerm, max_nLayers_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.nevl > max_Events_) {
      ktest = 1;
      printf("Error: number of training events exceeds maximum: %i, %i ==> abort\n",
             fParam_1.nevl, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.nevt > max_Events_) {
      printf("Error: number of testing events exceeds maximum: %i, %i ==> abort\n",
             fParam_1.nevt, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.lclass < fNeur_1.neuron[fParam_1.layerm - 1]) {
      ktest = 1;
      printf("Error: wrong number of classes at output layer: %i != %i ==> abort\n",
             fNeur_1.neuron[fParam_1.layerm - 1], fParam_1.lclass);
      Arret("problem needs to be reported ");
   }
   if (fParam_1.nvar > max_nVar_) {
      ktest = 1;
      printf("Error: number of variables exceeds maximum: %i, %i ==> abort\n",
             fParam_1.nvar, fg_max_nVar_ );
      Arret("modification of mlpl3_param_lim.inc is needed");
   }
   i__1 = fParam_1.layerm;
   for (i__ = 1; i__ <= i__1; ++i__) {
      if (fNeur_1.neuron[i__ - 1] > max_nNodes_) {
         ktest = 1;
         printf("Error: number of neurons at layer exceeds maximum: %i, %i ==> abort\n",
                i__, fg_max_nNodes_ );
      }
   }
   if (ktest == 1) {
      printf( " .... strange to be here (2) ... \n");
      std::exit(1);
   }
}

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Cout( Int_t * /*i1*/, Double_t *xxx )
{
   // compute the cost on the training sample
   Int_t i__1, i__2;
   Double_t d__1;

   Double_t c__;
   Int_t i__, j;

   c__ = 0.;
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         if (fVarn_1.nclass[i__ - 1] == j) {
            fNeur_1.o[j - 1] = 1.;
         }
         else {
            fNeur_1.o[j - 1] = -1.;
         }
         // computing 2nd power
         d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
         c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
      }
   }
   c__ /= (Double_t) (fParam_1.nevl * fParam_1.lclass) * 2.;
   *xxx = c__;
   fCost_1.ancout = c__;
}
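
// The quantity computed here is the coefficient-weighted mean squared
// deviation of the network outputs from their targets (+1 for the node of
// the correct class, -1 otherwise):
//
//    E = sum_events sum_j coef(j) * ( y(L, j) - o(j) )^2 / (2 * nevl * lclass)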

#undef y_ref

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Inl()
{
   // remnant of the original routine invoked when a training is continued
   // from a weight file (fParam_1.ichoi == 1); only the index bookkeeping
   // below survives here, and it has no side effects
   Int_t i__1, i__2;

   Int_t jmax, k, layer, kk, nq, nr;

   i__1 = fParam_1.nvar;
   i__1 = fParam_1.layerm;
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      nq = fNeur_1.neuron[layer] / 10;
      nr = fNeur_1.neuron[layer] - nq * 10;
      if (nr == 0) {
         kk = nq;
      }
      else {
         kk = nq + 1;
      }
      i__2 = kk;
      for (k = 1; k <= i__2; ++k) {
         // jmin = k * 10 - 9;
         jmax = k * 10;
         if (fNeur_1.neuron[layer] < jmax) {
            jmax = fNeur_1.neuron[layer];
         }
         // i__3 = fNeur_1.neuron[layer - 1];
      }
   }
}

#undef ww_ref
#undef w_ref

Double_t TMVA::MethodCFMlpANN_Utils::Fdecroi( Int_t *i__ )
{
   // learning rate that decreases linearly with the update count
   Double_t ret_val;

   Double_t aaa, bbb;

   aaa = (fParam_1.epsmin - fParam_1.epsmax) / (Double_t) (fParam_1.nblearn * fParam_1.nevl - 1);
   bbb = fParam_1.epsmax - aaa;
   ret_val = aaa * (Double_t) (*i__) + bbb;
   return ret_val;
}
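
// Fdecroi interpolates the learning rate linearly between epsmax (first
// weight update) and epsmin (last update):
//
//    eeps(i) = aaa * i + bbb   with   aaa = (epsmin - epsmax) / (N - 1),
//                                     bbb = epsmax - aaa,
//
// where N = nblearn * nevl is the total number of updates, so that
// eeps(1) = epsmax and eeps(N) = epsmin.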

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::GraphNN( Int_t *ilearn, Double_t * /*xxx*/,
                                          Double_t * /*yyy*/, char * /*det*/, Int_t /*det_len*/ )
{
   // compute the per-node cuts from the mean network responses
   Int_t i__1, i__2;

   Double_t xmok[max_nNodes_];
   // Float_t xpaw;
   Double_t xmko[max_nNodes_];
   Int_t i__, j;
   Int_t ix;
   // Int_t jjj;
   // Float_t vbn[10];
   Int_t nko[max_nNodes_], nok[max_nNodes_];

   // for (i__ = 1; i__ <= 10; ++i__) {
   //    vbn[i__ - 1] = (Float_t)0.;
   // }
   if (*ilearn == 1) {
      // AH: removed output
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      nok[i__ - 1] = 0;
      nko[i__ - 1] = 0;
      xmok[i__ - 1] = 0.;
      xmko[i__ - 1] = 0.;
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         // xpaw = (Float_t) y_ref(fParam_1.layerm, j);
         if (fVarn_1.nclass[i__ - 1] == j) {
            ++nok[j - 1];
            xmok[j - 1] += y_ref(fParam_1.layerm, j);
         }
         else {
            ++nko[j - 1];
            xmko[j - 1] += y_ref(fParam_1.layerm, j);
            // jjj = j + fNeur_1.neuron[fParam_1.layerm - 1];
         }
         // if (j <= 9) {
         //    vbn[j - 1] = xpaw;
         // }
      }
      // vbn[9] = (Float_t) fVarn_1.nclass[i__ - 1];
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (j = 1; j <= i__1; ++j) {
      xmok[j - 1] /= (Double_t) nok[j - 1];
      xmko[j - 1] /= (Double_t) nko[j - 1];
      fNeur_1.cut[j - 1] = (xmok[j - 1] + xmko[j - 1]) / 2.;
   }
   ix = fNeur_1.neuron[fParam_1.layerm - 1];
   i__1 = ix;
}
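
// For every output node j GraphNN accumulates the mean response separately
// for events of the node's own class (xmok) and for all other events (xmko),
// and places the decision cut halfway between the two means:
//
//    cut(j) = ( <y_j | class j> + <y_j | other> ) / 2
//
// Note that nok and nko are used as divisors, so a pathological sample in
// which some class never occurs would divide by zero here.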

#undef y_ref

Double_t TMVA::MethodCFMlpANN_Utils::Sen3a( void )
{
   // uniform random number generator, translated from the original code

   // initialized data
   Int_t    m12 = 4096;
   Double_t f1  = 2.44140625e-4;
   Double_t f2  = 5.96046448e-8;
   Double_t f3  = 1.45519152e-11;
   Int_t    j1  = 3823;
   Int_t    j2  = 4006;
   Int_t    j3  = 2903;
   static Int_t fg_i1 = 3823;
   static Int_t fg_i2 = 4006;
   static Int_t fg_i3 = 2903;

   Double_t ret_val;
   Int_t    k3, l3, k2, l2, k1, l1;

   // reference: /k.d.senne/j. stochastics/ vol 1,no 3 (1974),pp.215-38
   k3 = fg_i3 * j3;
   l3 = k3 / m12;
   k2 = fg_i2 * j3 + fg_i3 * j2 + l3;
   l2 = k2 / m12;
   k1 = fg_i1 * j3 + fg_i2 * j2 + fg_i3 * j1 + l2;
   l1 = k1 / m12;
   fg_i1 = k1 - l1 * m12;
   fg_i2 = k2 - l2 * m12;
   fg_i3 = k3 - l3 * m12;
   ret_val = f1 * (Double_t) fg_i1 + f2 * (Float_t) fg_i2 + f3 * (Double_t) fg_i3;

   return ret_val;
}
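
// Sen3a is the portable pseudo-random generator of the reference above: the
// 36-bit state is carried in three 12-bit pieces (m12 = 2^12) that are
// advanced by one multiplicative congruential step with the multiplier
// (j1, j2, j3), and the result is reassembled as
//
//    r = i1/2^12 + i2/2^24 + i3/2^36
//
// (f1, f2, f3 are exactly these inverse powers of two), giving a uniform
// deviate in [0, 1).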

void TMVA::MethodCFMlpANN_Utils::Foncf( Int_t *i__, Double_t *u, Double_t *f )
{
   // sigmoid-like transfer function of the neurons
   Double_t yy;

   if (*u / fDel_1.temp[*i__ - 1] > 170.) {
      *f = .99999999989999999;
   }
   else if (*u / fDel_1.temp[*i__ - 1] < -170.) {
      *f = -.99999999989999999;
   }
   else {
      yy = TMath::Exp(-(*u) / fDel_1.temp[*i__ - 1]);
      *f = (1. - yy) / (yy + 1.);
   }
}
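
// With T = temp(i), the transfer function evaluated by Foncf is
//
//    f(u) = (1 - exp(-u/T)) / (1 + exp(-u/T)) = tanh( u / (2T) ),
//
// short-circuited to +/- (1 - 1e-10) once |u/T| exceeds 170 to avoid
// evaluating extreme exponentials.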

#undef w_ref

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Cout2( Int_t * /*i1*/, Double_t *yyy )
{
   // compute the cost on the test sample
   Int_t i__1, i__2;
   Double_t d__1;

   Double_t c__;
   Int_t i__, j;

   c__ = 0.;
   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant2(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         if (fVarn_1.mclass[i__ - 1] == j) {
            fNeur_1.o[j - 1] = 1.;
         }
         else {
            fNeur_1.o[j - 1] = -1.;
         }
         /* computing 2nd power */
         d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
         c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
      }
   }
   c__ /= (Double_t) (fParam_1.nevt * fParam_1.lclass) * 2.;
   *yyy = c__;
}

#undef y_ref

#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)

void TMVA::MethodCFMlpANN_Utils::Lecev2( Int_t *ktest, Double_t *tout2, Double_t *tin2 )
{
   // read the test events and normalise them with the ranges
   // determined from the training sample (see Leclearn)
   Int_t i__1, i__2;

   Int_t i__, j, l;
   // Int_t mocla[max_nNodes_];
   Int_t ikend;
   Double_t xpg[max_nVar_];

   /* NTRAIN: Nb of events used during the learning */
   /* NTEST : Nb of events used for the test         */
   /* TIN   : Input variables                        */
   /* TOUT  : type of the event                      */

   *ktest = 0;
   i__1 = fParam_1.lclass;
   // for (k = 1; k <= i__1; ++k) {
   //    mocla[k - 1] = 0;
   // }
   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      DataInterface(tout2, tin2, &fg_999, &fg_0, &fParam_1.nevt, &fParam_1.nvar,
                    xpg, &fVarn_1.mclass[i__ - 1], &ikend);

      if (ikend == -1) {
         break;
      }

      i__2 = fParam_1.nvar;
      for (j = 1; j <= i__2; ++j) {
         xx_ref(i__, j) = xpg[j - 1];
      }
   }

   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      i__2 = fParam_1.nvar;
      for (l = 1; l <= i__2; ++l) {
         if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (Float_t)0.) {
            xx_ref(i__, l) = (Float_t)0.;
         }
         else {
            xx_ref(i__, l) = xx_ref(i__, l) - (fVarn_1.xmax[l - 1] + fVarn_1.xmin[l - 1]) / 2.;
            xx_ref(i__, l) = xx_ref(i__, l) / ((fVarn_1.xmax[l - 1] - fVarn_1.xmin[l - 1]) / 2.);
         }
      }
   }
}

#undef xx_ref

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)

void TMVA::MethodCFMlpANN_Utils::En_avant2( Int_t *ievent )
{
   // forward propagation of one test event through the network
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j;
   Int_t layer;

   i__1 = fNeur_1.neuron[0];
   for (i__ = 1; i__ <= i__1; ++i__) {
      y_ref(1, i__) = xx_ref(*ievent, i__);
   }
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         x_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            x_ref(layer + 1, j) = x_ref(layer + 1, j) + y_ref(layer, i__) * w_ref(layer + 1, j, i__);
         }
         x_ref(layer + 1, j) = x_ref(layer + 1, j) + ww_ref(layer + 1, j);
         i__3 = layer + 1;
         Foncf(&i__3, &x_ref(layer + 1, j), &f);
         y_ref(layer + 1, j) = f;
      }
   }
}

#undef xx_ref
#undef ww_ref
#undef y_ref
#undef x_ref
#undef w_ref

void TMVA::MethodCFMlpANN_Utils::Arret( const char* mot )
{
   // fatal error occurred: stop execution
   printf("%s: %s\n", fg_MethodName, mot);
   std::exit(1);
}

void TMVA::MethodCFMlpANN_Utils::CollectVar( Int_t * /*nvar*/, Int_t * /*class__*/, Double_t * /*xpg*/ )
{
   // historical helper; the body below was commented out in the C++
   // translation and is kept for reference only
   // Int_t i__1;

   // Int_t i__;
   // Float_t x[201];

   // // Parameter adjustments
   // --xpg;

   // for (i__ = 1; i__ <= 201; ++i__) {
   //    x[i__ - 1] = 0.0;
   // }
   // x[0] = (Float_t) (*class__);
   // i__1 = *nvar;
   // for (i__ = 1; i__ <= i__1; ++i__) {
   //    x[i__] = (Float_t) xpg[i__];
   // }
}