MethodCFMlpANN_Utils.cxx
// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
 * Package: TMVA                                                                  *
 * Class  : TMVA::MethodCFMlpANN_utils                                           *
 * Web    : http://tmva.sourceforge.net                                          *
 *                                                                                *
 * Reference for the original FORTRAN version "mlpl3.F":                         *
 *      Authors  : J. Proriol and contributions from ALEPH-Clermont-Ferrand      *
 *                 Team members                                                  *
 *      Copyright: Laboratoire Physique Corpusculaire                            *
 *                 Universite de Blaise Pascal, IN2P3/CNRS                       *
 *                                                                                *
 * Modifications by present authors:                                             *
 *      use dynamical data tables (not for all of them, but for the big ones)    *
 *                                                                                *
 * Description:                                                                  *
 *      Utility routine translated from original mlpl3.F FORTRAN routine         *
 *                                                                                *
 *      MultiLayerPerceptron : Training code                                     *
 *                                                                                *
 *        NTRAIN: Nb of events used during the learning                          *
 *        NTEST:  Nb of events used for the test                                 *
 *        TIN:    Input variables                                                *
 *        TOUT:   type of the event                                              *
 *                                                                                *
 * ------------------------------------------------------------------------------ *
 *                                                                                *
 * Authors (alphabetical):                                                       *
 *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland             *
 *      Xavier Prudent  <prudent@lapp.in2p3.fr>  - LAPP, France                  *
 *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany     *
 *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada        *
 *                                                                                *
 * Copyright (c) 2005:                                                           *
 *      CERN, Switzerland                                                        *
 *      U. of Victoria, Canada                                                   *
 *      MPI-K Heidelberg, Germany                                                *
 *      LAPP, Annecy, France                                                     *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without            *
 * modification, are permitted according to the terms listed in LICENSE          *
 * (http://tmva.sourceforge.net/LICENSE)                                         *
 *                                                                                *
 **********************************************************************************/

//_______________________________________________________________________
//
// Implementation of the Clermont-Ferrand artificial neural network
//
// Reference for the original FORTRAN version "mlpl3.F":
//   Authors  : J. Proriol and contributions from ALEPH-Clermont-Ferrand
//              Team members
//   Copyright: Laboratoire Physique Corpusculaire
//              Universite de Blaise Pascal, IN2P3/CNRS
//_______________________________________________________________________
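//
// Usage sketch (an illustration, not part of the original file): the derived
// MethodCFMlpANN class flattens its training data into plain arrays and calls
// the Train_nn entry point; all sizes are passed by pointer, FORTRAN style.
// The variable names and array layout below are hypothetical.
//
//    Int_t ntrain = 1000, ntest = 500, nvar = 4;
//    Int_t nlayer = 3, nodes[3] = { 4, 10, 2 }, ncycle = 200;
//    std::vector<Double_t> tin( (ntrain + ntest) * nvar ); // input variables
//    std::vector<Double_t> tout( ntrain + ntest );         // type of each event
//    // ... fill tin/tout from the data set (served back via DataInterface), then:
//    Train_nn( tin.data(), tout.data(), &ntrain, &ntest,
//              &nvar, &nlayer, nodes, &ncycle );
//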

#include <string>
#include <iostream>
#include <cstdlib>

#include "TMath.h"
#include "TString.h"

#include "TMVA/MethodCFMlpANN_Utils.h"
#include "TMVA/MsgLogger.h"
#include "TMVA/Timer.h"
#include "TMVA/Types.h"

using std::cout;
using std::endl;

ClassImp(TMVA::MethodCFMlpANN_Utils)

const Int_t       TMVA::MethodCFMlpANN_Utils::fg_max_nVar_   = max_nVar_;
const Int_t       TMVA::MethodCFMlpANN_Utils::fg_max_nNodes_ = max_nNodes_;
const char* const TMVA::MethodCFMlpANN_Utils::fg_MethodName  = "--- CFMlpANN ";

TMVA::MethodCFMlpANN_Utils::MethodCFMlpANN_Utils() :
   fg_100(100),
   fg_0(0),
   fg_999(999)
{
   // default constructor
   Int_t i(0);
   for(i=0; i<max_nVar_;++i) fVarn_1.xmin[i] = 0;
   fCost_1.ancout = 0;
   fCost_1.ieps = 0;
   fCost_1.tolcou = 0;

   for(i=0; i<max_nNodes_;++i) fDel_1.coef[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fDel_1.del[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fDel_1.delta[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fDel_1.delw[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fDel_1.delww[i] = 0;
   fDel_1.demin = 0;
   fDel_1.demax = 0;
   fDel_1.idde = 0;
   for(i=0; i<max_nLayers_;++i) fDel_1.temp[i] = 0;

   for(i=0; i<max_nNodes_;++i) fNeur_1.cut[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.deltaww[i] = 0;
   for(i=0; i<max_nLayers_;++i) fNeur_1.neuron[i] = 0;
   for(i=0; i<max_nNodes_;++i) fNeur_1.o[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fNeur_1.w[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.ww[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.x[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.y[i] = 0;

   fParam_1.eeps = 0;
   fParam_1.epsmin = 0;
   fParam_1.epsmax = 0;
   fParam_1.eta = 0;
   fParam_1.ichoi = 0;
   fParam_1.itest = 0;
   fParam_1.layerm = 0;
   fParam_1.lclass = 0;
   fParam_1.nblearn = 0;
   fParam_1.ndiv = 0;
   fParam_1.ndivis = 0;
   fParam_1.nevl = 0;
   fParam_1.nevt = 0;
   fParam_1.nunap = 0;
   fParam_1.nunilec = 0;
   fParam_1.nunishort = 0;
   fParam_1.nunisor = 0;
   fParam_1.nvar = 0;

   fVarn_1.iclass = 0;
   for(i=0; i<max_Events_;++i) fVarn_1.mclass[i] = 0;
   for(i=0; i<max_Events_;++i) fVarn_1.nclass[i] = 0;
   for(i=0; i<max_nVar_;++i) fVarn_1.xmax[i] = 0;

   fLogger = 0;
}

TMVA::MethodCFMlpANN_Utils::~MethodCFMlpANN_Utils()
{
   // destructor
}

void TMVA::MethodCFMlpANN_Utils::Train_nn( Double_t *tin2, Double_t *tout2, Int_t *ntrain,
                                           Int_t *ntest, Int_t *nvar2, Int_t *nlayer,
                                           Int_t *nodes, Int_t *ncycle )
{
   // training interface - called from MethodCFMlpANN class object

   // sanity checks
   if (*ntrain + *ntest > max_Events_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of training + testing" \
              " events exceeds hardcoded maximum - reset to maximum allowed number");
      // compute the rescaling factor in floating point; plain integer
      // division would truncate it to zero whenever the sum exceeds the maximum
      Double_t frac = (Double_t)max_Events_ / (*ntrain + *ntest);
      *ntrain = (Int_t)(*ntrain*frac);
      *ntest  = (Int_t)(*ntest *frac);
   }
   if (*nvar2 > max_nVar_) {
      printf( "*** CFMlpANN_f2c: ERROR in Train_nn: number of variables" \
              " exceeds hardcoded maximum ==> abort");
      std::exit(1);
   }
   if (*nlayer > max_nLayers_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of layers" \
              " exceeds hardcoded maximum - reset to maximum allowed number");
      *nlayer = max_nLayers_;
   }
   if (*nodes > max_nNodes_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of nodes" \
              " exceeds hardcoded maximum - reset to maximum allowed number");
      *nodes = max_nNodes_;
   }

   // create dynamic data tables (AH)
   fVarn2_1.Create( *ntrain + *ntest, *nvar2 );
   fVarn3_1.Create( *ntrain + *ntest, *nvar2 );

   // Int_t imax;
   char det[20];

   Entree_new(nvar2, det, ntrain, ntest, nlayer, nodes, ncycle, (Int_t)20);
   if (fNeur_1.neuron[fParam_1.layerm - 1] == 1) {
      // imax = 2;
      fParam_1.lclass = 2;
   }
   else {
      // imax = fNeur_1.neuron[fParam_1.layerm - 1] << 1;
      fParam_1.lclass = fNeur_1.neuron[fParam_1.layerm - 1];
   }
   fParam_1.nvar = fNeur_1.neuron[0];
   TestNN();
   Innit(det, tout2, tin2, (Int_t)20);

   // delete data tables
   fVarn2_1.Delete();
   fVarn3_1.Delete();
}

void TMVA::MethodCFMlpANN_Utils::Entree_new( Int_t *, char * /*det*/, Int_t *ntrain,
                                             Int_t *ntest, Int_t *numlayer, Int_t *nodes,
                                             Int_t *numcycle, Int_t /*det_len*/)
{
   // first initialisation of the ANN
   Int_t i__1;

   Int_t rewrite, i__, j, ncoef;
   Int_t ntemp, num, retrain;

   /* NTRAIN: Nb of events used during the learning */
   /* NTEST:  Nb of events used for the test */
   /* TIN:    Input variables */
   /* TOUT:   type of the event */

   fCost_1.ancout = 1e30;

   /* .............. HardCoded Values .................... */
   retrain = 0;
   rewrite = 1000;
   for (i__ = 1; i__ <= max_nNodes_; ++i__) {
      fDel_1.coef[i__ - 1] = (Float_t)0.;
   }
   for (i__ = 1; i__ <= max_nLayers_; ++i__) {
      fDel_1.temp[i__ - 1] = (Float_t)0.;
   }
   fParam_1.layerm = *numlayer;
   if (fParam_1.layerm > max_nLayers_) {
      printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
             fParam_1.layerm, max_nLayers_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   fParam_1.nevl = *ntrain;
   fParam_1.nevt = *ntest;
   fParam_1.nblearn = *numcycle;
   fVarn_1.iclass = 2;
   fParam_1.nunilec = 10;
   fParam_1.epsmin = 1e-10;
   fParam_1.epsmax = 1e-4;
   fParam_1.eta = .5;
   fCost_1.tolcou = 1e-6;
   fCost_1.ieps = 2;
   fParam_1.nunisor = 30;
   fParam_1.nunishort = 48;
   fParam_1.nunap = 40;

   ULog() << kINFO << "Total number of events for training: " << fParam_1.nevl << Endl;
   ULog() << kINFO << "Total number of training cycles    : " << fParam_1.nblearn << Endl;
   if (fParam_1.nevl > max_Events_) {
      printf("Error: number of learning events exceeds maximum: %i, %i ==> abort",
             fParam_1.nevl, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.nevt > max_Events_) {
      printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
             fParam_1.nevt, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      num = nodes[j-1];
      if (num < 2) {
         num = 2;
      }
      if (j == fParam_1.layerm && num != 2) {
         num = 2;
      }
      fNeur_1.neuron[j - 1] = num;
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      ULog() << kINFO << "Number of neurons in layer " << j << ": " << fNeur_1.neuron[j - 1] << Endl;
   }
   if (fNeur_1.neuron[fParam_1.layerm - 1] != 2) {
      printf("Error: wrong number of classes at output layer: %i != 2 ==> abort\n",
             fNeur_1.neuron[fParam_1.layerm - 1]);
      Arret("stop");
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (j = 1; j <= i__1; ++j) {
      fDel_1.coef[j - 1] = 1.;
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      fDel_1.temp[j - 1] = 1.;
   }
   fParam_1.ichoi = retrain;
   fParam_1.ndivis = rewrite;
   fDel_1.idde = 1;
   if (! (fParam_1.ichoi == 0 || fParam_1.ichoi == 1)) {
      printf( "Big troubles !!! \n" );
      Arret("new training or continued one !");
   }
   if (fParam_1.ichoi == 0) {
      ULog() << kINFO << "New training will be performed" << Endl;
   }
   else {
      printf("%s: New training will be continued from a weight file\n", fg_MethodName);
   }
   ncoef = 0;
   ntemp = 0;
   for (i__ = 1; i__ <= max_nNodes_; ++i__) {
      if (fDel_1.coef[i__ - 1] != (Float_t)0.) {
         ++ncoef;
      }
   }
   for (i__ = 1; i__ <= max_nLayers_; ++i__) {
      if (fDel_1.temp[i__ - 1] != (Float_t)0.) {
         ++ntemp;
      }
   }
   if (ncoef != fNeur_1.neuron[fParam_1.layerm - 1]) {
      Arret("entree error code 1: needs to be reported");
   }
   if (ntemp != fParam_1.layerm) {
      Arret("entree error code 2: needs to be reported");
   }
}

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
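// The two macros above emulate the 1-based FORTRAN array indexing on the flat
// C arrays: with max_nLayers_ = 6 and max_nNodes_ = 30 (the values implied by
// the constant offsets -187 and -7), w_ref maps the index triple
// (layer, node, input) = (1,1,1) to element 0 of fNeur_1.w:
//
//    ((1)*30 + 1)*6 + 1 - 187 == 0
//
// and ww_ref(1,1) likewise maps to element 0 of the bias array fNeur_1.ww.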

void TMVA::MethodCFMlpANN_Utils::Wini()
{
   // initialise weights and biases with small random values in (-0.2, 0.2)
   Int_t i__1, i__2, i__3;
   Int_t i__, j;
   Int_t layer;

   i__1 = fParam_1.layerm;
   for (layer = 2; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer - 2];
      for (i__ = 1; i__ <= i__2; ++i__) {
         i__3 = fNeur_1.neuron[layer - 1];
         for (j = 1; j <= i__3; ++j) {
            w_ref(layer, j, i__) = (Sen3a() * 2. - 1.) * .2;
            ww_ref(layer, j) = (Sen3a() * 2. - 1.) * .2;
         }
      }
   }
}

#undef ww_ref
#undef w_ref

#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::En_avant( Int_t *ievent )
{
   // forward propagation of one training event through the network
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j;
   Int_t layer;

   // copy the (normalised) input variables of the event into the first layer
   i__1 = fNeur_1.neuron[0];
   for (i__ = 1; i__ <= i__1; ++i__) {
      y_ref(1, i__) = xeev_ref(*ievent, i__);
   }
   // each subsequent layer computes x = w*y + ww, then y = Foncf(x)
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         x_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            x_ref(layer + 1, j) = ( x_ref(layer + 1, j) + y_ref(layer, i__)
                                    * w_ref(layer + 1, j, i__) );
         }
         x_ref(layer + 1, j) = x_ref(layer + 1, j) + ww_ref(layer + 1, j);
         i__3 = layer + 1;
         Foncf(&i__3, &x_ref(layer + 1, j), &f);
         y_ref(layer + 1, j) = f;
      }
   }
}

#undef ww_ref
#undef y_ref
#undef x_ref
#undef w_ref
#undef xeev_ref

#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)

void TMVA::MethodCFMlpANN_Utils::Leclearn( Int_t *ktest, Double_t *tout2, Double_t *tin2 )
{
   // read the training events, fill the internal data table and
   // normalise the input variables to the range [-1, 1]
   Int_t i__1, i__2;

   Int_t i__, j, k, l;
   Int_t nocla[max_nNodes_], ikend;
   Double_t xpg[max_nVar_];

   *ktest = 0;
   i__1 = fParam_1.lclass;
   for (k = 1; k <= i__1; ++k) {
      nocla[k - 1] = 0;
   }
   i__1 = fParam_1.nvar;
   for (i__ = 1; i__ <= i__1; ++i__) {
      fVarn_1.xmin[i__ - 1] = 1e30;
      fVarn_1.xmax[i__ - 1] = -fVarn_1.xmin[i__ - 1];
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      DataInterface(tout2, tin2, &fg_100, &fg_0, &fParam_1.nevl, &fParam_1.nvar,
                    xpg, &fVarn_1.nclass[i__ - 1], &ikend);
      if (ikend == -1) {
         break;
      }

      CollectVar(&fParam_1.nvar, &fVarn_1.nclass[i__ - 1], xpg);

      i__2 = fParam_1.nvar;
      for (j = 1; j <= i__2; ++j) {
         xeev_ref(i__, j) = xpg[j - 1];
      }
      if (fVarn_1.iclass == 1) {
         i__2 = fParam_1.lclass;
         for (k = 1; k <= i__2; ++k) {
            if (fVarn_1.nclass[i__ - 1] == k) {
               ++nocla[k - 1];
            }
         }
      }
      i__2 = fParam_1.nvar;
      for (k = 1; k <= i__2; ++k) {
         if (xeev_ref(i__, k) < fVarn_1.xmin[k - 1]) {
            fVarn_1.xmin[k - 1] = xeev_ref(i__, k);
         }
         if (xeev_ref(i__, k) > fVarn_1.xmax[k - 1]) {
            fVarn_1.xmax[k - 1] = xeev_ref(i__, k);
         }
      }
   }

   if (fVarn_1.iclass == 1) {
      i__2 = fParam_1.lclass;
      for (k = 1; k <= i__2; ++k) {
         i__1 = fParam_1.lclass;
         for (l = 1; l <= i__1; ++l) {
            if (nocla[k - 1] != nocla[l - 1]) {
               *ktest = 1;
            }
         }
      }
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      i__2 = fParam_1.nvar;
      for (l = 1; l <= i__2; ++l) {
         if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (Float_t)0.) {
            xeev_ref(i__, l) = (Float_t)0.;
         }
         else {
            xeev_ref(i__, l) = xeev_ref(i__, l) - (fVarn_1.xmax[l - 1] +
                                                   fVarn_1.xmin[l - 1]) / 2.;
            xeev_ref(i__, l) = xeev_ref(i__, l) / ((fVarn_1.xmax[l - 1] -
                                                    fVarn_1.xmin[l - 1]) / 2.);
         }
      }
   }
}
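
// The normalisation applied in the last loop above maps each input variable v
// onto
//
//    v' = ( v - (xmax + xmin)/2 ) / ( (xmax - xmin)/2 )
//
// so that all training inputs lie in [-1, 1]; variables with
// xmin == xmax == 0 are simply zeroed to avoid a division by zero.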

#undef xeev_ref

#define delw_ref(a_1,a_2,a_3) fDel_1.delw[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define delww_ref(a_1,a_2) fDel_1.delww[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define del_ref(a_1,a_2) fDel_1.del[(a_2)*max_nLayers_ + a_1 - 7]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::En_arriere( Int_t *ievent )
{
   // backward propagation: compute the error at the output layer,
   // propagate it back through the hidden layers and update the weights
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j, k, l;
   Double_t df, uu;

   // target: +1 for the event's true class, -1 for all other output nodes
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      if (fVarn_1.nclass[*ievent - 1] == i__) {
         fNeur_1.o[i__ - 1] = 1.;
      }
      else {
         fNeur_1.o[i__ - 1] = -1.;
      }
   }
   l = fParam_1.layerm;
   i__1 = fNeur_1.neuron[l - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      f = y_ref(l, i__);
      df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
      del_ref(l, i__) = df * (fNeur_1.o[i__ - 1] - y_ref(l, i__)) *
         fDel_1.coef[i__ - 1];
      delww_ref(l, i__) = fParam_1.eeps * del_ref(l, i__);
      i__2 = fNeur_1.neuron[l - 2];
      for (j = 1; j <= i__2; ++j) {
         delw_ref(l, i__, j) = fParam_1.eeps * del_ref(l, i__) * y_ref(l - 1, j);
         /* L20: */
      }
   }
   for (l = fParam_1.layerm - 1; l >= 2; --l) {
      i__2 = fNeur_1.neuron[l - 1];
      for (i__ = 1; i__ <= i__2; ++i__) {
         uu = 0.;
         i__1 = fNeur_1.neuron[l];
         for (k = 1; k <= i__1; ++k) {
            uu += w_ref(l + 1, k, i__) * del_ref(l + 1, k);
         }
         Foncf(&l, &x_ref(l, i__), &f);
         df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
         del_ref(l, i__) = df * uu;
         delww_ref(l, i__) = fParam_1.eeps * del_ref(l, i__);
         i__1 = fNeur_1.neuron[l - 2];
         for (j = 1; j <= i__1; ++j) {
            delw_ref(l, i__, j) = fParam_1.eeps * del_ref(l, i__) * y_ref(l - 1, j);
         }
      }
   }
   i__1 = fParam_1.layerm;
   for (l = 2; l <= i__1; ++l) {
      i__2 = fNeur_1.neuron[l - 1];
      for (i__ = 1; i__ <= i__2; ++i__) {
         deltaww_ref(l, i__) = delww_ref(l, i__) + fParam_1.eta * deltaww_ref(l, i__);
         ww_ref(l, i__) = ww_ref(l, i__) + deltaww_ref(l, i__);
         i__3 = fNeur_1.neuron[l - 2];
         for (j = 1; j <= i__3; ++j) {
            delta_ref(l, i__, j) = delw_ref(l, i__, j) + fParam_1.eta * delta_ref(l, i__, j);
            w_ref(l, i__, j) = w_ref(l, i__, j) + delta_ref(l, i__, j);
         }
      }
   }
}
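
// The final loop above is a standard gradient step with momentum: for each
// weight the increment is Delta(t) = eeps * gradient + eta * Delta(t-1),
// followed by w += Delta(t), with learning rate eeps (possibly decaying, see
// Fdecroi) and momentum parameter eta as set in Entree_new.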

#undef deltaww_ref
#undef del_ref
#undef ww_ref
#undef delww_ref
#undef delta_ref
#undef y_ref
#undef x_ref
#undef w_ref
#undef delw_ref

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Out( Int_t *iii, Int_t *maxcycle )
{
   // write weights to file

   if (*iii == *maxcycle) {
      // now in MethodCFMlpANN.cxx
   }
}

#undef ww_ref
#undef w_ref

#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Innit( char *det, Double_t *tout2, Double_t *tin2, Int_t )
{
   // initialisation and main training loop: read the events, initialise
   // the weights and iterate the learning cycles
   Int_t i__1, i__2, i__3;

   Int_t i__, j;
   Int_t nevod, layer, ktest, i1, nrest;
   Int_t ievent(0);
   Int_t kkk;
   Double_t xxx = 0.0, yyy = 0.0;

   Leclearn(&ktest, tout2, tin2);
   Lecev2(&ktest, tout2, tin2);
   if (ktest == 1) {
      printf( " .... strange to be here (1) ... \n");
      std::exit(1);
   }
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         deltaww_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            delta_ref(layer + 1, j, i__) = 0.;
         }
      }
   }
   if (fParam_1.ichoi == 1) {
      Inl();
   }
   else {
      Wini();
   }
   kkk = 0;
   i__3 = fParam_1.nblearn;
   Timer timer( i__3, "CFMlpANN" );
   Int_t num = i__3/100;

   for (i1 = 1; i1 <= i__3; ++i1) {

      if ( ( num>0 && (i1-1)%num == 0) || (i1 == i__3) ) timer.DrawProgressBar( i1-1 );

      i__2 = fParam_1.nevl;
      for (i__ = 1; i__ <= i__2; ++i__) {
         ++kkk;
         if (fCost_1.ieps == 2) {
            fParam_1.eeps = Fdecroi(&kkk);
         }
         if (fCost_1.ieps == 1) {
            fParam_1.eeps = fParam_1.epsmin;
         }
         Bool_t doCont = kTRUE;
         if (fVarn_1.iclass == 2) {
            ievent = (Int_t) ((Double_t) fParam_1.nevl * Sen3a());
            if (ievent == 0) {
               doCont = kFALSE;
            }
         }
         if (doCont) {
            if (fVarn_1.iclass == 1) {
               nevod = fParam_1.nevl / fParam_1.lclass;
               nrest = i__ % fParam_1.lclass;
               fParam_1.ndiv = i__ / fParam_1.lclass;
               if (nrest != 0) {
                  ievent = fParam_1.ndiv + 1 + (fParam_1.lclass - nrest) * nevod;
               }
               else {
                  ievent = fParam_1.ndiv;
               }
            }
            En_avant(&ievent);
            En_arriere(&ievent);
         }
      }
      yyy = 0.;
      if (i1 % fParam_1.ndivis == 0 || i1 == 1 || i1 == fParam_1.nblearn) {
         Cout(&i1, &xxx);
         Cout2(&i1, &yyy);
         GraphNN(&i1, &xxx, &yyy, det, (Int_t)20);
         Out(&i1, &fParam_1.nblearn);
      }
      if (xxx < fCost_1.tolcou) {
         GraphNN(&fParam_1.nblearn, &xxx, &yyy, det, (Int_t)20);
         Out(&fParam_1.nblearn, &fParam_1.nblearn);
         break;
      }
   }
}

#undef deltaww_ref
#undef delta_ref

void TMVA::MethodCFMlpANN_Utils::TestNN()
{
   // sanity checks on the network configuration
   Int_t i__1;

   Int_t i__;
   Int_t ktest;

   ktest = 0;
   if (fParam_1.layerm > max_nLayers_) {
      ktest = 1;
      printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
             fParam_1.layerm, max_nLayers_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.nevl > max_Events_) {
      ktest = 1;
      printf("Error: number of training events exceeds maximum: %i, %i ==> abort",
             fParam_1.nevl, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.nevt > max_Events_) {
      printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
             fParam_1.nevt, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.lclass < fNeur_1.neuron[fParam_1.layerm - 1]) {
      ktest = 1;
      printf("Error: wrong number of classes at output layer: %i != %i ==> abort\n",
             fNeur_1.neuron[fParam_1.layerm - 1], fParam_1.lclass);
      Arret("problem needs to be reported ");
   }
   if (fParam_1.nvar > max_nVar_) {
      ktest = 1;
      printf("Error: number of variables exceeds maximum: %i, %i ==> abort",
             fParam_1.nvar, fg_max_nVar_ );
      Arret("modification of mlpl3_param_lim.inc is needed");
   }
   i__1 = fParam_1.layerm;
   for (i__ = 1; i__ <= i__1; ++i__) {
      if (fNeur_1.neuron[i__ - 1] > max_nNodes_) {
         ktest = 1;
         printf("Error: number of neurons at layer exceeds maximum: %i, %i ==> abort",
                i__, fg_max_nNodes_ );
      }
   }
   if (ktest == 1) {
      printf( " .... strange to be here (2) ... \n");
      std::exit(1);
   }
}

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Cout( Int_t * /*i1*/, Double_t *xxx )
{
   // compute the mean squared error of the network on the training sample
   Int_t i__1, i__2;
   Double_t d__1;

   Double_t c__;
   Int_t i__, j;

   c__ = 0.;
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         if (fVarn_1.nclass[i__ - 1] == j) {
            fNeur_1.o[j - 1] = 1.;
         }
         else {
            fNeur_1.o[j - 1] = -1.;
         }
         // Computing 2nd power
         d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
         c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
      }
   }
   c__ /= (Double_t) (fParam_1.nevl * fParam_1.lclass) * 2.;
   *xxx = c__;
   fCost_1.ancout = c__;
}

#undef y_ref

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Inl()
{
   // read back the network weights for a continued training; the actual
   // file I/O has been removed, only the original block-of-ten index
   // bookkeeping of the FORTRAN routine remains here
   Int_t i__1, i__2;

   Int_t jmax, k, layer, kk, nq, nr;

   i__1 = fParam_1.nvar;
   i__1 = fParam_1.layerm;
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      nq = fNeur_1.neuron[layer] / 10;
      nr = fNeur_1.neuron[layer] - nq * 10;
      if (nr == 0) {
         kk = nq;
      }
      else {
         kk = nq + 1;
      }
      i__2 = kk;
      for (k = 1; k <= i__2; ++k) {
         // jmin = k * 10 - 9;
         jmax = k * 10;
         if (fNeur_1.neuron[layer] < jmax) {
            jmax = fNeur_1.neuron[layer];
         }
         // i__3 = fNeur_1.neuron[layer - 1];
      }
   }
}

#undef ww_ref
#undef w_ref

Double_t TMVA::MethodCFMlpANN_Utils::Fdecroi( Int_t *i__ )
{
   // linear decay of the learning rate with the weight-update index
   Double_t ret_val;

   Double_t aaa, bbb;

   aaa = (fParam_1.epsmin - fParam_1.epsmax) / (Double_t) (fParam_1.nblearn *
                                                           fParam_1.nevl - 1);
   bbb = fParam_1.epsmax - aaa;
   ret_val = aaa * (Double_t) (*i__) + bbb;
   return ret_val;
}
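
// With N = nblearn * nevl weight updates in total, Fdecroi interpolates the
// learning rate linearly from epsmax at the first update down to epsmin at
// the last one:
//
//    eps(i) = epsmax + (i - 1) * (epsmin - epsmax) / (N - 1)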

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::GraphNN( Int_t *ilearn, Double_t * /*xxx*/,
                                          Double_t * /*yyy*/, char * /*det*/, Int_t /*det_len*/ )
{
   // compute the per-class mean network outputs and place the
   // discrimination cut halfway between the two means
   Int_t i__1, i__2;

   Double_t xmok[max_nNodes_];
   // Float_t xpaw;
   Double_t xmko[max_nNodes_];
   Int_t i__, j;
   Int_t ix;
   // Int_t jjj;
   // Float_t vbn[10];
   Int_t nko[max_nNodes_], nok[max_nNodes_];

   // for (i__ = 1; i__ <= 10; ++i__) {
   //    vbn[i__ - 1] = (Float_t)0.;
   // }
   if (*ilearn == 1) {
      // AH: removed output
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      nok[i__ - 1] = 0;
      nko[i__ - 1] = 0;
      xmok[i__ - 1] = 0.;
      xmko[i__ - 1] = 0.;
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         // xpaw = (Float_t) y_ref(fParam_1.layerm, j);
         if (fVarn_1.nclass[i__ - 1] == j) {
            ++nok[j - 1];
            xmok[j - 1] += y_ref(fParam_1.layerm, j);
         }
         else {
            ++nko[j - 1];
            xmko[j - 1] += y_ref(fParam_1.layerm, j);
            // jjj = j + fNeur_1.neuron[fParam_1.layerm - 1];
         }
         // if (j <= 9) {
         //    vbn[j - 1] = xpaw;
         // }
      }
      // vbn[9] = (Float_t) fVarn_1.nclass[i__ - 1];
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (j = 1; j <= i__1; ++j) {
      xmok[j - 1] /= (Double_t) nok[j - 1];
      xmko[j - 1] /= (Double_t) nko[j - 1];
      fNeur_1.cut[j - 1] = (xmok[j - 1] + xmko[j - 1]) / 2.;
   }
   ix = fNeur_1.neuron[fParam_1.layerm - 1];
   i__1 = ix;
}

#undef y_ref

Double_t TMVA::MethodCFMlpANN_Utils::Sen3a( void )
{
   // uniform pseudo-random number generator in [0, 1), translated from
   // the FORTRAN original

   // Initialized data
   Int_t    m12 = 4096;
   Double_t f1  = 2.44140625e-4;
   Double_t f2  = 5.96046448e-8;
   Double_t f3  = 1.45519152e-11;
   Int_t    j1  = 3823;
   Int_t    j2  = 4006;
   Int_t    j3  = 2903;
   static Int_t fg_i1 = 3823;
   static Int_t fg_i2 = 4006;
   static Int_t fg_i3 = 2903;

   Double_t ret_val;
   Int_t    k3, l3, k2, l2, k1, l1;

   // reference: /k.d.senne/j. stochastics/ vol 1,no 3 (1974),pp.215-38
   k3 = fg_i3 * j3;
   l3 = k3 / m12;
   k2 = fg_i2 * j3 + fg_i3 * j2 + l3;
   l2 = k2 / m12;
   k1 = fg_i1 * j3 + fg_i2 * j2 + fg_i3 * j1 + l2;
   l1 = k1 / m12;
   fg_i1 = k1 - l1 * m12;
   fg_i2 = k2 - l2 * m12;
   fg_i3 = k3 - l3 * m12;
   ret_val = f1 * (Double_t) fg_i1 + f2 * (Float_t) fg_i2 + f3 * (Double_t) fg_i3;

   return ret_val;
}
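
// Sen3a emulates 36-bit integer arithmetic using three 12-bit words
// (m12 = 4096): the state (fg_i1, fg_i2, fg_i3) is multiplied by the constant
// (j1, j2, j3) modulo 2^36, a multiplicative congruential scheme, and the new
// state is scaled by f1 = 4096^-1, f2 = 4096^-2, f3 = 4096^-3 to yield a
// uniform deviate in [0, 1).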

void TMVA::MethodCFMlpANN_Utils::Foncf( Int_t *i__, Double_t *u, Double_t *f )
{
   // sigmoid-type activation function with a per-layer temperature,
   // clamped to avoid overflow in the exponential
   Double_t yy;

   if (*u / fDel_1.temp[*i__ - 1] > 170.) {
      *f = .99999999989999999;
   }
   else if (*u / fDel_1.temp[*i__ - 1] < -170.) {
      *f = -.99999999989999999;
   }
   else {
      yy = TMath::Exp(-(*u) / fDel_1.temp[*i__ - 1]);
      *f = (1. - yy) / (yy + 1.);
   }
}
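
// Algebraically, the expression above is a temperature-scaled hyperbolic
// tangent: with T = fDel_1.temp[*i__ - 1],
//
//    f(u) = (1 - exp(-u/T)) / (1 + exp(-u/T)) = tanh( u / (2*T) )
//
// so an equivalent implementation (a sketch) would be
//
//    *f = TMath::TanH( *u / (2. * fDel_1.temp[*i__ - 1]) );
//
// with the +-170 guard only protecting Exp() against overflow.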

#undef w_ref

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Cout2( Int_t * /*i1*/, Double_t *yyy )
{
   // compute the mean squared error of the network on the test sample
   Int_t i__1, i__2;
   Double_t d__1;

   Double_t c__;
   Int_t i__, j;

   c__ = 0.;
   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant2(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         if (fVarn_1.mclass[i__ - 1] == j) {
            fNeur_1.o[j - 1] = 1.;
         }
         else {
            fNeur_1.o[j - 1] = -1.;
         }
         /* Computing 2nd power */
         d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
         c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
      }
   }
   c__ /= (Double_t) (fParam_1.nevt * fParam_1.lclass) * 2.;
   *yyy = c__;
}

#undef y_ref

#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)

void TMVA::MethodCFMlpANN_Utils::Lecev2( Int_t *ktest, Double_t *tout2, Double_t *tin2 )
{
   // read the test events and normalise them with the variable ranges
   // determined from the training sample in Leclearn
   Int_t i__1, i__2;

   Int_t i__, j, l;
   // Int_t mocla[max_nNodes_];
   Int_t ikend;
   Double_t xpg[max_nVar_];

   /* NTRAIN: Nb of events used during the learning */
   /* NTEST:  Nb of events used for the test */
   /* TIN:    Input variables */
   /* TOUT:   type of the event */

   *ktest = 0;
   i__1 = fParam_1.lclass;
   // for (k = 1; k <= i__1; ++k) {
   //    mocla[k - 1] = 0;
   // }
   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      DataInterface(tout2, tin2, &fg_999, &fg_0, &fParam_1.nevt, &fParam_1.nvar,
                    xpg, &fVarn_1.mclass[i__ - 1], &ikend);

      if (ikend == -1) {
         break;
      }

      i__2 = fParam_1.nvar;
      for (j = 1; j <= i__2; ++j) {
         xx_ref(i__, j) = xpg[j - 1];
      }
   }

   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      i__2 = fParam_1.nvar;
      for (l = 1; l <= i__2; ++l) {
         if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (Float_t)0.) {
            xx_ref(i__, l) = (Float_t)0.;
         }
         else {
            xx_ref(i__, l) = xx_ref(i__, l) - (fVarn_1.xmax[l - 1] +
                                               fVarn_1.xmin[l - 1]) / 2.;
            xx_ref(i__, l) = xx_ref(i__, l) / ((fVarn_1.xmax[l - 1] -
                                                fVarn_1.xmin[l - 1]) / 2.);
         }
      }
   }
}

#undef xx_ref

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)

void TMVA::MethodCFMlpANN_Utils::En_avant2( Int_t *ievent )
{
   // forward propagation of one test event (from the fVarn3_1 table)
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j;
   Int_t layer;

   i__1 = fNeur_1.neuron[0];
   for (i__ = 1; i__ <= i__1; ++i__) {
      y_ref(1, i__) = xx_ref(*ievent, i__);
   }
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         x_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            x_ref(layer + 1, j) = x_ref(layer + 1, j) + y_ref(layer, i__)
               * w_ref(layer + 1, j, i__);
         }
         x_ref(layer + 1, j) = x_ref(layer + 1, j) + ww_ref(layer + 1, j);
         i__3 = layer + 1;
         Foncf(&i__3, &x_ref(layer + 1, j), &f);
         y_ref(layer + 1, j) = f;
         /* L2: */
      }
   }
}

#undef xx_ref
#undef ww_ref
#undef y_ref
#undef x_ref
#undef w_ref

void TMVA::MethodCFMlpANN_Utils::Arret( const char* mot )
{
   // fatal error occurred: stop execution
   printf("%s: %s",fg_MethodName, mot);
   std::exit(1);
}

void TMVA::MethodCFMlpANN_Utils::CollectVar( Int_t * /*nvar*/, Int_t * /*class__*/, Double_t * /*xpg*/ )
{
   // dummy routine: the original variable-collection code is kept below
   // for reference, but disabled
   // Int_t i__1;

   // Int_t i__;
   // Float_t x[201];

   // // Parameter adjustments
   // --xpg;

   // for (i__ = 1; i__ <= 201; ++i__) {
   //    x[i__ - 1] = 0.0;
   // }
   // x[0] = (Float_t) (*class__);
   // i__1 = *nvar;
   // for (i__ = 1; i__ <= i__1; ++i__) {
   //    x[i__] = (Float_t) xpg[i__];
   // }
}