ROOT  6.06/09
Reference Guide
MethodCFMlpANN_Utils.cxx
// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis
 * Package: TMVA
 * Class  : TMVA::MethodCFMlpANN_utils
 * Web    : http://tmva.sourceforge.net
 *
 * Reference for the original FORTRAN version "mlpl3.F":
 *   Authors  : J. Proriol and contributions from ALEPH-Clermont-Ferrand
 *              Team members
 *   Copyright: Laboratoire Physique Corpusculaire
 *              Universite de Blaise Pascal, IN2P3/CNRS
 *
 * Modifications by present authors:
 *   use dynamical data tables (not for all of them, but for the big ones)
 *
 * Description:
 *   Utility routine translated from original mlpl3.F FORTRAN routine
 *
 *   MultiLayerPerceptron : Training code
 *
 *     NTRAIN: Nb of events used during the learning
 *     NTEST : Nb of events used for the test
 *     TIN   : Input variables
 *     TOUT  : type of the event
 *
 * ----------------------------------------------------------------------------
 *
 * Authors (alphabetical):
 *   Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland
 *   Xavier Prudent  <prudent@lapp.in2p3.fr>  - LAPP, France
 *   Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany
 *   Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada
 *
 * Copyright (c) 2005:
 *   CERN, Switzerland
 *   U. of Victoria, Canada
 *   MPI-K Heidelberg, Germany
 *   LAPP, Annecy, France
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted according to the terms listed in LICENSE
 * (http://tmva.sourceforge.net/LICENSE)
 *
 **********************************************************************************/

//_______________________________________________________________________
//
// Implementation of the Clermont-Ferrand artificial neural network
//
// Reference for the original FORTRAN version "mlpl3.F":
// Authors   : J. Proriol and contributions from ALEPH-Clermont-Ferrand
//             Team members
// Copyright : Laboratoire Physique Corpusculaire
//             Universite de Blaise Pascal, IN2P3/CNRS
//_______________________________________________________________________
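//
// Training flow, as implemented below:
//   Train_nn()   : entry point; checks the hardcoded limits and creates
//                  the dynamic data tables
//   Entree_new() : sets the network topology and learning parameters
//   TestNN()     : consistency checks against the hardcoded maxima
//   Innit()      : runs the training cycles; En_avant() performs the
//                  forward pass, En_arriere() the backpropagation and
//                  weight update
//_______________________________________________________________________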

#include <string>
#include <iostream>
#include <cstdlib>

#include "TMath.h"
#include "TString.h"

#include "TMVA/MethodCFMlpANN_Utils.h"
#include "TMVA/Timer.h"

using std::cout;
using std::endl;

ClassImp(TMVA::MethodCFMlpANN_Utils)

Int_t TMVA::MethodCFMlpANN_Utils::fg_100 = 100;
Int_t TMVA::MethodCFMlpANN_Utils::fg_0 = 0;
const Int_t TMVA::MethodCFMlpANN_Utils::fg_max_nVar_ = max_nVar_;
const Int_t TMVA::MethodCFMlpANN_Utils::fg_max_nNodes_ = max_nNodes_;
Int_t TMVA::MethodCFMlpANN_Utils::fg_999 = 999;
const char* const TMVA::MethodCFMlpANN_Utils::fg_MethodName = "--- CFMlpANN ";

TMVA::MethodCFMlpANN_Utils::MethodCFMlpANN_Utils()
{
   // default constructor
   Int_t i(0);
   for(i=0; i<max_nVar_;++i) fVarn_1.xmin[i] = 0;
   fCost_1.ancout = 0;
   fCost_1.ieps = 0;
   fCost_1.tolcou = 0;

   for(i=0; i<max_nNodes_;++i) fDel_1.coef[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fDel_1.del[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fDel_1.delta[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fDel_1.delw[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fDel_1.delww[i] = 0;
   fDel_1.demin = 0;
   fDel_1.demax = 0;
   fDel_1.idde = 0;
   for(i=0; i<max_nLayers_;++i) fDel_1.temp[i] = 0;

   for(i=0; i<max_nNodes_;++i) fNeur_1.cut[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.deltaww[i] = 0;
   for(i=0; i<max_nLayers_;++i) fNeur_1.neuron[i] = 0;
   for(i=0; i<max_nNodes_;++i) fNeur_1.o[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fNeur_1.w[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.ww[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.x[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.y[i] = 0;

   fParam_1.eeps = 0;
   fParam_1.epsmin = 0;
   fParam_1.epsmax = 0;
   fParam_1.eta = 0;
   fParam_1.ichoi = 0;
   fParam_1.itest = 0;
   fParam_1.layerm = 0;
   fParam_1.lclass = 0;
   fParam_1.nblearn = 0;
   fParam_1.ndiv = 0;
   fParam_1.ndivis = 0;
   fParam_1.nevl = 0;
   fParam_1.nevt = 0;
   fParam_1.nunap = 0;
   fParam_1.nunilec = 0;
   fParam_1.nunishort = 0;
   fParam_1.nunisor = 0;
   fParam_1.nvar = 0;

   fVarn_1.iclass = 0;
   for(i=0; i<max_Events_;++i) fVarn_1.mclass[i] = 0;
   for(i=0; i<max_Events_;++i) fVarn_1.nclass[i] = 0;
   for(i=0; i<max_nVar_;++i) fVarn_1.xmax[i] = 0;

   fLogger = 0;
}

TMVA::MethodCFMlpANN_Utils::~MethodCFMlpANN_Utils()
{
   // destructor
}

void TMVA::MethodCFMlpANN_Utils::Train_nn( Double_t *tin2, Double_t *tout2, Int_t *ntrain,
                                           Int_t *ntest, Int_t *nvar2, Int_t *nlayer,
                                           Int_t *nodes, Int_t *ncycle )
{
   // training interface - called from MethodCFMlpANN class object

   // sanity checks
   if (*ntrain + *ntest > max_Events_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of training + testing" \
              " events exceeds hardcoded maximum - reset to maximum allowed number");
      *ntrain = *ntrain*(max_Events_/(*ntrain + *ntest));
      *ntest  = *ntest *(max_Events_/(*ntrain + *ntest));
   }
   if (*nvar2 > max_nVar_) {
      printf( "*** CFMlpANN_f2c: ERROR in Train_nn: number of variables" \
              " exceeds hardcoded maximum ==> abort");
      std::exit(1);
   }
   if (*nlayer > max_nLayers_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of layers" \
              " exceeds hardcoded maximum - reset to maximum allowed number");
      *nlayer = max_nLayers_;
   }
   if (*nodes > max_nNodes_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of nodes" \
              " exceeds hardcoded maximum - reset to maximum allowed number");
      *nodes = max_nNodes_;
   }

   // create dynamic data tables (AH)
   fVarn2_1.Create( *ntrain + *ntest, *nvar2 );
   fVarn3_1.Create( *ntrain + *ntest, *nvar2 );

   // Int_t imax;
   char det[20];

   Entree_new(nvar2, det, ntrain, ntest, nlayer, nodes, ncycle, (Int_t)20);
   if (fNeur_1.neuron[fParam_1.layerm - 1] == 1) {
      // imax = 2;
      fParam_1.lclass = 2;
   }
   else {
      // imax = fNeur_1.neuron[fParam_1.layerm - 1] << 1;
      fParam_1.lclass = fNeur_1.neuron[fParam_1.layerm - 1];
   }
   fParam_1.nvar = fNeur_1.neuron[0];
   TestNN();
   Innit(det, tout2, tin2, (Int_t)20);

   // delete data tables
   fVarn2_1.Delete();
   fVarn3_1.Delete();
}
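
// A minimal sketch of how this entry point is driven (the actual call is
// made from the MethodCFMlpANN class object; variable names below are
// illustrative only):
//
//    Int_t    ntrain = 1000, ntest = 1000, nvar = 4;
//    Int_t    nlayers = 3, nodes[3] = { 4, 10, 1 }, ncycles = 100;
//    Double_t dummy = 0;
//    utils.Train_nn( &dummy, &dummy, &ntrain, &ntest, &nvar,
//                    &nlayers, nodes, &ncycles );
//
// All arguments are passed by pointer because the routine is an f2c
// translation of the FORTRAN original; the event data themselves are
// fetched through DataInterface(), to which tin2/tout2 are forwarded.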

void TMVA::MethodCFMlpANN_Utils::Entree_new( Int_t *, char * /*det*/, Int_t *ntrain,
                                             Int_t *ntest, Int_t *numlayer, Int_t *nodes,
                                             Int_t *numcycle, Int_t /*det_len*/)
{
   // first initialisation of ANN
   Int_t i__1;

   Int_t rewrite, i__, j, ncoef;
   Int_t ntemp, num, retrain;

   /* NTRAIN: Nb of events used during the learning */
   /* NTEST:  Nb of events used for the test */
   /* TIN:    Input variables */
   /* TOUT:   type of the event */

   fCost_1.ancout = 1e30;

   /* .............. HardCoded Values .................... */
   retrain = 0;
   rewrite = 1000;
   for (i__ = 1; i__ <= max_nNodes_; ++i__) {
      fDel_1.coef[i__ - 1] = (Float_t)0.;
   }
   for (i__ = 1; i__ <= max_nLayers_; ++i__) {
      fDel_1.temp[i__ - 1] = (Float_t)0.;
   }
   fParam_1.layerm = *numlayer;
   if (fParam_1.layerm > max_nLayers_) {
      printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
             fParam_1.layerm, max_nLayers_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   fParam_1.nevl = *ntrain;
   fParam_1.nevt = *ntest;
   fParam_1.nblearn = *numcycle;
   fVarn_1.iclass = 2;
   fParam_1.nunilec = 10;
   fParam_1.epsmin = 1e-10;
   fParam_1.epsmax = 1e-4;
   fParam_1.eta = .5;
   fCost_1.tolcou = 1e-6;
   fCost_1.ieps = 2;
   fParam_1.nunisor = 30;
   fParam_1.nunishort = 48;
   fParam_1.nunap = 40;

   ULog() << kINFO << "Total number of events for training: " << fParam_1.nevl << Endl;
   ULog() << kINFO << "Total number of training cycles    : " << fParam_1.nblearn << Endl;
   if (fParam_1.nevl > max_Events_) {
      printf("Error: number of learning events exceeds maximum: %i, %i ==> abort",
             fParam_1.nevl, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.nevt > max_Events_) {
      printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
             fParam_1.nevt, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      num = nodes[j-1];
      if (num < 2) {
         num = 2;
      }
      if (j == fParam_1.layerm && num != 2) {
         num = 2;
      }
      fNeur_1.neuron[j - 1] = num;
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      ULog() << kINFO << "Number of neurons for layer(" << j << "): " << fNeur_1.neuron[j - 1] << Endl;
   }
   if (fNeur_1.neuron[fParam_1.layerm - 1] != 2) {
      printf("Error: wrong number of classes at output layer: %i != 2 ==> abort\n",
             fNeur_1.neuron[fParam_1.layerm - 1]);
      Arret("stop");
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (j = 1; j <= i__1; ++j) {
      fDel_1.coef[j - 1] = 1.;
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      fDel_1.temp[j - 1] = 1.;
   }
   fParam_1.ichoi = retrain;
   fParam_1.ndivis = rewrite;
   fDel_1.idde = 1;
   if (! (fParam_1.ichoi == 0 || fParam_1.ichoi == 1)) {
      printf( "Big troubles !!! \n" );
      Arret("new training or continued one !");
   }
   if (fParam_1.ichoi == 0) {
      ULog() << kINFO << "New training will be performed" << Endl;
   }
   else {
      printf("%s: New training will be continued from a weight file\n", fg_MethodName);
   }
   ncoef = 0;
   ntemp = 0;
   for (i__ = 1; i__ <= max_nNodes_; ++i__) {
      if (fDel_1.coef[i__ - 1] != (Float_t)0.) {
         ++ncoef;
      }
   }
   for (i__ = 1; i__ <= max_nLayers_; ++i__) {
      if (fDel_1.temp[i__ - 1] != (Float_t)0.) {
         ++ntemp;
      }
   }
   if (ncoef != fNeur_1.neuron[fParam_1.layerm - 1]) {
      Arret(" entree error code 1 : needs to be reported");
   }
   if (ntemp != fParam_1.layerm) {
      Arret("entree error code 2 : needs to be reported");
   }
}

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Wini()
{
   // initialise weights and biases with random values in [-0.2, 0.2]
   Int_t i__1, i__2, i__3;
   Int_t i__, j;
   Int_t layer;

   i__1 = fParam_1.layerm;
   for (layer = 2; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer - 2];
      for (i__ = 1; i__ <= i__2; ++i__) {
         i__3 = fNeur_1.neuron[layer - 1];
         for (j = 1; j <= i__3; ++j) {
            w_ref(layer, j, i__) = (Sen3a() * 2. - 1.) * .2;
            ww_ref(layer, j) = (Sen3a() * 2. - 1.) * .2;
         }
      }
   }
}

#undef ww_ref
#undef w_ref

#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::En_avant( Int_t *ievent )
{
   // forward propagation of training event *ievent through the network:
   // x(l+1,j) = sum_i y(l,i)*w(l+1,j,i) + ww(l+1,j),  y(l+1,j) = Foncf(x)
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j;
   Int_t layer;

   i__1 = fNeur_1.neuron[0];
   for (i__ = 1; i__ <= i__1; ++i__) {
      y_ref(1, i__) = xeev_ref(*ievent, i__);
   }
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         x_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            x_ref(layer + 1, j) = ( x_ref(layer + 1, j) + y_ref(layer, i__)
                                    * w_ref(layer + 1, j, i__) );
         }
         x_ref(layer + 1, j) = x_ref(layer + 1, j) + ww_ref(layer + 1, j);
         i__3 = layer + 1;
         Foncf(&i__3, &x_ref(layer + 1, j), &f);
         y_ref(layer + 1, j) = f;
      }
   }
}

#undef ww_ref
#undef y_ref
#undef x_ref
#undef w_ref
#undef xeev_ref

#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)

void TMVA::MethodCFMlpANN_Utils::Leclearn( Int_t *ktest, Double_t *tout2, Double_t *tin2 )
{
   // read the training events via DataInterface(), record the per-variable
   // minima/maxima, and normalise all inputs to the range [-1, 1]
   Int_t i__1, i__2;

   Int_t i__, j, k, l;
   Int_t nocla[max_nNodes_], ikend;
   Double_t xpg[max_nVar_];

   *ktest = 0;
   i__1 = fParam_1.lclass;
   for (k = 1; k <= i__1; ++k) {
      nocla[k - 1] = 0;
   }
   i__1 = fParam_1.nvar;
   for (i__ = 1; i__ <= i__1; ++i__) {
      fVarn_1.xmin[i__ - 1] = 1e30;
      fVarn_1.xmax[i__ - 1] = -fVarn_1.xmin[i__ - 1];
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      DataInterface(tout2, tin2, &fg_100, &fg_0, &fParam_1.nevl, &fParam_1.nvar,
                    xpg, &fVarn_1.nclass[i__ - 1], &ikend);
      if (ikend == -1) {
         break;
      }

      CollectVar(&fParam_1.nvar, &fVarn_1.nclass[i__ - 1], xpg);

      i__2 = fParam_1.nvar;
      for (j = 1; j <= i__2; ++j) {
         xeev_ref(i__, j) = xpg[j - 1];
      }
      if (fVarn_1.iclass == 1) {
         i__2 = fParam_1.lclass;
         for (k = 1; k <= i__2; ++k) {
            if (fVarn_1.nclass[i__ - 1] == k) {
               ++nocla[k - 1];
            }
         }
      }
      i__2 = fParam_1.nvar;
      for (k = 1; k <= i__2; ++k) {
         if (xeev_ref(i__, k) < fVarn_1.xmin[k - 1]) {
            fVarn_1.xmin[k - 1] = xeev_ref(i__, k);
         }
         if (xeev_ref(i__, k) > fVarn_1.xmax[k - 1]) {
            fVarn_1.xmax[k - 1] = xeev_ref(i__, k);
         }
      }
   }

   if (fVarn_1.iclass == 1) {
      i__2 = fParam_1.lclass;
      for (k = 1; k <= i__2; ++k) {
         i__1 = fParam_1.lclass;
         for (l = 1; l <= i__1; ++l) {
            if (nocla[k - 1] != nocla[l - 1]) {
               *ktest = 1;
            }
         }
      }
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      i__2 = fParam_1.nvar;
      for (l = 1; l <= i__2; ++l) {
         if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (Float_t)0.) {
            xeev_ref(i__, l) = (Float_t)0.;
         }
         else {
            xeev_ref(i__, l) = xeev_ref(i__, l) - (fVarn_1.xmax[l - 1] +
                                                   fVarn_1.xmin[l - 1]) / 2.;
            xeev_ref(i__, l) = xeev_ref(i__, l) / ((fVarn_1.xmax[l - 1] -
                                                    fVarn_1.xmin[l - 1]) / 2.);
         }
      }
   }
}
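
// The final loop above maps each input variable linearly onto [-1, 1]:
//
//    x' = ( x - (xmax + xmin)/2 ) / ( (xmax - xmin)/2 )
//
// so that xmin -> -1 and xmax -> +1; variables with xmin = xmax = 0 are
// forced to zero to avoid a division by zero.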

#undef xeev_ref

#define delw_ref(a_1,a_2,a_3) fDel_1.delw[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define delww_ref(a_1,a_2) fDel_1.delww[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define del_ref(a_1,a_2) fDel_1.del[(a_2)*max_nLayers_ + a_1 - 7]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::En_arriere( Int_t *ievent )
{
   // backpropagation for event *ievent: compute the error terms `del` from
   // the output layer backwards, then update weights and biases with
   // learning rate eeps and momentum eta
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j, k, l;
   Double_t df, uu;

   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      if (fVarn_1.nclass[*ievent - 1] == i__) {
         fNeur_1.o[i__ - 1] = 1.;
      }
      else {
         fNeur_1.o[i__ - 1] = -1.;
      }
   }
   l = fParam_1.layerm;
   i__1 = fNeur_1.neuron[l - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      f = y_ref(l, i__);
      df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
      del_ref(l, i__) = df * (fNeur_1.o[i__ - 1] - y_ref(l, i__)) *
         fDel_1.coef[i__ - 1];
      delww_ref(l, i__) = fParam_1.eeps * del_ref(l, i__);
      i__2 = fNeur_1.neuron[l - 2];
      for (j = 1; j <= i__2; ++j) {
         delw_ref(l, i__, j) = fParam_1.eeps * del_ref(l, i__) * y_ref(l - 1, j);
         /* L20: */
      }
   }
   for (l = fParam_1.layerm - 1; l >= 2; --l) {
      i__2 = fNeur_1.neuron[l - 1];
      for (i__ = 1; i__ <= i__2; ++i__) {
         uu = 0.;
         i__1 = fNeur_1.neuron[l];
         for (k = 1; k <= i__1; ++k) {
            uu += w_ref(l + 1, k, i__) * del_ref(l + 1, k);
         }
         Foncf(&l, &x_ref(l, i__), &f);
         df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
         del_ref(l, i__) = df * uu;
         delww_ref(l, i__) = fParam_1.eeps * del_ref(l, i__);
         i__1 = fNeur_1.neuron[l - 2];
         for (j = 1; j <= i__1; ++j) {
            delw_ref(l, i__, j) = fParam_1.eeps * del_ref(l, i__) * y_ref(l - 1, j);
         }
      }
   }
   i__1 = fParam_1.layerm;
   for (l = 2; l <= i__1; ++l) {
      i__2 = fNeur_1.neuron[l - 1];
      for (i__ = 1; i__ <= i__2; ++i__) {
         deltaww_ref(l, i__) = delww_ref(l, i__) + fParam_1.eta * deltaww_ref(l, i__);
         ww_ref(l, i__) = ww_ref(l, i__) + deltaww_ref(l, i__);
         i__3 = fNeur_1.neuron[l - 2];
         for (j = 1; j <= i__3; ++j) {
            delta_ref(l, i__, j) = delw_ref(l, i__, j) + fParam_1.eta * delta_ref(l, i__, j);
            w_ref(l, i__, j) = w_ref(l, i__, j) + delta_ref(l, i__, j);
         }
      }
   }
}
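
// Update rule implemented above: since the activation is
// f(x) = tanh( x/(2*temp) ), its derivative is (1 + f)(1 - f)/(2*temp),
// which appears as `df` for both the output and the hidden layers.  The
// weight increments use the learning rate eeps and a momentum term eta:
//
//    delta(l,i,j) = eeps * del(l,i) * y(l-1,j) + eta * delta(l,i,j)
//    w(l,i,j)    += delta(l,i,j)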

#undef deltaww_ref
#undef del_ref
#undef ww_ref
#undef delww_ref
#undef delta_ref
#undef y_ref
#undef x_ref
#undef w_ref
#undef delw_ref

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Out( Int_t *iii, Int_t *maxcycle )
{
   // write weights to file

   if (*iii == *maxcycle) {
      // now in MethodCFMlpANN.cxx
   }
}

#undef ww_ref
#undef w_ref

#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Innit( char *det, Double_t *tout2, Double_t *tin2, Int_t )
{
   // initialisation of the training: read the data, reset the momentum
   // terms, initialise the weights, then run the training cycles
   Int_t i__1, i__2, i__3;

   Int_t i__, j;
   Int_t nevod, layer, ktest, i1, nrest;
   Int_t ievent(0);
   Int_t kkk;
   Double_t xxx = 0.0, yyy = 0.0;

   Leclearn(&ktest, tout2, tin2);
   Lecev2(&ktest, tout2, tin2);
   if (ktest == 1) {
      printf( " .... strange to be here (1) ... \n");
      std::exit(1);
   }
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         deltaww_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            delta_ref(layer + 1, j, i__) = 0.;
         }
      }
   }
   if (fParam_1.ichoi == 1) {
      Inl();
   }
   else {
      Wini();
   }
   kkk = 0;
   i__3 = fParam_1.nblearn;
   Timer timer( i__3, "CFMlpANN" );
   Int_t num = i__3/100;

   for (i1 = 1; i1 <= i__3; ++i1) {

      if ( ( num>0 && (i1-1)%num == 0) || (i1 == i__3) ) timer.DrawProgressBar( i1-1 );

      i__2 = fParam_1.nevl;
      for (i__ = 1; i__ <= i__2; ++i__) {
         ++kkk;
         if (fCost_1.ieps == 2) {
            fParam_1.eeps = Fdecroi(&kkk);
         }
         if (fCost_1.ieps == 1) {
            fParam_1.eeps = fParam_1.epsmin;
         }
         Bool_t doCont = kTRUE;
         if (fVarn_1.iclass == 2) {
            ievent = (Int_t) ((Double_t) fParam_1.nevl * Sen3a());
            if (ievent == 0) {
               doCont = kFALSE;
            }
         }
         if (doCont) {
            if (fVarn_1.iclass == 1) {
               nevod = fParam_1.nevl / fParam_1.lclass;
               nrest = i__ % fParam_1.lclass;
               fParam_1.ndiv = i__ / fParam_1.lclass;
               if (nrest != 0) {
                  ievent = fParam_1.ndiv + 1 + (fParam_1.lclass - nrest) * nevod;
               }
               else {
                  ievent = fParam_1.ndiv;
               }
            }
            En_avant(&ievent);
            En_arriere(&ievent);
         }
      }
      yyy = 0.;
      if (i1 % fParam_1.ndivis == 0 || i1 == 1 || i1 == fParam_1.nblearn) {
         Cout(&i1, &xxx);
         Cout2(&i1, &yyy);
         GraphNN(&i1, &xxx, &yyy, det, (Int_t)20);
         Out(&i1, &fParam_1.nblearn);
      }
      if (xxx < fCost_1.tolcou) {
         GraphNN(&fParam_1.nblearn, &xxx, &yyy, det, (Int_t)20);
         Out(&fParam_1.nblearn, &fParam_1.nblearn);
         break;
      }
   }
}

#undef deltaww_ref
#undef delta_ref

void TMVA::MethodCFMlpANN_Utils::TestNN()
{
   // sanity checks of the network configuration against the hardcoded limits
   Int_t i__1;

   Int_t i__;
   Int_t ktest;

   ktest = 0;
   if (fParam_1.layerm > max_nLayers_) {
      ktest = 1;
      printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
             fParam_1.layerm, max_nLayers_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.nevl > max_Events_) {
      ktest = 1;
      printf("Error: number of training events exceeds maximum: %i, %i ==> abort",
             fParam_1.nevl, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.nevt > max_Events_) {
      printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
             fParam_1.nevt, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed ");
   }
   if (fParam_1.lclass < fNeur_1.neuron[fParam_1.layerm - 1]) {
      ktest = 1;
      printf("Error: wrong number of classes at output layer: %i != %i ==> abort\n",
             fNeur_1.neuron[fParam_1.layerm - 1], fParam_1.lclass);
      Arret("problem needs to be reported ");
   }
   if (fParam_1.nvar > max_nVar_) {
      ktest = 1;
      printf("Error: number of variables exceeds maximum: %i, %i ==> abort",
             fParam_1.nvar, fg_max_nVar_ );
      Arret("modification of mlpl3_param_lim.inc is needed");
   }
   i__1 = fParam_1.layerm;
   for (i__ = 1; i__ <= i__1; ++i__) {
      if (fNeur_1.neuron[i__ - 1] > max_nNodes_) {
         ktest = 1;
         printf("Error: number of neurons at layer exceeds maximum: %i, %i ==> abort",
                i__, fg_max_nNodes_ );
      }
   }
   if (ktest == 1) {
      printf( " .... strange to be here (2) ... \n");
      std::exit(1);
   }
}

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Cout( Int_t * /*i1*/, Double_t *xxx )
{
   // compute the cost on the training sample: mean squared deviation of the
   // network outputs from the target values (+1 for the true class, -1 else)
   Int_t i__1, i__2;
   Double_t d__1;

   Double_t c__;
   Int_t i__, j;

   c__ = 0.;
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         if (fVarn_1.nclass[i__ - 1] == j) {
            fNeur_1.o[j - 1] = 1.;
         }
         else {
            fNeur_1.o[j - 1] = -1.;
         }
         // Computing 2nd power
         d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
         c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
      }
   }
   c__ /= (Double_t) (fParam_1.nevl * fParam_1.lclass) * 2.;
   *xxx = c__;
   fCost_1.ancout = c__;
}

#undef y_ref

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Inl()
{
   // remnant of the original weight-file reading routine: only the loop
   // structure over layers and blocks of ten neurons survives
   Int_t i__1, i__2;

   Int_t jmax, k, layer, kk, nq, nr;

   i__1 = fParam_1.nvar;
   i__1 = fParam_1.layerm;
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      nq = fNeur_1.neuron[layer] / 10;
      nr = fNeur_1.neuron[layer] - nq * 10;
      if (nr == 0) {
         kk = nq;
      }
      else {
         kk = nq + 1;
      }
      i__2 = kk;
      for (k = 1; k <= i__2; ++k) {
         // jmin = k * 10 - 9;
         jmax = k * 10;
         if (fNeur_1.neuron[layer] < jmax) {
            jmax = fNeur_1.neuron[layer];
         }
         // i__3 = fNeur_1.neuron[layer - 1];
      }
   }
}

#undef ww_ref
#undef w_ref

Double_t TMVA::MethodCFMlpANN_Utils::Fdecroi( Int_t *i__ )
{
   // linearly decreasing learning-rate schedule
   Double_t ret_val;

   Double_t aaa, bbb;

   aaa = (fParam_1.epsmin - fParam_1.epsmax) / (Double_t) (fParam_1.nblearn *
                                                           fParam_1.nevl - 1);
   bbb = fParam_1.epsmax - aaa;
   ret_val = aaa * (Double_t) (*i__) + bbb;
   return ret_val;
}
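
// Fdecroi(i) interpolates the learning rate linearly over the full training:
// with N = nblearn * nevl weight updates in total, the first call returns
// epsmax and the N-th call returns epsmin, since
//
//    aaa = (epsmin - epsmax)/(N - 1)   and   ret = aaa*i + (epsmax - aaa).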

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::GraphNN( Int_t *ilearn, Double_t * /*xxx*/,
                                          Double_t * /*yyy*/, char * /*det*/, Int_t /*det_len*/ )
{
   // compute for each output neuron the mean response for events of its own
   // class (xmok) and for the other classes (xmko), and place the cut halfway
   Int_t i__1, i__2;

   Double_t xmok[max_nNodes_];
   // Float_t xpaw;
   Double_t xmko[max_nNodes_];
   Int_t i__, j;
   Int_t ix;
   // Int_t jjj;
   // Float_t vbn[10];
   Int_t nko[max_nNodes_], nok[max_nNodes_];

   // for (i__ = 1; i__ <= 10; ++i__) {
   //    vbn[i__ - 1] = (Float_t)0.;
   // }
   if (*ilearn == 1) {
      // AH: removed output
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      nok[i__ - 1] = 0;
      nko[i__ - 1] = 0;
      xmok[i__ - 1] = 0.;
      xmko[i__ - 1] = 0.;
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         // xpaw = (Float_t) y_ref(fParam_1.layerm, j);
         if (fVarn_1.nclass[i__ - 1] == j) {
            ++nok[j - 1];
            xmok[j - 1] += y_ref(fParam_1.layerm, j);
         }
         else {
            ++nko[j - 1];
            xmko[j - 1] += y_ref(fParam_1.layerm, j);
            // jjj = j + fNeur_1.neuron[fParam_1.layerm - 1];
         }
         // if (j <= 9) {
         //    vbn[j - 1] = xpaw;
         // }
      }
      // vbn[9] = (Float_t) fVarn_1.nclass[i__ - 1];
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (j = 1; j <= i__1; ++j) {
      xmok[j - 1] /= (Double_t) nok[j - 1];
      xmko[j - 1] /= (Double_t) nko[j - 1];
      fNeur_1.cut[j - 1] = (xmok[j - 1] + xmko[j - 1]) / 2.;
   }
   ix = fNeur_1.neuron[fParam_1.layerm - 1];
   i__1 = ix;
}

#undef y_ref

Double_t TMVA::MethodCFMlpANN_Utils::Sen3a( void )
{
   // portable pseudo-random number generator, translated from the FORTRAN
   // original; returns a uniform deviate in [0, 1)

   // Initialized data
   Int_t m12 = 4096;
   Double_t f1 = 2.44140625e-4;
   Double_t f2 = 5.96046448e-8;
   Double_t f3 = 1.45519152e-11;
   Int_t j1 = 3823;
   Int_t j2 = 4006;
   Int_t j3 = 2903;
   static Int_t fg_i1 = 3823;
   static Int_t fg_i2 = 4006;
   static Int_t fg_i3 = 2903;

   Double_t ret_val;
   Int_t k3, l3, k2, l2, k1, l1;

   // reference: /k.d.senne/j. stochastics/ vol 1,no 3 (1974),pp.215-38
   k3 = fg_i3 * j3;
   l3 = k3 / m12;
   k2 = fg_i2 * j3 + fg_i3 * j2 + l3;
   l2 = k2 / m12;
   k1 = fg_i1 * j3 + fg_i2 * j2 + fg_i3 * j1 + l2;
   l1 = k1 / m12;
   fg_i1 = k1 - l1 * m12;
   fg_i2 = k2 - l2 * m12;
   fg_i3 = k3 - l3 * m12;
   ret_val = f1 * (Double_t) fg_i1 + f2 * (Float_t) fg_i2 + f3 * (Double_t) fg_i3;

   return ret_val;
}
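
// Sen3a() implements the multiplicative congruential generator from the
// Senne reference above: the 36-bit state is kept in three 12-bit words
// (m12 = 2^12), and the constants f1, f2, f3 are 2^-12, 2^-24 and 2^-36,
// so the three words assemble into a uniform deviate in [0, 1).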

void TMVA::MethodCFMlpANN_Utils::Foncf( Int_t *i__, Double_t *u, Double_t *f )
{
   // activation function of layer *i__, with overflow protection
   Double_t yy;

   if (*u / fDel_1.temp[*i__ - 1] > 170.) {
      *f = .99999999989999999;
   }
   else if (*u / fDel_1.temp[*i__ - 1] < -170.) {
      *f = -.99999999989999999;
   }
   else {
      yy = TMath::Exp(-(*u) / fDel_1.temp[*i__ - 1]);
      *f = (1. - yy) / (yy + 1.);
   }
}
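
// Algebraically, (1 - exp(-u/T)) / (1 + exp(-u/T)) = tanh( u/(2T) ), so
// Foncf() is a tanh activation with temperature T = temp[i-1] and output
// range (-1, 1); the +/-170 guard merely avoids overflow in TMath::Exp().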

#undef w_ref

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Cout2( Int_t * /*i1*/, Double_t *yyy )
{
   // compute the cost on the test sample, analogous to Cout()
   Int_t i__1, i__2;
   Double_t d__1;

   Double_t c__;
   Int_t i__, j;

   c__ = 0.;
   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant2(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         if (fVarn_1.mclass[i__ - 1] == j) {
            fNeur_1.o[j - 1] = 1.;
         }
         else {
            fNeur_1.o[j - 1] = -1.;
         }
         /* Computing 2nd power */
         d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
         c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
      }
   }
   c__ /= (Double_t) (fParam_1.nevt * fParam_1.lclass) * 2.;
   *yyy = c__;
}

#undef y_ref

#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)

void TMVA::MethodCFMlpANN_Utils::Lecev2( Int_t *ktest, Double_t *tout2, Double_t *tin2 )
{
   // read the test events via DataInterface() and normalise them with the
   // minima/maxima determined from the training sample in Leclearn()
   Int_t i__1, i__2;

   Int_t i__, j, l;
   // Int_t mocla[max_nNodes_];
   Int_t ikend;
   Double_t xpg[max_nVar_];

   /* NTRAIN: Nb of events used during the learning */
   /* NTEST:  Nb of events used for the test */
   /* TIN:    Input variables */
   /* TOUT:   type of the event */

   *ktest = 0;
   i__1 = fParam_1.lclass;
   // for (k = 1; k <= i__1; ++k) {
   //    mocla[k - 1] = 0;
   // }
   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      DataInterface(tout2, tin2, &fg_999, &fg_0, &fParam_1.nevt, &fParam_1.nvar,
                    xpg, &fVarn_1.mclass[i__ - 1], &ikend);

      if (ikend == -1) {
         break;
      }

      i__2 = fParam_1.nvar;
      for (j = 1; j <= i__2; ++j) {
         xx_ref(i__, j) = xpg[j - 1];
      }
   }

   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      i__2 = fParam_1.nvar;
      for (l = 1; l <= i__2; ++l) {
         if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (Float_t)0.) {
            xx_ref(i__, l) = (Float_t)0.;
         }
         else {
            xx_ref(i__, l) = xx_ref(i__, l) - (fVarn_1.xmax[l - 1] +
                                               fVarn_1.xmin[l - 1]) / 2.;
            xx_ref(i__, l) = xx_ref(i__, l) / ((fVarn_1.xmax[l - 1] -
                                                fVarn_1.xmin[l - 1]) / 2.);
         }
      }
   }
}

#undef xx_ref

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)

void TMVA::MethodCFMlpANN_Utils::En_avant2( Int_t *ievent )
{
   // forward propagation for test event *ievent, same as En_avant() but
   // reading the inputs from the test-event table fVarn3_1
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j;
   Int_t layer;

   i__1 = fNeur_1.neuron[0];
   for (i__ = 1; i__ <= i__1; ++i__) {
      y_ref(1, i__) = xx_ref(*ievent, i__);
   }
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         x_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            x_ref(layer + 1, j) = x_ref(layer + 1, j) + y_ref(layer, i__)
               * w_ref(layer + 1, j, i__);
         }
         x_ref(layer + 1, j) = x_ref(layer + 1, j) + ww_ref(layer + 1, j);
         i__3 = layer + 1;
         Foncf(&i__3, &x_ref(layer + 1, j), &f);
         y_ref(layer + 1, j) = f;
         /* L2: */
      }
   }
}

#undef xx_ref
#undef ww_ref
#undef y_ref
#undef x_ref
#undef w_ref

void TMVA::MethodCFMlpANN_Utils::Arret( const char* mot )
{
   // fatal error occurred: stop execution
   printf("%s: %s",fg_MethodName, mot);
   std::exit(1);
}

void TMVA::MethodCFMlpANN_Utils::CollectVar( Int_t * /*nvar*/, Int_t * /*class__*/, Double_t * /*xpg*/ )
{
   // dummy routine: the original variable-collection code is kept below
   // for reference only
   // Int_t i__1;

   // Int_t i__;
   // Float_t x[201];

   // // Parameter adjustments
   // --xpg;

   // for (i__ = 1; i__ <= 201; ++i__) {
   //    x[i__ - 1] = 0.0;
   // }
   // x[0] = (Float_t) (*class__);
   // i__1 = *nvar;
   // for (i__ = 1; i__ <= i__1; ++i__) {
   //    x[i__] = (Float_t) xpg[i__];
   // }
}