Int_t TMVA::MethodCFMlpANN_Utils::fg_100 = 100;
Int_t TMVA::MethodCFMlpANN_Utils::fg_0   = 0;
Int_t TMVA::MethodCFMlpANN_Utils::fg_999 = 999;
const char* const TMVA::MethodCFMlpANN_Utils::fg_MethodName = "--- CFMlpANN ";
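// Constructor: zero-initialize the f2c-style structures (fVarn_1, fDel_1,
// fNeur_1, fParam_1) that mirror the COMMON blocks of the original Fortran code.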
for (i=0; i<max_nVar_; ++i) fVarn_1.xmin[i] = 0;
for (i=0; i<max_nNodes_; ++i) fDel_1.coef[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_*max_nNodes_; ++i) fDel_1.delta[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_; ++i) fDel_1.delww[i] = 0;
for (i=0; i<max_nNodes_; ++i) fNeur_1.cut[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_; ++i) fNeur_1.deltaww[i] = 0;
for (i=0; i<max_nLayers_; ++i) fNeur_1.neuron[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_*max_nNodes_; ++i) fNeur_1.w[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_; ++i) fNeur_1.ww[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_; ++i) fNeur_1.x[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_; ++i) fNeur_1.y[i] = 0;
fParam_1.nblearn = 0;
fParam_1.nunilec = 0;
fParam_1.nunishort = 0;
fParam_1.nunisor = 0;
for (i=0; i<max_nVar_; ++i) fVarn_1.xmax[i] = 0;
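// Train_nn: entry point called from MethodCFMlpANN. The requests below are
// checked against the hardcoded limits (max_Events_, max_nVar_, max_nLayers_,
// max_nNodes_); the event count is capped proportionally, other violations abort.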
printf("*** CFMlpANN_f2c: Warning in Train_nn: number of training + testing"
       " events exceeds hardcoded maximum - reset to maximum allowed number");
*ntrain = *ntrain*(max_Events_/(*ntrain + *ntest));
printf("*** CFMlpANN_f2c: ERROR in Train_nn: number of variables"
       " exceeds hardcoded maximum ==> abort");
printf("*** CFMlpANN_f2c: Warning in Train_nn: number of layers"
       " exceeds hardcoded maximum - reset to maximum allowed number");
printf("*** CFMlpANN_f2c: Warning in Train_nn: number of nodes"
       " exceeds hardcoded maximum - reset to maximum allowed number");
fVarn2_1.Create( *ntrain + *ntest, *nvar2 );
fVarn3_1.Create( *ntrain + *ntest, *nvar2 );
Entree_new(nvar2, det, ntrain, ntest, nlayer, nodes, ncycle, (Int_t)20);
if (fNeur_1.neuron[fParam_1.layerm - 1] == 1) {
fParam_1.lclass = fNeur_1.neuron[fParam_1.layerm - 1];
fParam_1.nvar = fNeur_1.neuron[0];
Innit(det, tout2, tin2, (Int_t)20);
Int_t rewrite, i__, j, ncoef;
Int_t ntemp, num, retrain;
fCost_1.ancout = 1e30;
fDel_1.coef[i__ - 1] = (Float_t)0.;
fDel_1.temp[i__ - 1] = (Float_t)0.;
fParam_1.layerm = *numlayer;
printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed ");
fParam_1.nevl = *ntrain;
fParam_1.nevt = *ntest;
fParam_1.nblearn = *numcycle;
fParam_1.nunilec = 10;
fParam_1.epsmin = 1e-10;
fParam_1.epsmax = 1e-4;
fCost_1.tolcou = 1e-6;
fParam_1.nunisor = 30;
fParam_1.nunishort = 48;
ULog() << kINFO << "Total number of events for training: " << fParam_1.nevl << Endl;
ULog() << kINFO << "Total number of training cycles    : " << fParam_1.nblearn << Endl;
printf("Error: number of learning events exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed ");
printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed ");
i__1 = fParam_1.layerm;
for (j = 1; j <= i__1; ++j) {
if (j == fParam_1.layerm && num != 2) {
fNeur_1.neuron[j - 1] = num;
i__1 = fParam_1.layerm;
for (j = 1; j <= i__1; ++j) {
ULog() << kINFO << "Number of neurons in layer(" << j << "): " << fNeur_1.neuron[j - 1] << Endl;
if (fNeur_1.neuron[fParam_1.layerm - 1] != 2) {
    printf("Error: wrong number of classes at output layer: %i != 2 ==> abort\n",
           fNeur_1.neuron[fParam_1.layerm - 1]);
i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
for (j = 1; j <= i__1; ++j) {
    fDel_1.coef[j - 1] = 1.;
i__1 = fParam_1.layerm;
for (j = 1; j <= i__1; ++j) {
    fDel_1.temp[j - 1] = 1.;
fParam_1.ichoi = retrain;
fParam_1.ndivis = rewrite;
if (! (fParam_1.ichoi == 0 || fParam_1.ichoi == 1)) {
    printf("Big troubles !!! \n");
    Arret("new training or continued one !");
if (fParam_1.ichoi == 0) {
    ULog() << kINFO << "New training will be performed" << Endl;
printf("%s: New training will be continued from a weight file\n", fg_MethodName);
if (fDel_1.coef[i__ - 1] != (Float_t)0.) {
if (fDel_1.temp[i__ - 1] != (Float_t)0.) {
if (ncoef != fNeur_1.neuron[fParam_1.layerm - 1]) {
    Arret("entree error code 1 : needs to be reported");
if (ntemp != fParam_1.layerm) {
    Arret("entree error code 2 : needs to be reported");
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
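// Wini: weight initialization. The w_ref/ww_ref macros above flatten the
// 1-based Fortran (layer, node[, input]) indices into the 0-based flat arrays;
// the offsets 187 and 7 are consistent with max_nLayers_ = 6 and
// max_nNodes_ = 30 from the accompanying header (an assumption inferred from
// the index arithmetic): w_ref(1,1,1) -> ((1)*30 + 1)*6 + 1 - 187 = 0, the
// first element. Weights and biases start uniform in [-0.2, 0.2] via Sen3a().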
Int_t i__1, i__2, i__3;
i__1 = fParam_1.layerm;
for (layer = 2; layer <= i__1; ++layer) {
    i__2 = fNeur_1.neuron[layer - 2];
    for (i__ = 1; i__ <= i__2; ++i__) {
        i__3 = fNeur_1.neuron[layer - 1];
        for (j = 1; j <= i__3; ++j) {
            w_ref(layer, j, i__) = (Sen3a() * 2. - 1.) * .2;
            ww_ref(layer, j) = (Sen3a() * 2. - 1.) * .2;
#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
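// En_avant: forward propagation of one training event. Each pre-activation
// x(layer+1, j) accumulates y(layer, i) * w(layer+1, j, i) over the inputs
// (presumably, in the elided lines, plus the bias ww), then Foncf maps it to
// the node output y(layer+1, j).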
Int_t i__1, i__2, i__3;
i__1 = fNeur_1.neuron[0];
for (i__ = 1; i__ <= i__1; ++i__) {
i__1 = fParam_1.layerm - 1;
for (layer = 1; layer <= i__1; ++layer) {
    i__2 = fNeur_1.neuron[layer];
    for (j = 1; j <= i__2; ++j) {
        x_ref(layer + 1, j) = 0.;
        i__3 = fNeur_1.neuron[layer - 1];
        for (i__ = 1; i__ <= i__3; ++i__) {
            * w_ref(layer + 1, j, i__) );
        Foncf(&i__3, &x_ref(layer + 1, j), &f);
#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)
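// Leclearn: reads the training sample through DataInterface (selector fg_100),
// tracks per-variable minima/maxima, and (in the final loop) rescales each
// input linearly to [-1, 1]: x' = (x - (xmax+xmin)/2) / ((xmax-xmin)/2).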
i__1 = fParam_1.lclass;
for (k = 1; k <= i__1; ++k) {
i__1 = fParam_1.nvar;
for (i__ = 1; i__ <= i__1; ++i__) {
    fVarn_1.xmin[i__ - 1] = 1e30;
    fVarn_1.xmax[i__ - 1] = -fVarn_1.xmin[i__ - 1];
i__1 = fParam_1.nevl;
for (i__ = 1; i__ <= i__1; ++i__) {
    DataInterface(tout2, tin2, &fg_100, &fg_0, &fParam_1.nevl, &fParam_1.nvar,
                  xpg, &fVarn_1.nclass[i__ - 1], &ikend);
    CollectVar(&fParam_1.nvar, &fVarn_1.nclass[i__ - 1], xpg);
    i__2 = fParam_1.nvar;
    for (j = 1; j <= i__2; ++j) {
    if (fVarn_1.iclass == 1) {
        i__2 = fParam_1.lclass;
        for (k = 1; k <= i__2; ++k) {
            if (fVarn_1.nclass[i__ - 1] == k) {
    i__2 = fParam_1.nvar;
    for (k = 1; k <= i__2; ++k) {
        if (xeev_ref(i__, k) < fVarn_1.xmin[k - 1]) {
            fVarn_1.xmin[k - 1] = xeev_ref(i__, k);
        if (xeev_ref(i__, k) > fVarn_1.xmax[k - 1]) {
            fVarn_1.xmax[k - 1] = xeev_ref(i__, k);
if (fVarn_1.iclass == 1) {
    i__2 = fParam_1.lclass;
    for (k = 1; k <= i__2; ++k) {
        i__1 = fParam_1.lclass;
        for (l = 1; l <= i__1; ++l) {
            if (nocla[k - 1] != nocla[l - 1]) {
i__1 = fParam_1.nevl;
for (i__ = 1; i__ <= i__1; ++i__) {
    i__2 = fParam_1.nvar;
    for (l = 1; l <= i__2; ++l) {
        if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (
        fVarn_1.xmin[l - 1]) / 2.;
        fVarn_1.xmin[l - 1]) / 2.);
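// En_arriere: back-propagation for one event. The factor used below,
//   df = (f + 1)(1 - f) / (2*T) = (1 - f^2) / (2*T),
// is the derivative of Foncf's tanh(u/(2T)) written in terms of the activation
// value f; del_ref(l, i__) is the output-layer error term.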
#define delw_ref(a_1,a_2,a_3) fDel_1.delw[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define delww_ref(a_1,a_2) fDel_1.delww[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define del_ref(a_1,a_2) fDel_1.del[(a_2)*max_nLayers_ + a_1 - 7]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]
Int_t i__1, i__2, i__3;
i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
for (i__ = 1; i__ <= i__1; ++i__) {
    if (fVarn_1.nclass[*ievent - 1] == i__) {
        fNeur_1.o[i__ - 1] = 1.;
    fNeur_1.o[i__ - 1] = -1.;
i__1 = fNeur_1.neuron[l - 1];
for (i__ = 1; i__ <= i__1; ++i__) {
    df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
    del_ref(l, i__) = df * (fNeur_1.o[i__ - 1] - y_ref(l, i__)) * fDel_1.coef[i__ - 1];
i__2 = fNeur_1.neuron[l - 2];
for (j = 1; j <= i__2; ++j) {
for (l = fParam_1.layerm - 1; l >= 2; --l) {
    i__2 = fNeur_1.neuron[l - 1];
    for (i__ = 1; i__ <= i__2; ++i__) {
        i__1 = fNeur_1.neuron[l];
        for (k = 1; k <= i__1; ++k) {
        Foncf(&l, &x_ref(l, i__), &f);
        df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
        i__1 = fNeur_1.neuron[l - 2];
        for (j = 1; j <= i__1; ++j) {
i__1 = fParam_1.layerm;
for (l = 2; l <= i__1; ++l) {
    i__2 = fNeur_1.neuron[l - 1];
    for (i__ = 1; i__ <= i__2; ++i__) {
        i__3 = fNeur_1.neuron[l - 2];
        for (j = 1; j <= i__3; ++j) {
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
if (*iii == *maxcycle) {
#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]
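// Innit: main training driver. It loads the learning and test samples, then
// runs nblearn cycles; in each cycle every training event is propagated
// forward and backward, with the learning rate either fixed to epsmin
// (ieps == 1) or decayed via Fdecroi (ieps == 2). Progress is drawn through
// the timer, and training stops early once the cost xxx falls below tolcou.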
Int_t i__1, i__2, i__3;
Int_t nevod, layer, ktest, i1, nrest;
Leclearn(&ktest, tout2, tin2);
Lecev2(&ktest, tout2, tin2);
printf(" .... strange to be here (1) ... \n");
i__1 = fParam_1.layerm - 1;
for (layer = 1; layer <= i__1; ++layer) {
    i__2 = fNeur_1.neuron[layer];
    for (j = 1; j <= i__2; ++j) {
        i__3 = fNeur_1.neuron[layer - 1];
        for (i__ = 1; i__ <= i__3; ++i__) {
if (fParam_1.ichoi == 1) {
i__3 = fParam_1.nblearn;
Int_t num = i__3/100;
for (i1 = 1; i1 <= i__3; ++i1) {
    if ( (num > 0 && (i1-1) % num == 0) || (i1 == i__3) ) timer.DrawProgressBar( i1-1 );
    i__2 = fParam_1.nevl;
    for (i__ = 1; i__ <= i__2; ++i__) {
        if (fCost_1.ieps == 2) {
            fParam_1.eeps = Fdecroi(&kkk);
        if (fCost_1.ieps == 1) {
            fParam_1.eeps = fParam_1.epsmin;
        if (fVarn_1.iclass == 2) {
        if (fVarn_1.iclass == 1) {
            nevod = fParam_1.nevl / fParam_1.lclass;
            nrest = i__ % fParam_1.lclass;
            fParam_1.ndiv = i__ / fParam_1.lclass;
            ievent = fParam_1.ndiv + 1 + (fParam_1.lclass - nrest) *
            ievent = fParam_1.ndiv;
    if (i1 % fParam_1.ndivis == 0 || i1 == 1 || i1 == fParam_1.nblearn) {
        GraphNN(&i1, &xxx, &yyy, det, (Int_t)20);
        Out(&i1, &fParam_1.nblearn);
    if (xxx < fCost_1.tolcou) {
        GraphNN(&fParam_1.nblearn, &xxx, &yyy, det, (Int_t)20);
        Out(&fParam_1.nblearn, &fParam_1.nblearn);
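// The checks below repeat the capacity tests for the test phase; each
// violation prints the offending value against its limit and aborts via Arret.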
printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed ");
printf("Error: number of training events exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed ");
printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed ");
if (fParam_1.lclass < fNeur_1.neuron[fParam_1.layerm - 1]) {
    printf("Error: wrong number of classes at output layer: %i != %i ==> abort\n",
           fNeur_1.neuron[fParam_1.layerm - 1], fParam_1.lclass);
    Arret("problem needs to be reported ");
printf("Error: number of variables exceeds maximum: %i, %i ==> abort",
       fParam_1.nvar, fg_max_nVar_ );
Arret("modification of mlpl3_param_lim.inc is needed");
i__1 = fParam_1.layerm;
for (i__ = 1; i__ <= i__1; ++i__) {
printf("Error: number of neurons at layer exceeds maximum: %i, %i ==> abort",
       i__, fg_max_nNodes_ );
printf(" .... strange to be here (2) ... \n");
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
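// Cout: mean quadratic cost over the training sample with +-1 targets o_j,
//   C = sum_{events, j} coef_j * (y_j - o_j)^2 / (2 * nevl * lclass);
// the previous cycle's value is remembered in fCost_1.ancout.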
i__1 = fParam_1.nevl;
for (i__ = 1; i__ <= i__1; ++i__) {
    i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
    for (j = 1; j <= i__2; ++j) {
        if (fVarn_1.nclass[i__ - 1] == j) {
            fNeur_1.o[j - 1] = 1.;
        fNeur_1.o[j - 1] = -1.;
        d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
        c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
c__ /= (Double_t) (fParam_1.nevl * fParam_1.lclass) * 2.;
fCost_1.ancout = c__;
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
Int_t jmax, k, layer, kk, nq, nr;
i__1 = fParam_1.nvar;
i__1 = fParam_1.layerm;
i__1 = fParam_1.layerm - 1;
for (layer = 1; layer <= i__1; ++layer) {
    nq = fNeur_1.neuron[layer] / 10;
    nr = fNeur_1.neuron[layer] - nq * 10;
for (k = 1; k <= i__2; ++k) {
if (fNeur_1.neuron[layer] < jmax) {
    jmax = fNeur_1.neuron[layer];
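// Fdecroi: linearly decaying learning rate; ret_val starts at epsmax on the
// first cycle and falls towards epsmin, with slope aaa and intercept bbb.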
aaa = (fParam_1.epsmin - fParam_1.epsmax) / (Double_t) (fParam_1.nblearn *
bbb = fParam_1.epsmax - aaa;
ret_val = aaa * (Double_t) (*i__) + bbb;
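// The block below derives a per-output-node decision cut as the midpoint
// between the mean network response of events belonging to class j (xmok)
// and of all other events (xmko).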
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
for (i__ = 1; i__ <= i__1; ++i__) {
i__1 = fParam_1.nevl;
for (i__ = 1; i__ <= i__1; ++i__) {
    i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
    for (j = 1; j <= i__2; ++j) {
        if (fVarn_1.nclass[i__ - 1] == j) {
            xmok[j - 1] += y_ref(fParam_1.layerm, j);
        xmko[j - 1] += y_ref(fParam_1.layerm, j);
i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
for (j = 1; j <= i__1; ++j) {
    xmok[j - 1] /= (Double_t) nok[j - 1];
    xmko[j - 1] /= (Double_t) nko[j - 1];
    fNeur_1.cut[j - 1] = (xmok[j - 1] + xmko[j - 1]) / 2.;
ix = fNeur_1.neuron[fParam_1.layerm - 1];
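// Sen3a: portable congruential pseudo-random generator kept from the Fortran
// original. Three seeds (3823, 4006, 2903) are combined and reduced modulo
// m12 (presumably 2^12 = 4096), yielding uniform deviates in [0, 1).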
static Int_t fg_i1 = 3823;
static Int_t fg_i2 = 4006;
static Int_t fg_i3 = 2903;
Int_t k3, l3, k2, l2, k1, l1;
k2 = fg_i2 * j3 + fg_i3 * j2 + l3;
k1 = fg_i1 * j3 + fg_i2 * j2 + fg_i3 * j1 + l2;
fg_i1 = k1 - l1 * m12;
fg_i2 = k2 - l2 * m12;
fg_i3 = k3 - l3 * m12;
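// Foncf: bounded activation with per-layer temperature T = fDel_1.temp[i-1]:
//   f(u) = (1 - exp(-u/T)) / (1 + exp(-u/T)) = tanh(u / (2T)),
// clamped to +-0.9999999999 once |u/T| exceeds 170 so the exponential cannot
// overflow. A minimal equivalent sketch (illustrative only, not the original
// API): double foncf(double u, double T) { return std::tanh(u / (2.*T)); }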
if (*u / fDel_1.temp[*i__ - 1] > 170.) {
    *f = .99999999989999999;
else if (*u / fDel_1.temp[*i__ - 1] < -170.) {
    *f = -.99999999989999999;
yy = TMath::Exp(-(*u) / fDel_1.temp[*i__ - 1]);
*f = (1. - yy) / (yy + 1.);
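// Cout2: the same quadratic cost as Cout, evaluated on the nevt test events
// (targets taken from mclass instead of nclass).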
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
i__1 = fParam_1.nevt;
for (i__ = 1; i__ <= i__1; ++i__) {
    i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
    for (j = 1; j <= i__2; ++j) {
        if (fVarn_1.mclass[i__ - 1] == j) {
            fNeur_1.o[j - 1] = 1.;
        fNeur_1.o[j - 1] = -1.;
        d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
        c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
c__ /= (Double_t) (fParam_1.nevt * fParam_1.lclass) * 2.;
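// Lecev2: reads the test sample through DataInterface (selector fg_999) and
// applies the normalization to [-1, 1] using the xmin/xmax bounds determined
// from the training sample in Leclearn.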
#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)
i__1 = fParam_1.lclass;
i__1 = fParam_1.nevt;
for (i__ = 1; i__ <= i__1; ++i__) {
    DataInterface(tout2, tin2, &fg_999, &fg_0, &fParam_1.nevt, &fParam_1.nvar,
                  xpg, &fVarn_1.mclass[i__ - 1], &ikend);
    i__2 = fParam_1.nvar;
    for (j = 1; j <= i__2; ++j) {
        xx_ref(i__, j) = xpg[j - 1];
i__1 = fParam_1.nevt;
for (i__ = 1; i__ <= i__1; ++i__) {
    i__2 = fParam_1.nvar;
    for (l = 1; l <= i__2; ++l) {
        if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (
        xx_ref(i__, l) = xx_ref(i__, l) - (fVarn_1.xmax[l - 1] + fVarn_1.xmin[l - 1]) / 2.;
        xx_ref(i__, l) = xx_ref(i__, l) / ((fVarn_1.xmax[l - 1] - fVarn_1.xmin[l - 1]) / 2.);
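// En_avant2: forward pass for test events; identical to En_avant except that
// the inputs are read from the test matrix xx_ref (fVarn3_1).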
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)
Int_t i__1, i__2, i__3;
i__1 = fNeur_1.neuron[0];
for (i__ = 1; i__ <= i__1; ++i__) {
i__1 = fParam_1.layerm - 1;
for (layer = 1; layer <= i__1; ++layer) {
    i__2 = fNeur_1.neuron[layer];
    for (j = 1; j <= i__2; ++j) {
        x_ref(layer + 1, j) = 0.;
        i__3 = fNeur_1.neuron[layer - 1];
        for (i__ = 1; i__ <= i__3; ++i__) {
            * w_ref(layer + 1, j, i__);
        Foncf(&i__3, &x_ref(layer + 1, j), &f);
printf("%s: %s", fg_MethodName, mot);