Int_t TMVA::MethodCFMlpANN_Utils::fg_100 = 100;
Int_t TMVA::MethodCFMlpANN_Utils::fg_0   = 0;
Int_t TMVA::MethodCFMlpANN_Utils::fg_999 = 999;
const char* const TMVA::MethodCFMlpANN_Utils::fg_MethodName = "--- CFMlpANN ";
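// The f2c-translated routines take every argument by pointer, so the constant
// arguments 100, 0 and 999 used in the DataInterface calls below have to live
// in named static variables.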
for (i=0; i<max_nVar_; ++i) fVarn_1.xmin[i] = 0;
for (i=0; i<max_nNodes_; ++i) fDel_1.coef[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_*max_nNodes_; ++i) fDel_1.delta[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_; ++i) fDel_1.delww[i] = 0;
for (i=0; i<max_nNodes_; ++i) fNeur_1.cut[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_; ++i) fNeur_1.deltaww[i] = 0;
for (i=0; i<max_nLayers_; ++i) fNeur_1.neuron[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_*max_nNodes_; ++i) fNeur_1.w[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_; ++i) fNeur_1.ww[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_; ++i) fNeur_1.x[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_; ++i) fNeur_1.y[i] = 0;
fParam_1.nblearn = 0;
fParam_1.nunilec = 0;
fParam_1.nunishort = 0;
fParam_1.nunisor = 0;
for (i=0; i<max_nVar_; ++i) fVarn_1.xmax[i] = 0;
printf("*** CFMlpANN_f2c: Warning in Train_nn: number of training + testing"
       " events exceeds hardcoded maximum - reset to maximum allowed number");
*ntrain = (Int_t) (*ntrain * ((Double_t) max_Events_ / (*ntrain + *ntest)));
printf("*** CFMlpANN_f2c: ERROR in Train_nn: number of variables"
       " exceeds hardcoded maximum ==> abort");
printf("*** CFMlpANN_f2c: Warning in Train_nn: number of layers"
       " exceeds hardcoded maximum - reset to maximum allowed number");
printf("*** CFMlpANN_f2c: Warning in Train_nn: number of nodes"
       " exceeds hardcoded maximum - reset to maximum allowed number");
fVarn2_1.Create( *ntrain + *ntest, *nvar2 );
fVarn3_1.Create( *ntrain + *ntest, *nvar2 );
Entree_new(nvar2, det, ntrain, ntest, nlayer, nodes, ncycle, (Int_t)20);
if (fNeur_1.neuron[fParam_1.layerm - 1] == 1) {
fParam_1.lclass = fNeur_1.neuron[fParam_1.layerm - 1];
fParam_1.nvar = fNeur_1.neuron[0];
Innit(det, tout2, tin2, (Int_t)20);
Int_t rewrite, i__, j, ncoef;
Int_t ntemp, num, retrain;
fCost_1.ancout = 1e30;
fDel_1.coef[i__ - 1] = (Float_t)0.;
fDel_1.temp[i__ - 1] = (Float_t)0.;
fParam_1.layerm = *numlayer;
printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed ");
fParam_1.nevl = *ntrain;
fParam_1.nevt = *ntest;
fParam_1.nblearn = *numcycle;
fParam_1.nunilec = 10;
fParam_1.epsmin = 1e-10;
fParam_1.epsmax = 1e-4;
fCost_1.tolcou = 1e-6;
fParam_1.nunisor = 30;
fParam_1.nunishort = 48;
ULog() << kINFO << "Total number of events for training: " << fParam_1.nevl << Endl;
ULog() << kINFO << "Total number of training cycles : " << fParam_1.nblearn << Endl;
printf("Error: number of learning events exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed ");
printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed ");
i__1 = fParam_1.layerm;
for (j = 1; j <= i__1; ++j) {
   if (j == fParam_1.layerm && num != 2) {
   fNeur_1.neuron[j - 1] = num;
i__1 = fParam_1.layerm;
for (j = 1; j <= i__1; ++j) {
   ULog() << kINFO << "Number of neurons in layer(" << j
          << "): " << fNeur_1.neuron[j - 1] << Endl;
if (fNeur_1.neuron[fParam_1.layerm - 1] != 2) {
   printf("Error: wrong number of classes at output layer: %i != 2 ==> abort\n",
          fNeur_1.neuron[fParam_1.layerm - 1]);
i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
for (j = 1; j <= i__1; ++j) {
   fDel_1.coef[j - 1] = 1.;
i__1 = fParam_1.layerm;
for (j = 1; j <= i__1; ++j) {
   fDel_1.temp[j - 1] = 1.;
fParam_1.ichoi = retrain;
fParam_1.ndivis = rewrite;
if (! (fParam_1.ichoi == 0 || fParam_1.ichoi == 1)) {
   printf("Big troubles !!! \n");
   Arret("new training or continued one !");
if (fParam_1.ichoi == 0) {
   ULog() << kINFO << "New training will be performed" << Endl;
printf("%s: Training will be continued from a weight file\n", fg_MethodName);
if (fDel_1.coef[i__ - 1] != (Float_t)0.) {
if (fDel_1.temp[i__ - 1] != (Float_t)0.) {
if (ncoef != fNeur_1.neuron[fParam_1.layerm - 1]) {
   Arret(" entree error code 1 : needs to be reported");
if (ntemp != fParam_1.layerm) {
   Arret("entree error code 2 : needs to be reported");
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
Int_t i__1, i__2, i__3;
i__1 = fParam_1.layerm;
for (layer = 2; layer <= i__1; ++layer) {
   i__2 = fNeur_1.neuron[layer - 2];
   for (i__ = 1; i__ <= i__2; ++i__) {
      i__3 = fNeur_1.neuron[layer - 1];
      for (j = 1; j <= i__3; ++j) {
         w_ref(layer, j, i__) = (Sen3a() * 2. - 1.) * .2;
         ww_ref(layer, j) = (Sen3a() * 2. - 1.) * .2;
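// Sen3a() yields a uniform deviate in [0,1], so the weights and the bias-like
// ww terms are initialised uniformly in [-0.2, 0.2].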
#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
Int_t i__1, i__2, i__3;
i__1 = fNeur_1.neuron[0];
for (i__ = 1; i__ <= i__1; ++i__) {
i__1 = fParam_1.layerm - 1;
for (layer = 1; layer <= i__1; ++layer) {
   i__2 = fNeur_1.neuron[layer];
   for (j = 1; j <= i__2; ++j) {
      x_ref(layer + 1, j) = 0.;
      i__3 = fNeur_1.neuron[layer - 1];
      for (i__ = 1; i__ <= i__3; ++i__) {
         x_ref(layer + 1, j) = x_ref(layer + 1, j) + y_ref(layer, i__) * w_ref(layer + 1, j, i__);
      Foncf(&i__3, &x_ref(layer + 1, j), &f);
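// Forward propagation: the input x(l+1, j) of each node accumulates
// y(l, i) * w(l+1, j, i) over the previous layer, and Foncf then applies the
// sigmoid-like activation to produce y(l+1, j).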
#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)
i__1 = fParam_1.lclass;
for (k = 1; k <= i__1; ++k) {
i__1 = fParam_1.nvar;
for (i__ = 1; i__ <= i__1; ++i__) {
   fVarn_1.xmin[i__ - 1] = 1e30;
   fVarn_1.xmax[i__ - 1] = -fVarn_1.xmin[i__ - 1];
i__1 = fParam_1.nevl;
for (i__ = 1; i__ <= i__1; ++i__) {
   DataInterface(tout2, tin2, &fg_100, &fg_0, &fParam_1.nevl, &fParam_1.nvar,
                 xpg, &fVarn_1.nclass[i__ - 1], &ikend);
   CollectVar(&fParam_1.nvar, &fVarn_1.nclass[i__ - 1], xpg);
   i__2 = fParam_1.nvar;
   for (j = 1; j <= i__2; ++j) {
if (fVarn_1.iclass == 1) {
   i__2 = fParam_1.lclass;
   for (k = 1; k <= i__2; ++k) {
      if (fVarn_1.nclass[i__ - 1] == k) {
i__2 = fParam_1.nvar;
for (k = 1; k <= i__2; ++k) {
   if (xeev_ref(i__, k) < fVarn_1.xmin[k - 1]) {
      fVarn_1.xmin[k - 1] = xeev_ref(i__, k);
   if (xeev_ref(i__, k) > fVarn_1.xmax[k - 1]) {
      fVarn_1.xmax[k - 1] = xeev_ref(i__, k);
if (fVarn_1.iclass == 1) {
   i__2 = fParam_1.lclass;
   for (k = 1; k <= i__2; ++k) {
      i__1 = fParam_1.lclass;
      for (l = 1; l <= i__1; ++l) {
         if (nocla[k - 1] != nocla[l - 1]) {
i__1 = fParam_1.nevl;
for (i__ = 1; i__ <= i__1; ++i__) {
   i__2 = fParam_1.nvar;
   for (l = 1; l <= i__2; ++l) {
      if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (Float_t)0.) {
      xeev_ref(i__, l) = xeev_ref(i__, l) - (fVarn_1.xmax[l - 1] +
                                             fVarn_1.xmin[l - 1]) / 2.;
      xeev_ref(i__, l) = xeev_ref(i__, l) / ((fVarn_1.xmax[l - 1] -
                                              fVarn_1.xmin[l - 1]) / 2.);
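// Input normalisation: each variable is centred and rescaled to [-1, 1],
//    x' = (x - (xmax + xmin)/2) / ((xmax - xmin)/2),
// using the per-variable extrema found in the scan above.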
#define delw_ref(a_1,a_2,a_3) fDel_1.delw[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define delww_ref(a_1,a_2) fDel_1.delww[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define del_ref(a_1,a_2) fDel_1.del[(a_2)*max_nLayers_ + a_1 - 7]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]
Int_t i__1, i__2, i__3;
i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
for (i__ = 1; i__ <= i__1; ++i__) {
   if (fVarn_1.nclass[*ievent - 1] == i__) {
      fNeur_1.o[i__ - 1] = 1.;
   fNeur_1.o[i__ - 1] = -1.;
i__1 = fNeur_1.neuron[l - 1];
for (i__ = 1; i__ <= i__1; ++i__) {
   df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
   del_ref(l, i__) = df * (fNeur_1.o[i__ - 1] - y_ref(l, i__)) * fDel_1.coef[i__ - 1];
   i__2 = fNeur_1.neuron[l - 2];
   for (j = 1; j <= i__2; ++j) {
for (l = fParam_1.layerm - 1; l >= 2; --l) {
   i__2 = fNeur_1.neuron[l - 1];
   for (i__ = 1; i__ <= i__2; ++i__) {
      i__1 = fNeur_1.neuron[l];
      for (k = 1; k <= i__1; ++k) {
      Foncf(&l, &x_ref(l, i__), &f);
      df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
      i__1 = fNeur_1.neuron[l - 2];
      for (j = 1; j <= i__1; ++j) {
i__1 = fParam_1.layerm;
for (l = 2; l <= i__1; ++l) {
   i__2 = fNeur_1.neuron[l - 1];
   for (i__ = 1; i__ <= i__2; ++i__) {
      i__3 = fNeur_1.neuron[l - 2];
      for (j = 1; j <= i__3; ++j) {
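// Standard backpropagation: the error terms del(l, i) flow from the output
// layer back towards the input, using the same activation derivative df as
// above, after which every weight and bias receives its accumulated correction
// scaled by the learning rate fParam_1.eeps.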
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
if (*iii == *maxcycle) {
#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]
Int_t i__1, i__2, i__3;
Int_t nevod, layer, ktest, i1, nrest;
Leclearn(&ktest, tout2, tin2);
Lecev2(&ktest, tout2, tin2);
printf(" .... strange to be here (1) ... \n");
i__1 = fParam_1.layerm - 1;
for (layer = 1; layer <= i__1; ++layer) {
   i__2 = fNeur_1.neuron[layer];
   for (j = 1; j <= i__2; ++j) {
      i__3 = fNeur_1.neuron[layer - 1];
      for (i__ = 1; i__ <= i__3; ++i__) {
if (fParam_1.ichoi == 1) {
i__3 = fParam_1.nblearn;
Int_t num = i__3/100;
for (i1 = 1; i1 <= i__3; ++i1) {
   if ((num > 0 && (i1-1) % num == 0) || i1 == i__3) timer.DrawProgressBar( i1-1 );
i__2 = fParam_1.nevl;
for (i__ = 1; i__ <= i__2; ++i__) {
   if (fCost_1.ieps == 2) {
      fParam_1.eeps = Fdecroi(&kkk);
   if (fCost_1.ieps == 1) {
      fParam_1.eeps = fParam_1.epsmin;
   if (fVarn_1.iclass == 2) {
   if (fVarn_1.iclass == 1) {
      nevod = fParam_1.nevl / fParam_1.lclass;
      nrest = i__ % fParam_1.lclass;
      fParam_1.ndiv = i__ / fParam_1.lclass;
      ievent = fParam_1.ndiv + 1 + (fParam_1.lclass - nrest) * nevod;
      ievent = fParam_1.ndiv;
if (i1 % fParam_1.ndivis == 0 || i1 == 1 || i1 == fParam_1.nblearn) {
   GraphNN(&i1, &xxx, &yyy, det, (Int_t)20);
   Out(&i1, &fParam_1.nblearn);
if (xxx < fCost_1.tolcou) {
   GraphNN(&fParam_1.nblearn, &xxx, &yyy, det, (Int_t)20);
   Out(&fParam_1.nblearn, &fParam_1.nblearn);
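// Training stops early once the cost xxx drops below the tolerance
// fCost_1.tolcou (set to 1e-6 above); otherwise it runs for all nblearn cycles.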
printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed ");
printf("Error: number of training events exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed ");
printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed ");
if (fParam_1.lclass < fNeur_1.neuron[fParam_1.layerm - 1]) {
   printf("Error: wrong number of classes at output layer: %i != %i ==> abort\n",
          fNeur_1.neuron[fParam_1.layerm - 1], fParam_1.lclass);
   Arret("problem needs to be reported ");
printf("Error: number of variables exceeds maximum: %i, %i ==> abort",
       fParam_1.nvar, fg_max_nVar_ );
Arret("modification of mlpl3_param_lim.inc is needed");
i__1 = fParam_1.layerm;
for (i__ = 1; i__ <= i__1; ++i__) {
   printf("Error: number of neurons at layer exceeds maximum: %i, %i ==> abort",
          i__, fg_max_nNodes_ );
printf(" .... strange to be here (2) ... \n");
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
i__1 = fParam_1.nevl;
for (i__ = 1; i__ <= i__1; ++i__) {
   i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (j = 1; j <= i__2; ++j) {
      if (fVarn_1.nclass[i__ - 1] == j) {
         fNeur_1.o[j - 1] = 1.;
      fNeur_1.o[j - 1] = -1.;
      d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
      c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
c__ /= (Double_t) (fParam_1.nevl * fParam_1.lclass) * 2.;
fCost_1.ancout = c__;
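// Cout returns the coefficient-weighted mean squared error between the network
// output y and the target o (+1 for the event's true class, -1 otherwise):
//    c = sum_{events, j} coef_j * (y_j - o_j)^2 / (2 * nevl * lclass)
// ancout ("ancien cout") caches the cost of the previous cycle.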
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
Int_t jmax, k, layer, kk, nq, nr;
i__1 = fParam_1.nvar;
i__1 = fParam_1.layerm;
i__1 = fParam_1.layerm - 1;
for (layer = 1; layer <= i__1; ++layer) {
   nq = fNeur_1.neuron[layer] / 10;
   nr = fNeur_1.neuron[layer] - nq * 10;
for (k = 1; k <= i__2; ++k) {
if (fNeur_1.neuron[layer] < jmax) {
   jmax = fNeur_1.neuron[layer];
aaa = (fParam_1.epsmin - fParam_1.epsmax) / (Double_t) (fParam_1.nblearn *
                                                        fParam_1.nevl - 1);
bbb = fParam_1.epsmax - aaa;
ret_val = aaa * (Double_t) (*i__) + bbb;
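// Fdecroi interpolates the learning rate linearly in the step counter: it
// starts at epsmax and decreases towards epsmin as training proceeds.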
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
for (i__ = 1; i__ <= i__1; ++i__) {
i__1 = fParam_1.nevl;
for (i__ = 1; i__ <= i__1; ++i__) {
   i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (j = 1; j <= i__2; ++j) {
      if (fVarn_1.nclass[i__ - 1] == j) {
         xmok[j - 1] += y_ref(fParam_1.layerm, j);
      xmko[j - 1] += y_ref(fParam_1.layerm, j);
i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
for (j = 1; j <= i__1; ++j) {
   xmok[j - 1] /= (Double_t) nok[j - 1];
   xmko[j - 1] /= (Double_t) nko[j - 1];
   fNeur_1.cut[j - 1] = (xmok[j - 1] + xmko[j - 1]) / 2.;
ix = fNeur_1.neuron[fParam_1.layerm - 1];
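// The decision cut for each output node is placed halfway between the mean
// response of events that belong to its class (xmok) and of those that do
// not (xmko).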
static Int_t fg_i1 = 3823;
static Int_t fg_i2 = 4006;
static Int_t fg_i3 = 2903;
k2 = fg_i2 * j3 + fg_i3 * j2 + l3;
k1 = fg_i1 * j3 + fg_i2 * j2 + fg_i3 * j1 + l2;
fg_i1 = k1 - l1 * m12;
fg_i2 = k2 - l2 * m12;
fg_i3 = k3 - l3 * m12;
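// Sen3a appears to implement a multiple-precision linear congruential
// generator: the state is held as three base-4096 digits (fg_i1..fg_i3; the
// seeds 3823, 4006, 2903 all fit in 12 bits), digit products are combined with
// the carries l1..l3, and each digit is reduced modulo m12 (presumably 4096).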
if (*u / fDel_1.temp[*i__ - 1] > 170.) {
   *f = .99999999989999999;
else if (*u / fDel_1.temp[*i__ - 1] < -170.) {
   *f = -.99999999989999999;
yy = TMath::Exp(-(*u) / fDel_1.temp[*i__ - 1]);
*f = (1. - yy) / (yy + 1.);
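// For reference: algebraically (1 - e^{-x}) / (1 + e^{-x}) = tanh(x/2), so
// Foncf computes tanh(u / (2*T)) with the per-layer temperature
// T = fDel_1.temp[i-1], clamped where the exponential would overflow.
// A minimal standalone sketch verifying this numerically; foncf_sketch and
// its flat signature are ours for illustration, not part of TMVA:

#include <cmath>
#include <cstdio>

double foncf_sketch(double u, double T)
{
   if (u / T >  170.) return  0.99999999989999999;   // clamp against exp overflow
   if (u / T < -170.) return -0.99999999989999999;
   double yy = std::exp(-u / T);
   return (1. - yy) / (yy + 1.);
}

int main()
{
   for (double u : {-3., -0.5, 0., 0.7, 2.}) {
      std::printf("u = %5.2f   foncf = %.12f   tanh(u/2) = %.12f\n",
                  u, foncf_sketch(u, 1.), std::tanh(u / 2.));
   }
   return 0;
}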
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
i__1 = fParam_1.nevt;
for (i__ = 1; i__ <= i__1; ++i__) {
   i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (j = 1; j <= i__2; ++j) {
      if (fVarn_1.mclass[i__ - 1] == j) {
         fNeur_1.o[j - 1] = 1.;
      fNeur_1.o[j - 1] = -1.;
      d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
      c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
c__ /= (Double_t) (fParam_1.nevt * fParam_1.lclass) * 2.;
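// Cout2 is the test-sample counterpart of Cout: the same weighted mean squared
// error, evaluated on the fParam_1.nevt testing events, whose class labels
// live in fVarn_1.mclass.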
#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)
i__1 = fParam_1.lclass;
i__1 = fParam_1.nevt;
for (i__ = 1; i__ <= i__1; ++i__) {
   DataInterface(tout2, tin2, &fg_999, &fg_0, &fParam_1.nevt, &fParam_1.nvar,
                 xpg, &fVarn_1.mclass[i__ - 1], &ikend);
   i__2 = fParam_1.nvar;
   for (j = 1; j <= i__2; ++j) {
      xx_ref(i__, j) = xpg[j - 1];
i__1 = fParam_1.nevt;
for (i__ = 1; i__ <= i__1; ++i__) {
   i__2 = fParam_1.nvar;
   for (l = 1; l <= i__2; ++l) {
      if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (Float_t)0.) {
      xx_ref(i__, l) = xx_ref(i__, l) - (fVarn_1.xmax[l - 1] +
                                         fVarn_1.xmin[l - 1]) / 2.;
      xx_ref(i__, l) = xx_ref(i__, l) / ((fVarn_1.xmax[l - 1] -
                                          fVarn_1.xmin[l - 1]) / 2.);
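// The test sample is normalised with the xmin/xmax extrema determined from
// the training sample in Leclearn, so both samples undergo the identical
// affine transformation.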
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)
Int_t i__1, i__2, i__3;
i__1 = fNeur_1.neuron[0];
for (i__ = 1; i__ <= i__1; ++i__) {
i__1 = fParam_1.layerm - 1;
for (layer = 1; layer <= i__1; ++layer) {
   i__2 = fNeur_1.neuron[layer];
   for (j = 1; j <= i__2; ++j) {
      x_ref(layer + 1, j) = 0.;
      i__3 = fNeur_1.neuron[layer - 1];
      for (i__ = 1; i__ <= i__3; ++i__) {
         x_ref(layer + 1, j) = x_ref(layer + 1, j) + y_ref(layer, i__) * w_ref(layer + 1, j, i__);
      Foncf(&i__3, &x_ref(layer + 1, j), &f);
printf("%s: %s", fg_MethodName, mot);