for (i = 0; i < max_nVar_; ++i) fVarn_1.xmin[i] = 0;
for (i = 0; i < max_nNodes_; ++i) fDel_1.coef[i] = 0;
for (i = 0; i < max_nLayers_*max_nNodes_; ++i) fDel_1.del[i] = 0;
for (i = 0; i < max_nLayers_*max_nNodes_*max_nNodes_; ++i) fDel_1.delta[i] = 0;
for (i = 0; i < max_nLayers_*max_nNodes_*max_nNodes_; ++i) fDel_1.delw[i] = 0;
for (i = 0; i < max_nLayers_*max_nNodes_; ++i) fDel_1.delww[i] = 0;
for (i = 0; i < max_nLayers_; ++i) fDel_1.temp[i] = 0;
for (i = 0; i < max_nNodes_; ++i) fNeur_1.cut[i] = 0;
for (i = 0; i < max_nLayers_*max_nNodes_; ++i) fNeur_1.deltaww[i] = 0;
for (i = 0; i < max_nLayers_; ++i) fNeur_1.neuron[i] = 0;
for (i = 0; i < max_nNodes_; ++i) fNeur_1.o[i] = 0;
for (i = 0; i < max_nLayers_*max_nNodes_*max_nNodes_; ++i) fNeur_1.w[i] = 0;
for (i = 0; i < max_nLayers_*max_nNodes_; ++i) fNeur_1.ww[i] = 0;
for (i = 0; i < max_nLayers_*max_nNodes_; ++i) fNeur_1.x[i] = 0;
for (i = 0; i < max_nLayers_*max_nNodes_; ++i) fNeur_1.y[i] = 0;
for (i = 0; i < max_Events_; ++i) fVarn_1.mclass[i] = 0;
for (i = 0; i < max_Events_; ++i) fVarn_1.nclass[i] = 0;
for (i = 0; i < max_nVar_; ++i) fVarn_1.xmax[i] = 0;
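// The loops above zero the f2c-style common-block structs (fVarn_1, fDel_1,
// fNeur_1) before training, presumably so that no weights, deltas or class
// labels leak between successive trainings of the translated network.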
printf("*** CFMlpANN_f2c: Warning in Train_nn: number of training + testing"
       " events exceeds hardcoded maximum - reset to maximum allowed number\n");
if (*nvar2 > max_nVar_) {
   printf("*** CFMlpANN_f2c: ERROR in Train_nn: number of variables"
          " exceeds hardcoded maximum ==> abort\n");
if (*nlayer > max_nLayers_) {
   printf("*** CFMlpANN_f2c: Warning in Train_nn: number of layers"
          " exceeds hardcoded maximum - reset to maximum allowed number\n");
if (*nodes > max_nNodes_) {
   printf("*** CFMlpANN_f2c: Warning in Train_nn: number of nodes"
          " exceeds hardcoded maximum - reset to maximum allowed number\n");
   *nodes = max_nNodes_;
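// All network dimensions live in fixed-size f2c common blocks, so requests
// beyond the compile-time limits are either clamped back to the limit (the
// warnings above) or rejected outright (the error on the variable count).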
if (fNeur_1.neuron[fParam_1.layerm - 1] == 1) {
fParam_1.lclass = fNeur_1.neuron[fParam_1.layerm - 1];
fParam_1.nvar = fNeur_1.neuron[0];
fCost_1.ancout = 1e30;
for (i__ = 1; i__ <= max_nNodes_; ++i__) {
for (i__ = 1; i__ <= max_nLayers_; ++i__) {
if (fParam_1.layerm > max_nLayers_) {
   printf("Error: number of layers exceeds maximum: %i, %i ==> abort\n",
          fParam_1.layerm, max_nLayers_);
   Arret("modification of mlpl3_param_lim.inc is needed");
fParam_1.nevt = *ntest;
fParam_1.nunilec = 10;
fParam_1.epsmin = 1e-10;
fParam_1.epsmax = 1e-4;
fCost_1.tolcou = 1e-6;
fParam_1.nunisor = 30;
fParam_1.nunishort = 48;
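// Hardwired defaults: epsmin/epsmax bound the learning rate (see the Fdecroi
// fragments below) and tolcou is the cost-convergence tolerance; the nuni*
// values look like legacy Fortran logical unit numbers kept by the f2c port.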
ULog() << kINFO << "Total number of events for training: " << fParam_1.nevl << Endl;
ULog() << kINFO << "Total number of training cycles    : " << fParam_1.nblearn << Endl;
if (fParam_1.nevl > max_Events_) {
   printf("Error: number of learning events exceeds maximum: %i, %i ==> abort\n",
          fParam_1.nevl, max_Events_);
   Arret("modification of mlpl3_param_lim.inc is needed");
if (fParam_1.nevt > max_Events_) {
   printf("Error: number of testing events exceeds maximum: %i, %i ==> abort\n",
          fParam_1.nevt, max_Events_);
   Arret("modification of mlpl3_param_lim.inc is needed");
i__1 = fParam_1.layerm;
if (j == fParam_1.layerm && num != 2) {
fNeur_1.neuron[j - 1] = num;
i__1 = fParam_1.layerm;
ULog() << kINFO << "Number of neurons in layer(" << j << "): " << fNeur_1.neuron[j - 1] << Endl;
if (fNeur_1.neuron[fParam_1.layerm - 1] != 2) {
   printf("Error: wrong number of classes at output layer: %i != 2 ==> abort\n",
          fNeur_1.neuron[fParam_1.layerm - 1]);
i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
fDel_1.coef[j - 1] = 1.;
i__1 = fParam_1.layerm;
fDel_1.temp[j - 1] = 1.;
if (!(fParam_1.ichoi == 0 || fParam_1.ichoi == 1)) {
   printf("Big troubles !!!\n");
   Arret("new training or continued one!");
if (fParam_1.ichoi == 0) {
   ULog() << kINFO << "New training will be performed" << Endl;
printf("%s: Training will be continued from a weight file\n", fg_MethodName);
for (i__ = 1; i__ <= max_nNodes_; ++i__) {
for (i__ = 1; i__ <= max_nLayers_; ++i__) {
if (ncoef != fNeur_1.neuron[fParam_1.layerm - 1]) {
   Arret("entree error code 1: needs to be reported");
if (ntemp != fParam_1.layerm) {
   Arret("entree error code 2: needs to be reported");
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
i__1 = fParam_1.layerm;
#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
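// These *_ref macros emulate Fortran's 1-based, column-major indexing on the
// flat C arrays. The constant offsets fold in the 1-based lower bounds: the
// 2-d offset 7 = 1 + max_nLayers_ implies max_nLayers_ = 6, and the 3-d
// offset 187 = 1 + max_nLayers_*(1 + max_nNodes_) then implies
// max_nNodes_ = 30, so e.g. w_ref(1,1,1) and ww_ref(1,1) both map to index 0.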
i__1 = fNeur_1.neuron[0];
i__1 = fParam_1.layerm - 1;
#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)
i__1 = fParam_1.lclass;
for (k = 1; k <= i__1; ++k) {
i__1 = fParam_1.nvar;
fVarn_1.xmin[i__ - 1] = 1e30;
fVarn_1.xmax[i__ - 1] = -fVarn_1.xmin[i__ - 1];
i__1 = fParam_1.nevl;
DataInterface(tout2, tin2, &fg_100, &fg_0, &fParam_1.nevl, &fParam_1.nvar,
CollectVar(&fParam_1.nvar, &fVarn_1.nclass[i__ - 1], xpg);
i__2 = fParam_1.nvar;
if (fVarn_1.iclass == 1) {
i__2 = fParam_1.lclass;
for (k = 1; k <= i__2; ++k) {
if (fVarn_1.nclass[i__ - 1] == k) {
i__2 = fParam_1.nvar;
for (k = 1; k <= i__2; ++k) {
if (fVarn_1.iclass == 1) {
i__2 = fParam_1.lclass;
for (k = 1; k <= i__2; ++k) {
i__1 = fParam_1.lclass;
i__1 = fParam_1.nevl;
i__2 = fParam_1.nvar;
if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (
fVarn_1.xmin[l - 1]) / 2.;
fVarn_1.xmin[l - 1]) / 2.);
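// Input normalisation fragments: judging from the halved (... + xmin) terms,
// each variable is presumably centred on the midpoint of its observed
// [xmin, xmax] range and scaled by the half-range, mapping the training
// inputs into roughly [-1, 1] before they reach the first layer.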
#define delw_ref(a_1,a_2,a_3) fDel_1.delw[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define delww_ref(a_1,a_2) fDel_1.delww[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define del_ref(a_1,a_2) fDel_1.del[(a_2)*max_nLayers_ + a_1 - 7]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]
i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
if (fVarn_1.nclass[*ievent - 1] == i__) {
   fNeur_1.o[i__ - 1] = 1.;
fNeur_1.o[i__ - 1] = -1.;
i__1 = fNeur_1.neuron[l - 1];
df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
fDel_1.coef[i__ - 1];
i__2 = fNeur_1.neuron[l - 2];
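// Backpropagation fragments (En_arriere): the target o[] is encoded as +1
// for the event's own class and -1 for every other output node, and df is
// the activation derivative: for the sigmoid f = tanh(u/(2*temp)) used in
// Foncf below, f' = (1 - f^2)/(2*temp) = (f + 1)(1 - f)/(2*temp).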
for (l = fParam_1.layerm - 1; l >= 2; --l) {
i__2 = fNeur_1.neuron[l - 1];
i__1 = fNeur_1.neuron[l];
for (k = 1; k <= i__1; ++k) {
df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
i__1 = fNeur_1.neuron[l - 2];
i__1 = fParam_1.layerm;
i__2 = fNeur_1.neuron[l - 1];
i__3 = fNeur_1.neuron[l - 2];
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]
printf(" .... strange to be here (1) ... \n");
i__1 = fParam_1.layerm - 1;
if (fParam_1.ichoi == 1) {
i__3 = fParam_1.nblearn;
if ((num > 0 && (i1 - 1) % num == 0) || (i1 == i__3))
   timer.DrawProgressBar(i1 - 1);
i__2 = fParam_1.nevl;
if (fCost_1.ieps == 2) {
   fParam_1.eeps = Fdecroi(&kkk);
if (fCost_1.ieps == 1) {
   fParam_1.eeps = fParam_1.epsmin;
if (fVarn_1.iclass == 2) {
if (fVarn_1.iclass == 1) {
   nevod = fParam_1.nevl / fParam_1.lclass;
fParam_1.ndiv = i__ / fParam_1.lclass;
ievent = fParam_1.ndiv + 1 + (fParam_1.lclass - nrest) *
if (i1 % fParam_1.ndivis == 0 || i1 == 1 || i1 == fParam_1.nblearn) {
   Out(&i1, &fParam_1.nblearn);
if (xxx < fCost_1.tolcou) {
   Out(&fParam_1.nblearn, &fParam_1.nblearn);
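// Training-loop fragments: eeps is the current learning rate, either fixed
// at epsmin (ieps == 1) or decayed via Fdecroi (ieps == 2); when iclass == 1
// events are apparently drawn round-robin across the classes; and training
// stops early once the cost xxx falls below the tolcou tolerance set above.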
if (fParam_1.layerm > max_nLayers_) {
   printf("Error: number of layers exceeds maximum: %i, %i ==> abort\n",
          fParam_1.layerm, max_nLayers_);
   Arret("modification of mlpl3_param_lim.inc is needed");
if (fParam_1.nevl > max_Events_) {
   printf("Error: number of training events exceeds maximum: %i, %i ==> abort\n",
          fParam_1.nevl, max_Events_);
   Arret("modification of mlpl3_param_lim.inc is needed");
if (fParam_1.nevt > max_Events_) {
   printf("Error: number of testing events exceeds maximum: %i, %i ==> abort\n",
          fParam_1.nevt, max_Events_);
   Arret("modification of mlpl3_param_lim.inc is needed");
if (fParam_1.lclass < fNeur_1.neuron[fParam_1.layerm - 1]) {
   printf("Error: wrong number of classes at output layer: %i != %i ==> abort\n",
          fNeur_1.neuron[fParam_1.layerm - 1], fParam_1.lclass);
   Arret("problem needs to be reported");
if (fParam_1.nvar > max_nVar_) {
   printf("Error: number of variables exceeds maximum: %i, %i ==> abort\n",
          fParam_1.nvar, fg_max_nVar_);
   Arret("modification of mlpl3_param_lim.inc is needed");
i__1 = fParam_1.layerm;
if (fNeur_1.neuron[i__ - 1] > max_nNodes_) {
   printf("Error: number of neurons at layer %i exceeds maximum %i ==> abort\n",
          i__, fg_max_nNodes_);
printf(" .... strange to be here (2) ... \n");
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
i__1 = fParam_1.nevl;
i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
if (fVarn_1.nclass[i__ - 1] == j) {
   fNeur_1.o[j - 1] = 1.;
fNeur_1.o[j - 1] = -1.;
d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
c__ /= (Double_t)(fParam_1.nevl * fParam_1.lclass) * 2.;
fCost_1.ancout = c__;
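// Cost fragments (Cout): the residual d__1 between the network output y and
// the +/-1 target o is presumably squared and accumulated over all training
// events and output nodes, then divided by 2*nevl*lclass, i.e. a halved
// mean-squared error; the previous cost is kept in ancout for convergence.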
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
i__1 = fParam_1.nvar;
i__1 = fParam_1.layerm;
i__1 = fParam_1.layerm - 1;
nq = fNeur_1.neuron[layer] / 10;
for (k = 1; k <= i__2; ++k) {
aaa = (fParam_1.epsmin - fParam_1.epsmax) / (Double_t)(fParam_1.nblearn *
bbb = fParam_1.epsmax - aaa;
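// Fdecroi fragments: the learning rate decays linearly, eps(i) = aaa*i + bbb,
// starting at epsmax for i = 1 and heading towards epsmin; the elided
// denominator presumably counts the total number of update steps.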
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
i__1 = fParam_1.nevl;
i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
if (fVarn_1.nclass[i__ - 1] == j) {
i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
fNeur_1.cut[j - 1] = (xmok[j - 1] + xmko[j - 1]) / 2.;
ix = fNeur_1.neuron[fParam_1.layerm - 1];
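// The decision cut for each output node is placed halfway between xmok and
// xmko, which by their usage appear to be the mean response for events of
// the node's own class and for events of the other classes, respectively.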
if (*u / fDel_1.temp[*i__ - 1] > 170.) {
   *f = .99999999989999999;
else if (*u / fDel_1.temp[*i__ - 1] < -170.) {
   *f = -.99999999989999999;
*f = (1. - yy) / (yy + 1.);
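// Foncf fragments: with yy = exp(-u/temp) (that assignment is elided here),
// f = (1 - yy)/(1 + yy) = tanh(u/(2*temp)), a sigmoid bounded by (-1, 1);
// the +/-170 guards clamp f just inside the bounds before exp() could
// overflow or underflow.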
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
i__1 = fParam_1.nevt;
i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
if (fVarn_1.mclass[i__ - 1] == j) {
   fNeur_1.o[j - 1] = 1.;
fNeur_1.o[j - 1] = -1.;
d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
c__ /= (Double_t)(fParam_1.nevt * fParam_1.lclass) * 2.;
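// Cout2 mirrors Cout above, but evaluates the same halved mean-squared
// error on the nevt test events, using the test-sample labels mclass
// instead of the training labels nclass.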
#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)
i__1 = fParam_1.lclass;
i__1 = fParam_1.nevt;
DataInterface(tout2, tin2, &fg_999, &fg_0, &fParam_1.nevt, &fParam_1.nvar,
i__2 = fParam_1.nvar;
i__1 = fParam_1.nevt;
i__2 = fParam_1.nvar;
if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (
fVarn_1.xmin[l - 1]) / 2.;
fVarn_1.xmin[l - 1]) / 2.);
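// Lecev2 fragments: the test sample is read through DataInterface (tag
// fg_999 rather than fg_100) and then apparently normalised with the same
// midpoint/half-range transformation as the training sample, reusing the
// xmin/xmax ranges recorded above.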
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)
i__1 = fNeur_1.neuron[0];
i__1 = fParam_1.layerm - 1;