for (i=0; i<max_nVar_; ++i)                             fVarn_1.xmin[i] = 0;
for (i=0; i<max_nNodes_; ++i)                           fDel_1.coef[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_; ++i)              fDel_1.del[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_*max_nNodes_; ++i)  fDel_1.delta[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_*max_nNodes_; ++i)  fDel_1.delw[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_; ++i)              fDel_1.delww[i] = 0;
for (i=0; i<max_nLayers_; ++i)                          fDel_1.temp[i] = 0;
for (i=0; i<max_nNodes_; ++i)                           fNeur_1.cut[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_; ++i)              fNeur_1.deltaww[i] = 0;
for (i=0; i<max_nLayers_; ++i)                          fNeur_1.neuron[i] = 0;
for (i=0; i<max_nNodes_; ++i)                           fNeur_1.o[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_*max_nNodes_; ++i)  fNeur_1.w[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_; ++i)              fNeur_1.ww[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_; ++i)              fNeur_1.x[i] = 0;
for (i=0; i<max_nLayers_*max_nNodes_; ++i)              fNeur_1.y[i] = 0;
for (i=0; i<max_Events_; ++i)                           fVarn_1.mclass[i] = 0;
for (i=0; i<max_Events_; ++i)                           fVarn_1.nclass[i] = 0;
for (i=0; i<max_nVar_; ++i)                             fVarn_1.xmax[i] = 0;
if (*ntrain + *ntest > max_Events_) {
   printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of training + testing"
           " events exceeds hardcoded maximum - reset to maximum allowed number" );
   *ntrain = *ntrain*(max_Events_/(*ntrain + *ntest));
   *ntest  = *ntest *(max_Events_/(*ntrain + *ntest));
}
if (*nvar2 > max_nVar_) {
   printf( "*** CFMlpANN_f2c: ERROR in Train_nn: number of variables"
           " exceeds hardcoded maximum ==> abort" );
}
if (*nlayer > max_nLayers_) {
   printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of layers"
           " exceeds hardcoded maximum - reset to maximum allowed number" );
   *nlayer = max_nLayers_;
}
if (*nodes > max_nNodes_) {
   printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of nodes"
           " exceeds hardcoded maximum - reset to maximum allowed number" );
   *nodes = max_nNodes_;
}

fVarn2_1.Create( *ntrain + *ntest, *nvar2 );
fVarn3_1.Create( *ntrain + *ntest, *nvar2 );

Entree_new(nvar2, det, ntrain, ntest, nlayer, nodes, ncycle, (Int_t)20);
Int_t rewrite, i__, j, ncoef;
Int_t ntemp, num, retrain;

for (i__ = 1; i__ <= max_nNodes_; ++i__) {
for (i__ = 1; i__ <= max_nLayers_; ++i__) {

if (fParam_1.layerm > max_nLayers_) {
   printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
   Arret("modification of mlpl3_param_lim.inc is needed ");

ULog() << kINFO << "Total number of events for training: " << fParam_1.nevl << Endl;
ULog() << kINFO << "Total number of training cycles : " << fParam_1.nblearn << Endl;

printf("Error: number of learning events exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed ");
printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed ");

for (j = 1; j <= i__1; ++j) {
   if (j == fParam_1.layerm && num != 2) {

for (j = 1; j <= i__1; ++j) {
   ULog() << kINFO << "Number of layers for neuron(" << j << "): " << fNeur_1.neuron[j - 1] << Endl;

printf("Error: wrong number of classes at output layer: %i != 2 ==> abort\n",

for (j = 1; j <= i__1; ++j) {
for (j = 1; j <= i__1; ++j) {

printf("Big troubles !!! \n");
Arret("new training or continued one !");

ULog() << kINFO << "New training will be performed" << Endl;
printf("%s: New training will be continued from a weight file\n", fg_MethodName);

for (i__ = 1; i__ <= max_nNodes_; ++i__) {
for (i__ = 1; i__ <= max_nLayers_; ++i__) {

Arret("entree error code 1 : needs to be reported");
Arret("entree error code 2 : needs to be reported");
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2)    fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
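These helper macros reproduce the 1-based, column-major Fortran indexing of the original f2c code on top of the flat C arrays in fNeur_1. A minimal standalone sketch of the mapping, assuming max_nLayers_ = 6 and max_nNodes_ = 30 (the values implied by the offsets 7 and 187 in the macros above); the wIndex helper is hypothetical, introduced only for illustration:

   #include <cassert>

   constexpr int max_nLayers = 6;   // assumed hardcoded limit
   constexpr int max_nNodes  = 30;  // assumed hardcoded limit

   // 1-based Fortran triple (layer, j, i) -> 0-based flat index; the layer index runs fastest.
   // The constant 187 in w_ref equals (1*max_nNodes + 1)*max_nLayers + 1, the raw index of element (1,1,1).
   inline int wIndex(int layer, int j, int i)
   {
      return (i*max_nNodes + j)*max_nLayers + layer - ((1*max_nNodes + 1)*max_nLayers + 1);
   }

   int main()
   {
      assert(wIndex(1, 1, 1) == 0);            // first weight sits at the start of the flat array
      assert(wIndex(2, 1, 1) == 1);            // incrementing the layer index moves by one slot
      assert(wIndex(1, 2, 1) == max_nLayers);  // incrementing the node index j moves by max_nLayers slots
      return 0;
   }

The ww_ref offset of 7 follows the same pattern for a two-index array: 1*max_nLayers_ + 1.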
Int_t i__1, i__2, i__3;

for (layer = 2; layer <= i__1; ++layer) {
   i__2 = fNeur_1.neuron[layer - 2];
   for (i__ = 1; i__ <= i__2; ++i__) {
      i__3 = fNeur_1.neuron[layer - 1];
      for (j = 1; j <= i__3; ++j) {
         w_ref(layer, j, i__) = (Sen3a() * 2. - 1.) * .2;
#define xeev_ref(a_1,a_2)  fVarn2_1(a_1,a_2)
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2)     fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2)     fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2)    fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
Int_t i__1, i__2, i__3;

for (i__ = 1; i__ <= i__1; ++i__) {
for (layer = 1; layer <= i__1; ++layer) {
   for (j = 1; j <= i__2; ++j) {
      x_ref(layer + 1, j) = 0.;
      i__3 = fNeur_1.neuron[layer - 1];
      for (i__ = 1; i__ <= i__3; ++i__) {
         * w_ref(layer + 1, j, i__) );
#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)
Int_t nocla[max_nNodes_], ikend;

for (k = 1; k <= i__1; ++k) {
for (i__ = 1; i__ <= i__1; ++i__) {
for (i__ = 1; i__ <= i__1; ++i__) {
   xpg, &fVarn_1.nclass[i__ - 1], &ikend);
for (j = 1; j <= i__2; ++j) {
for (k = 1; k <= i__2; ++k) {
   if (fVarn_1.nclass[i__ - 1] == k) {
for (k = 1; k <= i__2; ++k) {
for (k = 1; k <= i__2; ++k) {
   for (l = 1; l <= i__1; ++l) {
      if (nocla[k - 1] != nocla[l - 1]) {
for (i__ = 1; i__ <= i__1; ++i__) {
   for (l = 1; l <= i__2; ++l) {
#define delw_ref(a_1,a_2,a_3)  fDel_1.delw[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define w_ref(a_1,a_2,a_3)     fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2)         fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2)         fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define delww_ref(a_1,a_2)     fDel_1.delww[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2)        fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define del_ref(a_1,a_2)       fDel_1.del[(a_2)*max_nLayers_ + a_1 - 7]
#define deltaww_ref(a_1,a_2)   fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]
Int_t i__1, i__2, i__3;

for (i__ = 1; i__ <= i__1; ++i__) {
   if (fVarn_1.nclass[*ievent - 1] == i__) {
for (i__ = 1; i__ <= i__1; ++i__) {
   df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
for (j = 1; j <= i__2; ++j) {
for (i__ = 1; i__ <= i__2; ++i__) {
for (k = 1; k <= i__1; ++k) {
   df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
for (j = 1; j <= i__1; ++j) {
for (l = 2; l <= i__1; ++l) {
   for (i__ = 1; i__ <= i__2; ++i__) {
      for (j = 1; j <= i__3; ++j) {
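The delw, delww and delta buffers referenced by the macros above hold the per-weight updates kept between training cycles. The excerpt does not show the full update rule, so the following is only a generic sketch of gradient descent with a momentum term, the standard technique these buffers suggest; eps, eta and the helper name are hypothetical, not taken from the source:

   #include <cstddef>
   #include <vector>

   // Generic sketch: one gradient step with momentum for a flat weight vector.
   // delw keeps the previous update so a fraction eta of it is carried over ("momentum").
   void UpdateWeights(std::vector<double>& w, const std::vector<double>& grad,
                      std::vector<double>& delw, double eps, double eta)
   {
      for (std::size_t k = 0; k < w.size(); ++k) {
         delw[k] = -eps * grad[k] + eta * delw[k];  // learning-rate term plus momentum term
         w[k]   += delw[k];
      }
   }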
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2)    fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

if (*iii == *maxcycle) {
#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define deltaww_ref(a_1,a_2)   fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]

Int_t i__1, i__2, i__3;
Int_t nevod, layer, ktest, i1, nrest;

Lecev2(&ktest, tout2, tin2);
printf(" .... strange to be here (1) ... \n");

for (layer = 1; layer <= i__1; ++layer) {
   for (j = 1; j <= i__2; ++j) {
      i__3 = fNeur_1.neuron[layer - 1];
      for (i__ = 1; i__ <= i__3; ++i__) {

Timer timer( i__3, "CFMlpANN" );
Int_t num = i__3/100;

for (i1 = 1; i1 <= i__3; ++i1) {
   if ( ( num>0 && (i1-1)%num == 0) || (i1 == i__3) ) timer.DrawProgressBar( i1-1 );
for (i__ = 1; i__ <= i__2; ++i__) {

if (fParam_1.layerm > max_nLayers_) {
   printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
   Arret("modification of mlpl3_param_lim.inc is needed ");

printf("Error: number of training events exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed ");
printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed ");
printf("Error: wrong number of classes at output layer: %i != %i ==> abort\n",
Arret("problem needs to be reported ");
printf("Error: number of variables exceeds maximum: %i, %i ==> abort",
Arret("modification of mlpl3_param_lim.inc is needed");

for (i__ = 1; i__ <= i__1; ++i__) {
   if (fNeur_1.neuron[i__ - 1] > max_nNodes_) {
      printf("Error: number of neurons at layer exceeds maximum: %i, %i ==> abort",

printf(" .... strange to be here (2) ... \n");
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

for (i__ = 1; i__ <= i__1; ++i__) {
   for (j = 1; j <= i__2; ++j) {
      if (fVarn_1.nclass[i__ - 1] == j) {
         c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2)    fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

Int_t jmax, k, layer, kk, nq, nr;

for (layer = 1; layer <= i__1; ++layer) {
   nq = fNeur_1.neuron[layer] / 10;
   nr = fNeur_1.neuron[layer] - nq * 10;
for (k = 1; k <= i__2; ++k) {
   if (fNeur_1.neuron[layer] < jmax) {

ret_val = aaa * (Double_t) (*i__) + bbb;
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

Int_t nko[max_nNodes_], nok[max_nNodes_];

for (i__ = 1; i__ <= i__1; ++i__) {
for (i__ = 1; i__ <= i__1; ++i__) {
   for (j = 1; j <= i__2; ++j) {
      if (fVarn_1.nclass[i__ - 1] == j) {

for (j = 1; j <= i__1; ++j) {
   xmok[j - 1] /= (Double_t) nok[j - 1];
   xmko[j - 1] /= (Double_t) nko[j - 1];
   fNeur_1.cut[j - 1] = (xmok[j - 1] + xmko[j - 1]) / 2.;
static Int_t fg_i1 = 3823;
static Int_t fg_i2 = 4006;
static Int_t fg_i3 = 2903;

Int_t k3, l3, k2, l2, k1, l1;

k2 = fg_i2 * j3 + fg_i3 * j2 + l3;
k1 = fg_i1 * j3 + fg_i2 * j2 + fg_i3 * j1 + l2;
fg_i1 = k1 - l1 * m12;
fg_i2 = k2 - l2 * m12;
fg_i3 = k3 - l3 * m12;
if (*u / fDel_1.temp[*i__ - 1] > 170.) {
   *f = .99999999989999999;
}
else if (*u / fDel_1.temp[*i__ - 1] < -170.) {
   *f = -.99999999989999999;
}
*f = (1. - yy) / (yy + 1.);
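Taken together, these lines implement the sigmoid-like node activation used throughout the network: with T the per-layer "temperature" fDel_1.temp, f(u) = (1 - e^(-u/T)) / (1 + e^(-u/T)) = tanh(u/(2T)), clamped near +-1 once |u/T| exceeds 170 so the exponential cannot overflow. A minimal standalone sketch; the excerpt omits the line computing yy, which is assumed here to be exp(-u/T):

   #include <cmath>

   // Sketch of the activation implied by Foncf(): a scaled tanh with overflow protection.
   double activation(double u, double temperature)
   {
      const double r = u / temperature;
      if (r >  170.) return  .99999999989999999;   // saturate instead of overflowing exp()
      if (r < -170.) return -.99999999989999999;
      const double yy = std::exp(-r);              // assumed: not shown in the excerpt
      return (1. - yy) / (yy + 1.);                // equals tanh(r / 2)
   }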
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

for (i__ = 1; i__ <= i__1; ++i__) {
   for (j = 1; j <= i__2; ++j) {
      if (fVarn_1.mclass[i__ - 1] == j) {
         c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)

for (i__ = 1; i__ <= i__1; ++i__) {
   xpg, &fVarn_1.mclass[i__ - 1], &ikend);
for (j = 1; j <= i__2; ++j) {
   xx_ref(i__, j) = xpg[j - 1];
for (i__ = 1; i__ <= i__1; ++i__) {
   for (l = 1; l <= i__2; ++l) {
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2)     fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2)     fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2)    fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define xx_ref(a_1,a_2)    fVarn3_1(a_1,a_2)

Int_t i__1, i__2, i__3;

for (i__ = 1; i__ <= i__1; ++i__) {
for (layer = 1; layer <= i__1; ++layer) {
   for (j = 1; j <= i__2; ++j) {
      x_ref(layer + 1, j) = 0.;
      i__3 = fNeur_1.neuron[layer - 1];
      for (i__ = 1; i__ <= i__3; ++i__) {
         * w_ref(layer + 1, j, i__);
ROOT typedefs used in this class:
   Int_t    (int)    - signed integer, 4 bytes
   Bool_t   (bool)   - boolean (0 = false, 1 = true)
   Double_t (double) - double precision, 8 bytes
   Float_t  (float)  - floating point, 4 bytes
void Foncf(Int_t *i__, Double_t *u, Double_t *f)
void Out(Int_t *iii, Int_t *maxcycle)
struct TMVA::MethodCFMlpANN_Utils::(anonymous) fVarn_1
MethodCFMlpANN_Utils()
Default constructor.
void Innit(char *det, Double_t *tout2, Double_t *tin2, Int_t)
void Entree_new(Int_t *, char *, Int_t *ntrain, Int_t *ntest, Int_t *numlayer, Int_t *nodes, Int_t *numcycle, Int_t)
void CollectVar(Int_t *nvar, Int_t *class__, Double_t *xpg)
[smart comments to be added]
struct TMVA::MethodCFMlpANN_Utils::(anonymous) fNeur_1
void Leclearn(Int_t *ktest, Double_t *tout2, Double_t *tin2)
[smart comments to be added]
virtual Int_t DataInterface(Double_t *, Double_t *, Int_t *, Int_t *, Int_t *, Int_t *, Double_t *, Int_t *, Int_t *)=0
struct TMVA::MethodCFMlpANN_Utils::(anonymous) fCost_1
void GraphNN(Int_t *ilearn, Double_t *, Double_t *, char *, Int_t)
[smart comments to be added]
class TMVA::MethodCFMlpANN_Utils::VARn2 fVarn3_1
static const Int_t fg_max_nVar_
class TMVA::MethodCFMlpANN_Utils::VARn2 fVarn2_1
void En_avant2(Int_t *ievent)
[smart comments to be added]
Double_t Fdecroi(Int_t *i__)
[smart comments to be added]
void En_arriere(Int_t *ievent)
[smart comments to be added]
struct TMVA::MethodCFMlpANN_Utils::(anonymous) fParam_1
struct TMVA::MethodCFMlpANN_Utils::(anonymous) fDel_1
void Cout(Int_t *, Double_t *xxx)
[smart comments to be added]
static const Int_t fg_max_nNodes_
Double_t Sen3a(void)
[smart comments to be added]
void Train_nn(Double_t *tin2, Double_t *tout2, Int_t *ntrain, Int_t *ntest, Int_t *nvar2, Int_t *nlayer, Int_t *nodes, Int_t *ncycle)
void Wini()
[smart comments to be added]
void En_avant(Int_t *ievent)
[smart comments to be added]
void Cout2(Int_t *, Double_t *yyy)
[smart comments to be added]
void TestNN()
[smart comments to be added]
void Lecev2(Int_t *ktest, Double_t *tout2, Double_t *tin2)
[smart comments to be added]
virtual ~MethodCFMlpANN_Utils()
Destructor.
void Arret(const char *mot)
static const char *const fg_MethodName
void Inl()
[smart comments to be added]
Timing information for training and evaluation of MVA methods.
void DrawProgressBar(Int_t, const TString &comment="")
draws progress bar in color or B&W.
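The training loop excerpt above (Timer timer( i__3, "CFMlpANN" ) followed by DrawProgressBar) uses this class to refresh the bar roughly once per percent of completed cycles. A minimal usage sketch, assuming the two-argument Timer constructor and the TMVA/Timer.h header from the ROOT distribution; RunCycles and ncycle are hypothetical names:

   #include "TMVA/Timer.h"

   void RunCycles(int ncycle)
   {
      TMVA::Timer timer(ncycle, "CFMlpANN");   // progress over ncycle training cycles
      const int step = ncycle / 100;           // redraw about once per percent

      for (int i1 = 1; i1 <= ncycle; ++i1) {
         // ... one training cycle ...
         if ((step > 0 && (i1 - 1) % step == 0) || i1 == ncycle) {
            timer.DrawProgressBar(i1 - 1);
         }
      }
   }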
MsgLogger & Endl(MsgLogger &ml)
Double_t Exp(Double_t x)
Returns the base-e exponential function of x, which is e raised to the power x.