87 using std::stringstream;
95 Int_t MethodCFMlpANN_nsel = 0;
134 TMVA::
MethodBase( jobName,
Types::kCFMlpANN, methodTitle, theData, theOption, theTargetDir ),
152 TMVA::
MethodBase(
Types::kCFMlpANN, theData, theWeightFile, theTargetDir ),
178 DeclareOptionRef( fNcycles =3000,
"NCycles",
"Number of training cycles" );
179 DeclareOptionRef( fLayerSpec=
"N,N-1",
"HiddenLayers",
"Specification of hidden layer architecture" );
187 fNodes =
new Int_t[20];
189 Int_t currentHiddenLayer = 1;
191 while(layerSpec.
Length()>0) {
193 if (layerSpec.
First(
',')<0) {
198 sToAdd = layerSpec(0,layerSpec.
First(
','));
199 layerSpec = layerSpec(layerSpec.First(
',')+1,layerSpec.Length());
203 nNodes += atoi(sToAdd);
204 fNodes[currentHiddenLayer++] = nNodes;
207 fNodes[0] = GetNvar();
208 fNodes[fNlayers-1] = 2;
210 if (IgnoreEventsWithNegWeightsInTraining()) {
211 Log() <<
kFATAL <<
"Mechanism to ignore events with negative weights in training not yet available for method: "
212 << GetMethodTypeName()
213 <<
" --> please remove \"IgnoreNegWeightsInTraining\" option from booking string."
217 Log() <<
kINFO <<
"Use configuration (nodes per layer): in=";
218 for (
Int_t i=0; i<fNlayers-1; i++)
Log() <<
kINFO << fNodes[i] <<
":";
222 Log() <<
"Use " << fNcycles <<
" training cycles" <<
Endl;
224 Int_t nEvtTrain =
Data()->GetNTrainingEvents();
230 fData =
new TMatrix( nEvtTrain, GetNvar() );
231 fClass =
new std::vector<Int_t>( nEvtTrain );
236 for (
Int_t ievt=0; ievt<nEvtTrain; ievt++) {
237 const Event * ev = GetEvent(ievt);
240 (*fClass)[ievt] = DataInfo().IsSignal(ev) ? 1 : 2;
243 for (ivar=0; ivar<GetNvar(); ivar++) {
244 (*fData)( ievt, ivar ) = ev->
GetValue(ivar);
260 SetNormalised(
kTRUE );
266 TMVA::MethodCFMlpANN_nsel = 0;
279 for (
Int_t i=0; i<fNlayers; i++)
delete[] fYNN[i];
291 Int_t ntrain(
Data()->GetNTrainingEvents());
293 Int_t nvar(GetNvar());
294 Int_t nlayers(fNlayers);
296 Int_t ncycles(fNcycles);
298 for (
Int_t i=0; i<nlayers; i++) nodes[i] = fNodes[i];
301 for (
Int_t i=0; i<fNlayers; i++)
delete[] fYNN[i];
306 for (
Int_t layer=0; layer<nlayers; layer++)
307 fYNN[layer] =
new Double_t[fNodes[layer]];
311 Train_nn( &dumDat, &dumDat, &ntrain, &ntest, &nvar, &nlayers, nodes, &ncycles );
313 Log() <<
kWARNING <<
"<Train> sorry CFMlpANN does not run on Windows" <<
Endl;
326 const Event* ev = GetEvent();
329 std::vector<Double_t> inputVec( GetNvar() );
330 for (
UInt_t ivar=0; ivar<GetNvar(); ivar++) inputVec[ivar] = ev->
GetValue(ivar);
332 Double_t myMVA = EvalANN( inputVec, isOK );
333 if (!isOK)
Log() <<
kFATAL <<
"EvalANN returns (!isOK) for event " <<
Endl;
336 NoErrorCalc(err, errUpper);
348 for (
UInt_t ivar=0; ivar<GetNvar(); ivar++) xeev[ivar] = inVar[ivar];
352 for (
UInt_t jvar=0; jvar<GetNvar(); jvar++) {
354 if (fVarn_1.xmax[jvar] < xeev[jvar]) xeev[jvar] = fVarn_1.xmax[jvar];
355 if (fVarn_1.xmin[jvar] > xeev[jvar]) xeev[jvar] = fVarn_1.xmin[jvar];
356 if (fVarn_1.xmax[jvar] == fVarn_1.xmin[jvar]) {
361 xeev[jvar] = xeev[jvar] - ((fVarn_1.xmax[jvar] + fVarn_1.xmin[jvar])/2);
362 xeev[jvar] = xeev[jvar] / ((fVarn_1.xmax[jvar] - fVarn_1.xmin[jvar])/2);
368 Double_t retval = 0.5*(1.0 + fYNN[fParam_1.layerm-1][0]);
380 for (
Int_t ivar=0; ivar<fNeur_1.neuron[0]; ivar++) fYNN[0][ivar] = xeev[ivar];
382 for (
Int_t layer=1; layer<fParam_1.layerm; layer++) {
383 for (
Int_t j=1; j<=fNeur_1.neuron[layer]; j++) {
385 Double_t x = Ww_ref(fNeur_1.ww, layer+1,j);
387 for (
Int_t k=1; k<=fNeur_1.neuron[layer-1]; k++) {
388 x += fYNN[layer-1][k-1]*W_ref(fNeur_1.w, layer+1, j, k);
390 fYNN[layer][j-1] = NN_fonc( layer, x );
402 if (u/fDel_1.temp[i] > 170) f = +1;
403 else if (u/fDel_1.temp[i] < -170) f = -1;
406 f = (1 - yy)/(1 + yy);
421 istr >> nva >> lclass;
423 if (GetNvar() != nva)
424 Log() <<
kFATAL <<
"<ReadWeightsFromFile> mismatch in number of variables" <<
Endl;
428 Log() <<
kFATAL <<
"<ReadWeightsFromFile> mismatch in number of classes" <<
Endl;
432 Log() <<
kFATAL <<
"<ReadWeightsFromStream> reached EOF prematurely " <<
Endl;
435 for (
UInt_t ivar=0; ivar<GetNvar(); ivar++)
436 istr >> fVarn_1.xmax[ivar] >> fVarn_1.xmin[ivar];
439 istr >> fParam_1.layerm;
442 for (
Int_t i=0; i<fNlayers; i++)
delete[] fYNN[i];
446 fYNN =
new Double_t*[fParam_1.layerm];
447 for (
Int_t layer=0; layer<fParam_1.layerm; layer++) {
450 istr >> fNeur_1.neuron[layer];
451 fYNN[layer] =
new Double_t[fNeur_1.neuron[layer]];
455 const Int_t nchar( 100 );
456 char* dumchar =
new char[nchar];
459 for (
Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {
461 Int_t nq = fNeur_1.neuron[layer]/10;
462 Int_t nr = fNeur_1.neuron[layer] - nq*10;
468 for (
Int_t k=1; k<=kk; k++) {
469 Int_t jmin = 10*k - 9;
471 if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
472 for (
Int_t j=jmin; j<=jmax; j++) {
473 istr >> Ww_ref(fNeur_1.ww, layer+1, j);
475 for (
Int_t i=1; i<=fNeur_1.neuron[layer-1]; i++) {
476 for (
Int_t j=jmin; j<=jmax; j++) {
477 istr >> W_ref(fNeur_1.w, layer+1, j, i);
481 istr.getline( dumchar, nchar );
485 for (
Int_t layer=0; layer<fParam_1.layerm; layer++) {
488 istr.getline( dumchar, nchar );
489 istr.getline( dumchar, nchar );
491 istr >> fDel_1.temp[layer];
495 if ((
Int_t)GetNvar() != fNeur_1.neuron[0]) {
496 Log() <<
kFATAL <<
"<ReadWeightsFromFile> mismatch in zeroth layer:"
497 << GetNvar() <<
" " << fNeur_1.neuron[0] <<
Endl;
500 fNlayers = fParam_1.layerm;
520 Log() <<
kFATAL <<
"ERROR in MethodCFMlpANN_DataInterface zero pointer xpg" <<
Endl;
523 Log() <<
kFATAL <<
"ERROR in MethodCFMlpANN_DataInterface mismatch in num of variables: "
528 *iclass = (int)opt->
GetClass( TMVA::MethodCFMlpANN_nsel );
530 xpg[ivar] = (
double)opt->
GetData( TMVA::MethodCFMlpANN_nsel, ivar );
532 ++TMVA::MethodCFMlpANN_nsel;
549 for (
Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
550 s << std::scientific << fVarn_1.xmin[ivar] <<
" " << fVarn_1.xmax[ivar] <<
" ";
555 for (
Int_t layer=0; layer<fParam_1.layerm; layer++)
556 n << std::scientific << fNeur_1.neuron[layer] <<
" ";
558 for (
Int_t layer=1; layer<fParam_1.layerm; layer++) {
560 gTools().
AddAttr(layernode,
"NNeurons",fNeur_1.neuron[layer]);
561 void* neuronnode=
NULL;
562 for (
Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
564 stringstream weights;
565 weights.precision( 16 );
566 weights << std::scientific << Ww_ref(fNeur_1.ww, layer+1, neuron+1);
567 for (
Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
568 weights <<
" " << std::scientific << W_ref(fNeur_1.w, layer+1, neuron+1, i+1);
575 temp.precision( 16 );
576 for (
Int_t layer=0; layer<fParam_1.layerm; layer++) {
577 temp << std::scientific << fDel_1.temp[layer] <<
" ";
589 stringstream content(minmaxcontent);
590 for (
UInt_t ivar=0; ivar<GetNvar(); ivar++)
591 content >> fVarn_1.xmin[ivar] >> fVarn_1.xmax[ivar];
593 for (
Int_t i=0; i<fNlayers; i++)
delete[] fYNN[i];
597 fYNN =
new Double_t*[fParam_1.layerm];
600 stringstream ncontent(neuronscontent);
601 for (
Int_t layer=0; layer<fParam_1.layerm; layer++) {
604 ncontent >> fNeur_1.neuron[layer];
605 fYNN[layer] =
new Double_t[fNeur_1.neuron[layer]];
607 for (
Int_t layer=1; layer<fParam_1.layerm; layer++) {
609 void* neuronnode=
NULL;
611 for (
Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
613 stringstream weights(neuronweights);
614 weights >> Ww_ref(fNeur_1.ww, layer+1, neuron+1);
615 for (
Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
616 weights >> W_ref(fNeur_1.w, layer+1, neuron+1, i+1);
623 stringstream
t(temp);
624 for (
Int_t layer=0; layer<fParam_1.layerm; layer++) {
625 t >> fDel_1.temp[layer];
627 fNlayers = fParam_1.layerm;
636 o <<
"Number of vars " << fParam_1.nvar << std::endl;
637 o <<
"Output nodes " << fParam_1.lclass << std::endl;
640 for (
Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
641 o <<
"Var " << ivar <<
" [" << fVarn_1.xmin[ivar] <<
" - " << fVarn_1.xmax[ivar] <<
"]" << std::endl;
644 o <<
"Number of layers " << fParam_1.layerm << std::endl;
646 o <<
"Nodes per layer ";
647 for (
Int_t layer=0; layer<fParam_1.layerm; layer++)
649 o << fNeur_1.neuron[layer] <<
" ";
653 for (
Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {
655 Int_t nq = fNeur_1.neuron[layer]/10;
656 Int_t nr = fNeur_1.neuron[layer] - nq*10;
662 for (
Int_t k=1; k<=kk; k++) {
663 Int_t jmin = 10*k - 9;
666 if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
667 for (j=jmin; j<=jmax; j++) {
670 o << Ww_ref(fNeur_1.ww, layer+1, j) <<
" ";
675 for (i=1; i<=fNeur_1.neuron[layer-1]; i++) {
676 for (j=jmin; j<=jmax; j++) {
678 o << W_ref(fNeur_1.w, layer+1, j, i) <<
" ";
687 for (
Int_t layer=0; layer<fParam_1.layerm; layer++) {
688 o <<
"Del.temp in layer " << layer <<
" : " << fDel_1.temp[layer] << std::endl;
701 fout <<
" // not implemented for class: \"" << className <<
"\"" << std::endl;
702 fout <<
"};" << std::endl;
void Train(void)
training of the Clermont-Ferrand NN classifier
void DeclareOptions()
define the options (their key words) that can be set in the option string; known options: NCycles=xx :t...
MsgLogger & Endl(MsgLogger &ml)
void NN_ava(Double_t *)
auxiliary functions
void ReadWeightsFromXML(void *wghtnode)
read weights from xml file
Int_t GetClass(Int_t ivar) const
Bool_t BeginsWith(const char *s, ECaseCompare cmp=kExact) const
Int_t DataInterface(Double_t *, Double_t *, Int_t *, Int_t *, Int_t *, Int_t *, Double_t *, Int_t *, Int_t *)
data interface function
virtual ~MethodCFMlpANN(void)
destructor
virtual Bool_t HasAnalysisType(Types::EAnalysisType type, UInt_t numberClasses, UInt_t)
CFMlpANN can handle classification with 2 classes.
if(pyself &&pyself!=Py_None)
std::vector< std::vector< double > > Data
TClass * fClass
pointer to the foreign object
Double_t GetMvaValue(Double_t *err=0, Double_t *errUpper=0)
returns CFMlpANN output (normalised within [0,1])
MethodCFMlpANN(const TString &jobName, const TString &methodTitle, DataSetInfo &theData, const TString &theOption="3000:N-1:N-2", TDirectory *theTargetDir=0)
standard constructor option string: "n_training_cycles:n_hidden_layers" default is: n_training_cycles...
void MakeClassSpecificHeader(std::ostream &, const TString &="") const
write specific classifier response for header
void AddWeightsXMLTo(void *parent) const
write weights to xml file
Double_t EvalANN(std::vector< Double_t > &, Bool_t &isOK)
evaluates NN value as function of input variables
void GetHelpMessage() const
get help message text
void ReadWeightsFromStream(std::istream &istr)
read back the weight from the training from file (stream)
TString & Remove(Ssiz_t pos)
Double_t NN_fonc(Int_t, Double_t) const
activation function
Describe directory structure in memory.
Float_t GetValue(UInt_t ivar) const
return value of i'th variable
TMatrixT< Float_t > TMatrix
void Init(void)
default initialisation called by all constructors
void ProcessOptions()
decode the options in the option string
void SetLogger(MsgLogger *l)
ClassImp(TMVA::MethodCFMlpANN) namespace TMVA
#define REGISTER_METHOD(CLASS)
for example
void MakeClassSpecific(std::ostream &, const TString &) const
void PrintWeights(std::ostream &o) const
write the weights of the neural net
static MethodCFMlpANN * This(void)
static pointer to this object (required for external functions)
Ssiz_t First(char c) const
Find first occurrence of a character c.
Double_t GetData(Int_t isel, Int_t ivar) const