// Excerpts from TMVA::MethodCFMlpANN (Clermont-Ferrand neural network classifier)

using std::stringstream;

// event counter used by the Fortran-style data interface (namespace TMVA scope)
Int_t MethodCFMlpANN_nsel = 0;

// standard constructor: initialise the MethodBase part
TMVA::MethodBase( jobName, Types::kCFMlpANN, methodTitle, theData, theOption, theTargetDir ),

// DeclareOptions: number of training cycles and hidden-layer architecture
DeclareOptionRef( fNcycles  =3000,    "NCycles",      "Number of training cycles" );
DeclareOptionRef( fLayerSpec="N,N-1", "HiddenLayers", "Specification of hidden layer architecture" );
// ProcessOptions: decode the "HiddenLayers" option into the number of nodes per layer
fNodes = new Int_t[20]; // number of nodes per layer (maximum 20 layers)
fNlayers = 2;
Int_t currentHiddenLayer = 1;
TString layerSpec(fLayerSpec);

while (layerSpec.Length()>0) {
   TString sToAdd = "";
   if (layerSpec.First(',')<0) {
      sToAdd    = layerSpec;
      layerSpec = "";
   }
   else {
      sToAdd    = layerSpec(0,layerSpec.First(','));
      layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
   }
   Int_t nNodes = 0;
   if (sToAdd.BeginsWith("N")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
   nNodes += atoi(sToAdd);
   fNodes[currentHiddenLayer++] = nNodes;
   fNlayers++;
}

fNodes[0]          = GetNvar(); // number of input nodes
fNodes[fNlayers-1] = 2;         // number of output nodes
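// Illustrative sketch (standalone, not part of this class): how a hidden-layer spec such
// as "N,N-1" expands into node counts, where N stands for the number of input variables.
// Function and variable names are chosen for the example.
#include <cstdlib>
#include <sstream>
#include <string>
#include <vector>

static std::vector<int> ExpandLayerSpec(const std::string& spec, int nVar)
{
   std::vector<int> nodes;
   std::stringstream ss(spec);
   std::string token;
   while (std::getline(ss, token, ',')) {
      int n = 0;
      if (!token.empty() && token[0] == 'N')       // "N", "N-1", "N+2", ...
         n = nVar + std::atoi(token.c_str() + 1);  // atoi("") == 0 for a plain "N"
      else
         n = std::atoi(token.c_str());             // plain integer, e.g. "10"
      nodes.push_back(n);
   }
   return nodes;  // e.g. {4,3} for spec "N,N-1" and nVar = 4
}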
if (IgnoreEventsWithNegWeightsInTraining()) {
   Log() << kFATAL << "Mechanism to ignore events with negative weights in training not yet available for method: "
         << GetMethodTypeName()
         << " --> please remove \"IgnoreNegWeightsInTraining\" option from booking string."
         << Endl;
}

Log() << kINFO << "Use configuration (nodes per layer): in=";
for (Int_t i=0; i<fNlayers-1; i++) Log() << kINFO << fNodes[i] << ":";

Log() << "Use " << fNcycles << " training cycles" << Endl;
Int_t nEvtTrain = Data()->GetNTrainingEvents();

// event lookup tables handed to the training routine via the data interface
fData  = new TMatrix( nEvtTrain, GetNvar() );
fClass = new std::vector<Int_t>( nEvtTrain );

UInt_t ivar;
for (Int_t ievt=0; ievt<nEvtTrain; ievt++) {
   const Event * ev = GetEvent(ievt);

   // identify signal (1) and background (2) events
   (*fClass)[ievt] = DataInfo().IsSignal(ev) ? 1 : 2;

   // copy the input variable values
   for (ivar=0; ivar<GetNvar(); ivar++) {
      (*fData)( ievt, ivar ) = ev->GetValue(ivar);
   }
}
// Init: default initialisation called by all constructors; CFMlpANN uses normalised inputs
SetNormalised( kTRUE );
TMVA::MethodCFMlpANN_nsel = 0;

// destructor: release the per-layer output buffers
for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
// Train: copy the layer description and invoke the training routine
Double_t dumDat(0);
Int_t ntrain(Data()->GetNTrainingEvents());
Int_t ntest(0);
Int_t nvar(GetNvar());
Int_t nlayers(fNlayers);
Int_t* nodes = new Int_t[nlayers];
Int_t ncycles(fNcycles);

for (Int_t i=0; i<nlayers; i++) nodes[i] = fNodes[i]; // full copy of class member

// release any previously allocated network buffers before re-allocating
for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];

fYNN = new Double_t*[nlayers];
for (Int_t layer=0; layer<nlayers; layer++)
   fYNN[layer] = new Double_t[fNodes[layer]];

#ifndef R__WIN32
Train_nn( &dumDat, &dumDat, &ntrain, &ntest, &nvar, &nlayers, nodes, &ncycles );
#else
Log() << kWARNING << "<Train> sorry CFMlpANN does not run on Windows" << Endl;
#endif
// GetMvaValue: returns the CFMlpANN output, normalised to [0,1]
const Event* ev = GetEvent();

// copy the input variables of the current event
std::vector<Double_t> inputVec( GetNvar() );
for (UInt_t ivar=0; ivar<GetNvar(); ivar++) inputVec[ivar] = ev->GetValue(ivar);

Double_t myMVA = EvalANN( inputVec, isOK );
if (!isOK) Log() << kFATAL << "EvalANN returns (!isOK) for event " << Endl;

// no MVA error estimate available
NoErrorCalc(err, errUpper);
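// Illustrative sketch (not part of this file): GetMvaValue() is what the TMVA::Reader
// ultimately calls when a trained classifier is evaluated on new events. The variable
// names and the weight-file path below are assumptions for the example.
//
//    TMVA::Reader reader;
//    Float_t var1, var2;
//    reader.AddVariable( "var1", &var1 );
//    reader.AddVariable( "var2", &var2 );
//    reader.BookMVA( "CFMlpANN", "dataset/weights/TMVAClassification_CFMlpANN.weights.xml" );
//    Double_t mva = reader.EvaluateMVA( "CFMlpANN" );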
// EvalANN: evaluate the network as a function of the input variables
for (UInt_t ivar=0; ivar<GetNvar(); ivar++) xeev[ivar] = inVar[ivar];

// clamp every variable to its training range, then scale it to [-1,1]
for (UInt_t jvar=0; jvar<GetNvar(); jvar++) {
   if (fVarn_1.xmax[jvar] < xeev[jvar]) xeev[jvar] = fVarn_1.xmax[jvar];
   if (fVarn_1.xmin[jvar] > xeev[jvar]) xeev[jvar] = fVarn_1.xmin[jvar];
   if (fVarn_1.xmax[jvar] == fVarn_1.xmin[jvar]) {
      // degenerate variable (no spread in training): flag the event and skip the scaling
      isOK = kFALSE;
      xeev[jvar] = 0;
   }
   else {
      xeev[jvar] = xeev[jvar] - ((fVarn_1.xmax[jvar] + fVarn_1.xmin[jvar])/2);
      xeev[jvar] = xeev[jvar] / ((fVarn_1.xmax[jvar] - fVarn_1.xmin[jvar])/2);
   }
}

// map the output neuron from [-1,1] onto [0,1]
Double_t retval = 0.5*(1.0 + fYNN[fParam_1.layerm-1][0]);
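// Illustrative sketch (standalone, names chosen for the example): the clamping and
// min-max scaling applied above, which maps each input onto [-1,1]; the final MVA
// value then maps the output neuron from [-1,1] onto [0,1] as 0.5*(1+y).
static double ScaleToUnitRange(double x, double xmin, double xmax)
{
   if (x > xmax) x = xmax;            // clamp to the range seen in training
   if (x < xmin) x = xmin;
   if (xmax == xmin) return 0.0;      // degenerate variable: no spread
   const double centre    = 0.5*(xmax + xmin);
   const double halfWidth = 0.5*(xmax - xmin);
   return (x - centre)/halfWidth;     // result lies in [-1,1]
}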
// NN_ava: feed-forward evaluation of the network for one (scaled) event
for (Int_t ivar=0; ivar<fNeur_1.neuron[0]; ivar++) fYNN[0][ivar] = xeev[ivar];

for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
   for (Int_t j=1; j<=fNeur_1.neuron[layer]; j++) {

      Double_t x = Ww_ref(fNeur_1.ww, layer+1,j); // init with the bias

      for (Int_t k=1; k<=fNeur_1.neuron[layer-1]; k++) { // neurons of the previous layer
         x += fYNN[layer-1][k-1]*W_ref(fNeur_1.w, layer+1, j, k);
      }
      fYNN[layer][j-1] = NN_fonc( layer, x );
   }
}
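// Illustrative sketch (standalone): the same feed-forward step written with plain
// containers; 'bias' plays the role of Ww_ref and 'w' that of W_ref, and the activation
// (1-e^(-x/T))/(1+e^(-x/T)) is written via its tanh(x/(2T)) identity.
#include <cmath>
#include <cstddef>
#include <vector>

static std::vector<double> ForwardLayer(const std::vector<double>& in,
                                        const std::vector<double>& bias,
                                        const std::vector<std::vector<double> >& w,
                                        double temperature)
{
   std::vector<double> out(bias.size());
   for (std::size_t j = 0; j < bias.size(); ++j) {
      double x = bias[j];                        // bias of neuron j
      for (std::size_t k = 0; k < in.size(); ++k)
         x += in[k]*w[j][k];                     // weighted sum over the previous layer
      out[j] = std::tanh(x/(2.0*temperature));   // equals (1-e^(-x/T))/(1+e^(-x/T))
   }
   return out;
}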
// NN_fonc: sigmoid activation with a per-layer "temperature", saturated against exp() overflow
if      (u/fDel_1.temp[i] >  170) f = +1;
else if (u/fDel_1.temp[i] < -170) f = -1;
else {
   Double_t yy = TMath::Exp(-u/fDel_1.temp[i]);
   f = (1 - yy)/(1 + yy);
}
// ReadWeightsFromStream: read the network back from a plain-text weight file
istr >> nva >> lclass;

if (GetNvar() != nva)
   Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of variables" << Endl;

// analogous sanity checks (their if-conditions are not part of this excerpt)
Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of classes" << Endl;
Log() << kFATAL << "<ReadWeightsFromStream> reached EOF prematurely " << Endl;

// variable ranges used for the input scaling
for (UInt_t ivar=0; ivar<GetNvar(); ivar++)
   istr >> fVarn_1.xmax[ivar] >> fVarn_1.xmin[ivar];

// number of layers, then the number of neurons per layer
istr >> fParam_1.layerm;

// release any previously allocated network buffers
for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];

fYNN = new Double_t*[fParam_1.layerm];
for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
   istr >> fNeur_1.neuron[layer];
   fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];
}

// synapse weights, read layer by layer
const Int_t nchar( 100 );
char* dumchar = new char[nchar];

for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {

   Int_t nq = fNeur_1.neuron[layer]/10;
   Int_t nr = fNeur_1.neuron[layer] - nq*10;
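   // nq and nr are the quotient and remainder of a division by ten: the neurons of this
   // layer are processed in blocks of at most ten (matching the fixed-width text format),
   // and the nested loops below read the bias and weight values block by block.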
   for (Int_t k=1; k<=kk; k++) {
      Int_t jmin = 10*k - 9;
      if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];

      // bias terms of the neurons in this block
      for (Int_t j=jmin; j<=jmax; j++) {
         istr >> Ww_ref(fNeur_1.ww, layer+1, j);
      }
      // weights from every neuron of the previous layer
      for (Int_t i=1; i<=fNeur_1.neuron[layer-1]; i++) {
         for (Int_t j=jmin; j<=jmax; j++) {
            istr >> W_ref(fNeur_1.w, layer+1, j, i);
         }
      }
      // skip the rest of the line
      istr.getline( dumchar, nchar );
   }
}

// layer "temperatures" used by the activation function
for (Int_t layer=0; layer<fParam_1.layerm; layer++) {

   // skip two lines
   istr.getline( dumchar, nchar );
   istr.getline( dumchar, nchar );

   istr >> fDel_1.temp[layer];
}

// sanity check
if ((Int_t)GetNvar() != fNeur_1.neuron[0]) {
   Log() << kFATAL << "<ReadWeightsFromFile> mismatch in zeroth layer:"
         << GetNvar() << " " << fNeur_1.neuron[0] << Endl;
}

fNlayers = fParam_1.layerm;
// DataInterface: callback used by the training routine to fetch one event at a time
Log() << kFATAL << "ERROR in MethodCFMlpANN_DataInterface zero pointer xpg" << Endl;

Log() << kFATAL << "ERROR in MethodCFMlpANN_DataInterface mismatch in num of variables: "
      << Endl;

// fill the class index and the variable values of the current event
*iclass = (int)opt->GetClass( TMVA::MethodCFMlpANN_nsel );
xpg[ivar] = (double)opt->GetData( TMVA::MethodCFMlpANN_nsel, ivar );

++TMVA::MethodCFMlpANN_nsel;
// AddWeightsXMLTo: write variable ranges, layer sizes, weights and temperatures to XML
for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
   s << std::scientific << fVarn_1.xmin[ivar] << " " << fVarn_1.xmax[ivar] << " ";

for (Int_t layer=0; layer<fParam_1.layerm; layer++)
   n << std::scientific << fNeur_1.neuron[layer] << " ";

for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
   gTools().AddAttr(layernode,"NNeurons",fNeur_1.neuron[layer]);
   void* neuronnode=NULL;
   for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
      stringstream weights;
      weights.precision( 16 );
      weights << std::scientific << Ww_ref(fNeur_1.ww, layer+1, neuron+1);
      for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
         weights << " " << std::scientific << W_ref(fNeur_1.w, layer+1, neuron+1, i+1);
      }
   }
}

temp.precision( 16 );
for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
   temp << std::scientific << fDel_1.temp[layer] << " ";
}
// ReadWeightsFromXML: restore what AddWeightsXMLTo wrote, in the same order
stringstream content(minmaxcontent);
for (UInt_t ivar=0; ivar<GetNvar(); ivar++)
   content >> fVarn_1.xmin[ivar] >> fVarn_1.xmax[ivar];

for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];

fYNN = new Double_t*[fParam_1.layerm];

stringstream ncontent(neuronscontent);
for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
   ncontent >> fNeur_1.neuron[layer];
   fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];
}

for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
   void* neuronnode=NULL;
   for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
      stringstream weights(neuronweights);
      weights >> Ww_ref(fNeur_1.ww, layer+1, neuron+1);        // bias first
      for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
         weights >> W_ref(fNeur_1.w, layer+1, neuron+1, i+1);  // then the incoming weights
      }
   }
}

stringstream t(temp);
for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
   t >> fDel_1.temp[layer];
}
fNlayers = fParam_1.layerm;
// PrintWeights: write the weights of the neural net in human-readable form
o << "Number of vars " << fParam_1.nvar << std::endl;
o << "Output nodes " << fParam_1.lclass << std::endl;

for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
   o << "Var " << ivar << " [" << fVarn_1.xmin[ivar] << " - " << fVarn_1.xmax[ivar] << "]" << std::endl;

o << "Number of layers " << fParam_1.layerm << std::endl;

o << "Nodes per layer ";
for (Int_t layer=0; layer<fParam_1.layerm; layer++)
   o << fNeur_1.neuron[layer] << " ";

for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {

   Int_t nq = fNeur_1.neuron[layer]/10;
   Int_t nr = fNeur_1.neuron[layer] - nq*10;

   for (Int_t k=1; k<=kk; k++) {
      Int_t jmin = 10*k - 9;
      if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
      for (j=jmin; j<=jmax; j++) {
         o << Ww_ref(fNeur_1.ww, layer+1, j) << " ";
      }
      for (i=1; i<=fNeur_1.neuron[layer-1]; i++) {
         for (j=jmin; j<=jmax; j++) {
            o << W_ref(fNeur_1.w, layer+1, j, i) << " ";
         }
      }
   }
}

for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
   o << "Del.temp in layer " << layer << " : " << fDel_1.temp[layer] << std::endl;
}
fout << "   // not implemented for class: \"" << className << "\"" << std::endl;
fout << "};" << std::endl;