MethodCFMlpANN.cxx
1 // @(#)root/tmva $Id$
2 // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss
3 
4 /**********************************************************************************
5  * Project: TMVA - a Root-integrated toolkit for multivariate Data analysis *
6  * Package: TMVA *
7  * Class : TMVA::MethodCFMlpANN *
8  * Web : http://tmva.sourceforge.net *
9  * *
10  * Description: *
11  * Implementation (see header for description) *
12  * *
13  * Authors (alphabetical): *
14  * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland *
15  * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France *
16  * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany *
17  * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada *
18  * *
19  * Copyright (c) 2005: *
20  * CERN, Switzerland *
21  * U. of Victoria, Canada *
22  * MPI-K Heidelberg, Germany *
23  * LAPP, Annecy, France *
24  * *
25  * Redistribution and use in source and binary forms, with or without *
26  * modification, are permitted according to the terms listed in LICENSE *
27  * (http://tmva.sourceforge.net/LICENSE) *
28  **********************************************************************************/
29 
30 /*! \class TMVA::MethodCFMlpANN
31 \ingroup TMVA
32 
33 Interface to the Clermont-Ferrand artificial neural network
34 
35 
36 The CFMlpANN belongs to the class of Multilayer Perceptrons (MLPs), which are
37 feed-forward networks according to the following propagation schema:
38 
39 \image html tmva_mlp.png Schema for artificial neural network.
40 
41 The input layer contains as many neurons as input variables used in the MVA.
42 The output layer contains two neurons for the signal and background
43 event classes. In between the input and output layers are a variable number
44 of <i>k</i> hidden layers with arbitrary numbers of neurons. (While the
45 structure of the input and output layers is determined by the problem, the
46 hidden layers can be configured by the user through the option string
47 of the method booking.)
48 
49 As indicated in the sketch, all neuron inputs to a layer are linear
50 combinations of the neuron output of the previous layer. The transfer
51 from input to output within a neuron is performed by means of an "activation
52 function". In general, the activation function of a neuron can be
53 zero (deactivated), one (linear), or non-linear. The above example uses
54 a sigmoid activation function. The transfer function of the output layer
55 is usually linear. As a consequence: an ANN without a hidden layer should
56 give the same discrimination power as a linear discriminant analysis (Fisher).
57 In case of one hidden layer, the ANN computes a linear combination of
58 sigmoid functions.
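
As implemented in NN_ava and NN_fonc below, the output of neuron \f$ j \f$ in layer \f$ \ell \f$
is computed from the outputs \f$ y^{(\ell-1)}_k \f$ of the previous layer as
\f[
   y^{(\ell)}_j = f_\ell\left( w^{(\ell)}_{0j} + \sum_k w^{(\ell)}_{kj}\, y^{(\ell-1)}_k \right),
   \qquad
   f_\ell(u) = \frac{1 - e^{-u/T_\ell}}{1 + e^{-u/T_\ell}} = \tanh\frac{u}{2\,T_\ell},
\f]
where \f$ w^{(\ell)}_{0j} \f$ is a bias term and \f$ T_\ell \f$ is a per-layer "temperature"
setting the steepness of the activation (the implementation clips the output to \f$ \pm 1 \f$
for very large arguments).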
59 
60 The only learning method implemented by the CFMlpANN is stochastic learning.
61 */
62 
63 
64 #include "TMVA/MethodCFMlpANN.h"
65 
66 #include "TMVA/ClassifierFactory.h"
67 #include "TMVA/Configurable.h"
68 #include "TMVA/DataSet.h"
69 #include "TMVA/DataSetInfo.h"
70 #include "TMVA/IMethod.h"
71 #include "TMVA/MethodBase.h"
73 #include "TMVA/MsgLogger.h"
74 #include "TMVA/Tools.h"
75 #include "TMVA/Types.h"
76 
77 #include "TMatrix.h"
78 #include "TObjString.h"
79 #include "Riostream.h"
80 #include "TMath.h"
81 
82 #include <cstdlib>
83 #include <iostream>
84 #include <string>
85 
86 
87 
88 REGISTER_METHOD(CFMlpANN)
89 
90 using std::stringstream;
91 using std::make_pair;
92 using std::atoi;
93 
94 ClassImp(TMVA::MethodCFMlpANN);
95 
96 
97 
98 ////////////////////////////////////////////////////////////////////////////////
99 /// standard constructor
100 ///
101 /// option string: "n_training_cycles:n_hidden_layers"
102 ///
103 /// default is: n_training_cycles = 5000, n_layers = 4
104 ///
105 /// * note that the number of hidden layers in the NN is:
106 /// n_hidden_layers = n_layers - 2
107 ///
108 /// * since there is one input and one output layer. The number of
109 /// nodes (neurons) is predefined to be:
110 ///
111 /// n_nodes[i] = nvars + 1 - i (where i=1..n_layers)
112 ///
113 /// with nvars being the number of variables used in the NN.
114 ///
115 /// Hence, the default case is:
116 ///
117 /// n_neurons(layer 1 (input)) : nvars
118 /// n_neurons(layer 2 (hidden)): nvars-1
119 /// n_neurons(layer 3 (hidden)): nvars-1
120 /// n_neurons(layer 4 (out)) : 2
121 ///
122 /// This artificial neural network usually needs a relatively large
123 /// number of cycles to converge (8000 and more). Overtraining can
124 /// be efficiently tested by comparing the signal and background
125 /// output of the NN for the events that were used for training and
126 /// an independent data sample (with equal properties). If the separation
127 /// performance is significantly better for the training sample, the
128 /// NN has learned statistical fluctuations of the training sample and is hence overtrained. In
129 /// this case, the number of cycles should be reduced, or the size
130 /// of the training sample increased.
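///
/// A minimal booking sketch (illustrative only: the factory and dataloader objects are
/// assumed to exist already, and the option string simply uses the NCycles and
/// HiddenLayers options declared in DeclareOptions below):
///
/// ~~~{.cpp}
/// factory->BookMethod( dataloader, TMVA::Types::kCFMlpANN, "CFMlpANN",
///                      "H:!V:NCycles=3000:HiddenLayers=N,N-1" );
/// ~~~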
131 
132 TMVA::MethodCFMlpANN::MethodCFMlpANN( const TString& jobName,
133  const TString& methodTitle,
134  DataSetInfo& theData,
135  const TString& theOption ) :
136  TMVA::MethodBase( jobName, Types::kCFMlpANN, methodTitle, theData, theOption),
137  fData(0),
138  fClass(0),
139  fNlayers(0),
140  fNcycles(0),
141  fNodes(0),
142  fYNN(0),
143  MethodCFMlpANN_nsel(0)
144 {
146 }
147 
148 ////////////////////////////////////////////////////////////////////////////////
149 /// constructor from weight file
150 
151 TMVA::MethodCFMlpANN::MethodCFMlpANN( DataSetInfo& theData,
152  const TString& theWeightFile):
153  TMVA::MethodBase( Types::kCFMlpANN, theData, theWeightFile),
154  fData(0),
155  fClass(0),
156  fNlayers(0),
157  fNcycles(0),
158  fNodes(0),
159  fYNN(0),
160  MethodCFMlpANN_nsel(0)
161 {
162 }
163 
164 ////////////////////////////////////////////////////////////////////////////////
165 /// CFMlpANN can handle classification with 2 classes
166 
167 Bool_t TMVA::MethodCFMlpANN::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t /*numberTargets*/ )
168 {
169  if (type == Types::kClassification && numberClasses == 2) return kTRUE;
170  return kFALSE;
171 }
172 
173 ////////////////////////////////////////////////////////////////////////////////
174 /// define the options (their key words) that can be set in the option string
175 /// known options: NCycles=xx : the number of training cycles
176 /// HiddenLayers="N-1,N-2" : the specification of the hidden layers
177 
179 {
180  DeclareOptionRef( fNcycles =3000, "NCycles", "Number of training cycles" );
181  DeclareOptionRef( fLayerSpec="N,N-1", "HiddenLayers", "Specification of hidden layer architecture" );
182 }
183 
184 ////////////////////////////////////////////////////////////////////////////////
185 /// decode the options in the option string
186 
188 {
189  fNodes = new Int_t[20]; // number of nodes per layer (maximum 20 layers)
190  fNlayers = 2;
191  Int_t currentHiddenLayer = 1;
192  TString layerSpec(fLayerSpec);
193  while(layerSpec.Length()>0) {
194  TString sToAdd = "";
195  if (layerSpec.First(',')<0) {
196  sToAdd = layerSpec;
197  layerSpec = "";
198  }
199  else {
200  sToAdd = layerSpec(0,layerSpec.First(','));
201  layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
202  }
203  Int_t nNodes = 0;
204  if (sToAdd.BeginsWith("N") || sToAdd.BeginsWith("n")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
205  nNodes += atoi(sToAdd);
206  fNodes[currentHiddenLayer++] = nNodes;
207  fNlayers++;
208  }
209  fNodes[0] = GetNvar(); // number of input nodes
210  fNodes[fNlayers-1] = 2; // number of output nodes
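 // illustration: with 4 input variables the default spec "N,N-1" expands to
 // fNodes = { 4, 4, 3, 2 }, i.e. the input layer, two hidden layers, and the 2-node output layer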
211 
212  if (IgnoreEventsWithNegWeightsInTraining()) {
213  Log() << kFATAL << "Mechanism to ignore events with negative weights in training not yet available for method: "
214  << GetMethodTypeName()
215  << " --> please remove \"IgnoreNegWeightsInTraining\" option from booking string."
216  << Endl;
217  }
218 
219  Log() << kINFO << "Use configuration (nodes per layer): in=";
220  for (Int_t i=0; i<fNlayers-1; i++) Log() << kINFO << fNodes[i] << ":";
221  Log() << kINFO << fNodes[fNlayers-1] << "=out" << Endl;
222 
223  // some info
224  Log() << "Use " << fNcycles << " training cycles" << Endl;
225 
226  Int_t nEvtTrain = Data()->GetNTrainingEvents();
227 
228  // note that one variable is type
229  if (nEvtTrain>0) {
230 
231  // Data LUT
232  fData = new TMatrix( nEvtTrain, GetNvar() );
233  fClass = new std::vector<Int_t>( nEvtTrain );
234 
235  // ---- fill LUTs
236 
237  UInt_t ivar;
238  for (Int_t ievt=0; ievt<nEvtTrain; ievt++) {
239  const Event * ev = GetEvent(ievt);
240 
241  // identify signal and background events
242  (*fClass)[ievt] = DataInfo().IsSignal(ev) ? 1 : 2;
243 
244  // use normalized input Data
245  for (ivar=0; ivar<GetNvar(); ivar++) {
246  (*fData)( ievt, ivar ) = ev->GetValue(ivar);
247  }
248  }
249 
250  //Log() << kVERBOSE << Data()->GetNEvtSigTrain() << " Signal and "
251  // << Data()->GetNEvtBkgdTrain() << " background" << " events in trainingTree" << Endl;
252  }
253 
254 }
255 
256 ////////////////////////////////////////////////////////////////////////////////
257 /// default initialisation called by all constructors
258 
259 void TMVA::MethodCFMlpANN::Init( void )
260 {
261  // CFMlpANN prefers normalised input variables
262  SetNormalised( kTRUE );
263 
264  // initialize dimensions
265  MethodCFMlpANN_nsel = 0;
266 }
267 
268 ////////////////////////////////////////////////////////////////////////////////
269 /// destructor
270 
271 TMVA::MethodCFMlpANN::~MethodCFMlpANN( void )
272 {
273  delete fData;
274  delete fClass;
275  delete[] fNodes;
276 
277  if (fYNN!=0) {
278  for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
279  delete[] fYNN;
280  fYNN=0;
281  }
282 }
283 
284 ////////////////////////////////////////////////////////////////////////////////
285 /// training of the Clermont-Ferrand NN classifier
286 
287 void TMVA::MethodCFMlpANN::Train( void )
288 {
289  Double_t dumDat(0);
290  Int_t ntrain(Data()->GetNTrainingEvents());
291  Int_t ntest(0);
292  Int_t nvar(GetNvar());
293  Int_t nlayers(fNlayers);
294  Int_t *nodes = new Int_t[nlayers];
295  Int_t ncycles(fNcycles);
296 
297  for (Int_t i=0; i<nlayers; i++) nodes[i] = fNodes[i]; // full copy of class member
298 
299  if (fYNN != 0) {
300  for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
301  delete[] fYNN;
302  fYNN = 0;
303  }
304  fYNN = new Double_t*[nlayers];
305  for (Int_t layer=0; layer<nlayers; layer++)
306  fYNN[layer] = new Double_t[fNodes[layer]];
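 // note: fYNN buffers the neuron outputs of all layers; NN_ava() refills it each time
 // the trained network is evaluated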
307 
308  // please check
309 #ifndef R__WIN32
310  Train_nn( &dumDat, &dumDat, &ntrain, &ntest, &nvar, &nlayers, nodes, &ncycles );
311 #else
312  Log() << kWARNING << "<Train> sorry CFMlpANN does not run on Windows" << Endl;
313 #endif
314 
315  delete [] nodes;
316 
317  ExitFromTraining();
318 }
319 
320 ////////////////////////////////////////////////////////////////////////////////
321 /// returns CFMlpANN output (normalised within [0,1])
322 
323 Double_t TMVA::MethodCFMlpANN::GetMvaValue( Double_t* err, Double_t* errUpper )
324 {
325  Bool_t isOK = kTRUE;
326 
327  const Event* ev = GetEvent();
328 
329  // copy of input variables
330  std::vector<Double_t> inputVec( GetNvar() );
331  for (UInt_t ivar=0; ivar<GetNvar(); ivar++) inputVec[ivar] = ev->GetValue(ivar);
332 
333  Double_t myMVA = EvalANN( inputVec, isOK );
334  if (!isOK) Log() << kFATAL << "EvalANN returns (!isOK) for event " << Endl;
335 
336  // cannot determine error
337  NoErrorCalc(err, errUpper);
338 
339  return myMVA;
340 }
341 
342 ////////////////////////////////////////////////////////////////////////////////
343 /// evaluates NN value as function of input variables
344 
345 Double_t TMVA::MethodCFMlpANN::EvalANN( std::vector<Double_t>& inVar, Bool_t& isOK )
346 {
347  // hard copy of input variables (necessary because they are updated later)
348  Double_t* xeev = new Double_t[GetNvar()];
349  for (UInt_t ivar=0; ivar<GetNvar(); ivar++) xeev[ivar] = inVar[ivar];
350 
351  // ---- now apply the weights: get NN output
352  isOK = kTRUE;
353  for (UInt_t jvar=0; jvar<GetNvar(); jvar++) {
354 
355  if (fVarn_1.xmax[jvar] < xeev[jvar]) xeev[jvar] = fVarn_1.xmax[jvar];
356  if (fVarn_1.xmin[jvar] > xeev[jvar]) xeev[jvar] = fVarn_1.xmin[jvar];
357  if (fVarn_1.xmax[jvar] == fVarn_1.xmin[jvar]) {
358  isOK = kFALSE;
359  xeev[jvar] = 0;
360  }
361  else {
362  xeev[jvar] = xeev[jvar] - ((fVarn_1.xmax[jvar] + fVarn_1.xmin[jvar])/2);
363  xeev[jvar] = xeev[jvar] / ((fVarn_1.xmax[jvar] - fVarn_1.xmin[jvar])/2);
364  }
365  }
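 // every xeev[jvar] has now been clamped to its training-time [xmin, xmax] range
 // and mapped linearly onto [-1, +1]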
366 
367  NN_ava( xeev );
368 
369  Double_t retval = 0.5*(1.0 + fYNN[fParam_1.layerm-1][0]);
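 // the output-node activation fYNN[...][0] lies in [-1, +1]; the line above rescales it
 // to the [0,1] range returned by GetMvaValue()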
370 
371  delete [] xeev;
372 
373  return retval;
374 }
375 
376 ////////////////////////////////////////////////////////////////////////////////
377 /// auxiliary function: forward propagation of the input vector through the network
378 
379 void TMVA::MethodCFMlpANN::NN_ava( Double_t* xeev )
380 {
381  for (Int_t ivar=0; ivar<fNeur_1.neuron[0]; ivar++) fYNN[0][ivar] = xeev[ivar];
382 
383  for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
384  for (Int_t j=1; j<=fNeur_1.neuron[layer]; j++) {
385 
386  Double_t x = Ww_ref(fNeur_1.ww, layer+1,j); // init with the bias layer
387 
388  for (Int_t k=1; k<=fNeur_1.neuron[layer-1]; k++) { // neurons of originating layer
389  x += fYNN[layer-1][k-1]*W_ref(fNeur_1.w, layer+1, j, k);
390  }
391  fYNN[layer][j-1] = NN_fonc( layer, x );
392  }
393  }
394 }
395 
396 ////////////////////////////////////////////////////////////////////////////////
397 /// activation function
398 
399 Double_t TMVA::MethodCFMlpANN::NN_fonc( Int_t i, Double_t u ) const
400 {
401  Double_t f(0);
402 
403  if (u/fDel_1.temp[i] > 170) f = +1;
404  else if (u/fDel_1.temp[i] < -170) f = -1;
405  else {
406  Double_t yy = TMath::Exp(-u/fDel_1.temp[i]);
407  f = (1 - yy)/(1 + yy);
408  }
409 
410  return f;
411 }
412 
413 ////////////////////////////////////////////////////////////////////////////////
414 /// read back the weights of the trained network from a file (stream)
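/// The expected layout, as parsed below, is: the number of variables and classes; one
/// (xmax, xmin) pair per input variable; the number of layers; the number of neurons per
/// layer; then, for each layer, the bias and incoming weights of its neurons written in
/// blocks of up to ten neurons; and finally one temperature parameter per layer.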
415 
416 void TMVA::MethodCFMlpANN::ReadWeightsFromStream( std::istream & istr )
417 {
418  TString var;
419 
420  // read number of variables and classes
421  UInt_t nva(0), lclass(0);
422  istr >> nva >> lclass;
423 
424  if (GetNvar() != nva) // wrong file
425  Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of variables" << Endl;
426 
427  // number of output classes must be 2
428  if (lclass != 2) // wrong file
429  Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of classes" << Endl;
430 
431  // check that we are not at the end of the file
432  if (istr.eof( ))
433  Log() << kFATAL << "<ReadWeightsFromStream> reached EOF prematurely " << Endl;
434 
435  // read extrema of input variables
436  for (UInt_t ivar=0; ivar<GetNvar(); ivar++)
437  istr >> fVarn_1.xmax[ivar] >> fVarn_1.xmin[ivar];
438 
439  // read number of layers (sum of: input + output + hidden)
440  istr >> fParam_1.layerm;
441 
442  if (fYNN != 0) {
443  for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
444  delete[] fYNN;
445  fYNN = 0;
446  }
447  fYNN = new Double_t*[fParam_1.layerm];
448  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
449  // read number of neurons for each layer
450  // coverity[tainted_data_argument]
451  istr >> fNeur_1.neuron[layer];
452  fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];
453  }
454 
455  // to read dummy lines
456  const Int_t nchar( 100 );
457  char* dumchar = new char[nchar];
458 
459  // read weights
460  for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {
461 
462  Int_t nq = fNeur_1.neuron[layer]/10;
463  Int_t nr = fNeur_1.neuron[layer] - nq*10;
464 
465  Int_t kk(0);
466  if (nr==0) kk = nq;
467  else kk = nq+1;
468 
469  for (Int_t k=1; k<=kk; k++) {
470  Int_t jmin = 10*k - 9;
471  Int_t jmax = 10*k;
472  if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
473  for (Int_t j=jmin; j<=jmax; j++) {
474  istr >> Ww_ref(fNeur_1.ww, layer+1, j);
475  }
476  for (Int_t i=1; i<=fNeur_1.neuron[layer-1]; i++) {
477  for (Int_t j=jmin; j<=jmax; j++) {
478  istr >> W_ref(fNeur_1.w, layer+1, j, i);
479  }
480  }
481  // skip two empty lines
482  istr.getline( dumchar, nchar );
483  }
484  }
485 
486  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
487 
488  // skip 2 empty lines
489  istr.getline( dumchar, nchar );
490  istr.getline( dumchar, nchar );
491 
492  istr >> fDel_1.temp[layer];
493  }
494 
495  // sanity check
496  if ((Int_t)GetNvar() != fNeur_1.neuron[0]) {
497  Log() << kFATAL << "<ReadWeightsFromFile> mismatch in zeroth layer:"
498  << GetNvar() << " " << fNeur_1.neuron[0] << Endl;
499  }
500 
501  fNlayers = fParam_1.layerm;
502  delete[] dumchar;
503 }
504 
505 ////////////////////////////////////////////////////////////////////////////////
506 /// data interface function
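/// This is presumably invoked from the f2c-translated training code (see the mlpl3 note in
/// the body); it hands over, one call at a time, the class index and variable values of the
/// next training event.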
507 
508 Int_t TMVA::MethodCFMlpANN::DataInterface( Double_t* /*tout2*/, Double_t* /*tin2*/,
509  Int_t* /* icode*/, Int_t* /*flag*/,
510  Int_t* /*nalire*/, Int_t* nvar,
511  Double_t* xpg, Int_t* iclass, Int_t* ikend )
512 {
513  // icode and ikend are dummies needed to match f2c mlpl3 functions
514  *ikend = 0;
515 
516 
517  // sanity checks
518  if (0 == xpg) {
519  Log() << kFATAL << "ERROR in MethodCFMlpANN_DataInterface zero pointer xpg" << Endl;
520  }
521  if (*nvar != (Int_t)this->GetNvar()) {
522  Log() << kFATAL << "ERROR in MethodCFMlpANN_DataInterface mismatch in num of variables: "
523  << *nvar << " " << this->GetNvar() << Endl;
524  }
525 
526  // fill variables
527  *iclass = (int)this->GetClass( MethodCFMlpANN_nsel );
528  for (UInt_t ivar=0; ivar<this->GetNvar(); ivar++)
529  xpg[ivar] = (double)this->GetData( MethodCFMlpANN_nsel, ivar );
530 
531  ++MethodCFMlpANN_nsel;
532 
533  return 0;
534 }
535 
536 ////////////////////////////////////////////////////////////////////////////////
537 /// write weights to xml file
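/// The structure written below is a "Weights" node with NVars, NClasses and NLayers
/// attributes, a "VarMinMax" child holding the per-variable minima and maxima, an
/// "NNeurons" child holding the neuron count of each layer, one "Layer&lt;i&gt;" child per
/// non-input layer whose "Neuron&lt;j&gt;" children carry the bias followed by the incoming
/// weights, and a final "LayerTemp" child with the per-layer temperature parameters.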
538 
539 void TMVA::MethodCFMlpANN::AddWeightsXMLTo( void* parent ) const
540 {
541  void *wght = gTools().AddChild(parent, "Weights");
542  gTools().AddAttr(wght,"NVars",fParam_1.nvar);
543  gTools().AddAttr(wght,"NClasses",fParam_1.lclass);
544  gTools().AddAttr(wght,"NLayers",fParam_1.layerm);
545  void* minmaxnode = gTools().AddChild(wght, "VarMinMax");
546  stringstream s;
547  s.precision( 16 );
548  for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
549  s << std::scientific << fVarn_1.xmin[ivar] << " " << fVarn_1.xmax[ivar] << " ";
550  gTools().AddRawLine( minmaxnode, s.str().c_str() );
551  void* neurons = gTools().AddChild(wght, "NNeurons");
552  stringstream n;
553  n.precision( 16 );
554  for (Int_t layer=0; layer<fParam_1.layerm; layer++)
555  n << std::scientific << fNeur_1.neuron[layer] << " ";
556  gTools().AddRawLine( neurons, n.str().c_str() );
557  for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
558  void* layernode = gTools().AddChild(wght, "Layer"+gTools().StringFromInt(layer));
559  gTools().AddAttr(layernode,"NNeurons",fNeur_1.neuron[layer]);
560  void* neuronnode=NULL;
561  for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
562  neuronnode = gTools().AddChild(layernode,"Neuron"+gTools().StringFromInt(neuron));
563  stringstream weights;
564  weights.precision( 16 );
565  weights << std::scientific << Ww_ref(fNeur_1.ww, layer+1, neuron+1);
566  for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
567  weights << " " << std::scientific << W_ref(fNeur_1.w, layer+1, neuron+1, i+1);
568  }
569  gTools().AddRawLine( neuronnode, weights.str().c_str() );
570  }
571  }
572  void* tempnode = gTools().AddChild(wght, "LayerTemp");
573  stringstream temp;
574  temp.precision( 16 );
575  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
576  temp << std::scientific << fDel_1.temp[layer] << " ";
577  }
578  gTools().AddRawLine(tempnode, temp.str().c_str() );
579 }
580 ////////////////////////////////////////////////////////////////////////////////
581 /// read weights from xml file
582 
583 void TMVA::MethodCFMlpANN::ReadWeightsFromXML( void* wghtnode )
584 {
585  gTools().ReadAttr( wghtnode, "NLayers",fParam_1.layerm );
586  void* minmaxnode = gTools().GetChild(wghtnode);
587  const char* minmaxcontent = gTools().GetContent(minmaxnode);
588  stringstream content(minmaxcontent);
589  for (UInt_t ivar=0; ivar<GetNvar(); ivar++)
590  content >> fVarn_1.xmin[ivar] >> fVarn_1.xmax[ivar];
591  if (fYNN != 0) {
592  for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
593  delete[] fYNN;
594  fYNN = 0;
595  }
596  fYNN = new Double_t*[fParam_1.layerm];
597  void *layernode=gTools().GetNextChild(minmaxnode);
598  const char* neuronscontent = gTools().GetContent(layernode);
599  stringstream ncontent(neuronscontent);
600  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
601  // read number of neurons for each layer;
602  // coverity[tainted_data_argument]
603  ncontent >> fNeur_1.neuron[layer];
604  fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];
605  }
606  for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
607  layernode=gTools().GetNextChild(layernode);
608  void* neuronnode=NULL;
609  neuronnode = gTools().GetChild(layernode);
610  for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
611  const char* neuronweights = gTools().GetContent(neuronnode);
612  stringstream weights(neuronweights);
613  weights >> Ww_ref(fNeur_1.ww, layer+1, neuron+1);
614  for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
615  weights >> W_ref(fNeur_1.w, layer+1, neuron+1, i+1);
616  }
617  neuronnode=gTools().GetNextChild(neuronnode);
618  }
619  }
620  void* tempnode=gTools().GetNextChild(layernode);
621  const char* temp = gTools().GetContent(tempnode);
622  stringstream t(temp);
623  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
624  t >> fDel_1.temp[layer];
625  }
626  fNlayers = fParam_1.layerm;
627 }
628 
629 ////////////////////////////////////////////////////////////////////////////////
630 /// write the weights of the neural net
631 
632 void TMVA::MethodCFMlpANN::PrintWeights( std::ostream & o ) const
633 {
634  // write number of variables and classes
635  o << "Number of vars " << fParam_1.nvar << std::endl;
636  o << "Output nodes " << fParam_1.lclass << std::endl;
637 
638  // write extrema of input variables
639  for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
640  o << "Var " << ivar << " [" << fVarn_1.xmin[ivar] << " - " << fVarn_1.xmax[ivar] << "]" << std::endl;
641 
642  // write number of layers (sum of: input + output + hidden)
643  o << "Number of layers " << fParam_1.layerm << std::endl;
644 
645  o << "Nodes per layer ";
646  for (Int_t layer=0; layer<fParam_1.layerm; layer++)
647  // write number of neurons for each layer
648  o << fNeur_1.neuron[layer] << " ";
649  o << std::endl;
650 
651  // write weights
652  for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {
653 
654  Int_t nq = fNeur_1.neuron[layer]/10;
655  Int_t nr = fNeur_1.neuron[layer] - nq*10;
656 
657  Int_t kk(0);
658  if (nr==0) kk = nq;
659  else kk = nq+1;
660 
661  for (Int_t k=1; k<=kk; k++) {
662  Int_t jmin = 10*k - 9;
663  Int_t jmax = 10*k;
664  Int_t i, j;
665  if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
666  for (j=jmin; j<=jmax; j++) {
667 
668  //o << fNeur_1.ww[j*max_nLayers_ + layer - 6] << " ";
669  o << Ww_ref(fNeur_1.ww, layer+1, j) << " ";
670 
671  }
672  o << std::endl;
673  //for (i=1; i<=fNeur_1.neuron[layer-1]; i++) {
674  for (i=1; i<=fNeur_1.neuron[layer-1]; i++) {
675  for (j=jmin; j<=jmax; j++) {
676  // o << fNeur_1.w[(i*max_nNodes_ + j)*max_nLayers_ + layer - 186] << " ";
677  o << W_ref(fNeur_1.w, layer+1, j, i) << " ";
678  }
679  o << std::endl;
680  }
681 
682  // skip two empty lines
683  o << std::endl;
684  }
685  }
686  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
687  o << "Del.temp in layer " << layer << " : " << fDel_1.temp[layer] << std::endl;
688  }
689 }
690 
691 ////////////////////////////////////////////////////////////////////////////////
692 
693 void TMVA::MethodCFMlpANN::MakeClassSpecific( std::ostream& fout, const TString& className ) const
694 {
695  // write specific classifier response
696  fout << " // not implemented for class: \"" << className << "\"" << std::endl;
697  fout << "};" << std::endl;
698 }
699 
700 ////////////////////////////////////////////////////////////////////////////////
701 /// write specific classifier response for header
702 
703 void TMVA::MethodCFMlpANN::MakeClassSpecificHeader( std::ostream& , const TString& ) const
704 {
705 }
706 
707 ////////////////////////////////////////////////////////////////////////////////
708 /// get help message text
709 ///
710 /// typical length of text line:
711 /// "|--------------------------------------------------------------|"
712 
713 void TMVA::MethodCFMlpANN::GetHelpMessage() const
714 {
715  Log() << Endl;
716  Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
717  Log() << Endl;
718  Log() << "<None>" << Endl;
719  Log() << Endl;
720  Log() << gTools().Color("bold") << "--- Performance optimisation:" << gTools().Color("reset") << Endl;
721  Log() << Endl;
722  Log() << "<None>" << Endl;
723  Log() << Endl;
724  Log() << gTools().Color("bold") << "--- Performance tuning via configuration options:" << gTools().Color("reset") << Endl;
725  Log() << Endl;
726  Log() << "<None>" << Endl;
727 }