ROOT  6.07/01
Reference Guide
MethodCFMlpANN.cxx
1 // @(#)root/tmva $Id$
2 // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss
3 
4 /**********************************************************************************
5  * Project: TMVA - a Root-integrated toolkit for multivariate Data analysis *
6  * Package: TMVA *
7  * Class : TMVA::MethodCFMlpANN *
8  * Web : http://tmva.sourceforge.net *
9  * *
10  * Description: *
11  * Implementation (see header for description) *
12  * *
13  * Authors (alphabetical): *
14  * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland *
15  * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France *
16  * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany *
17  * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada *
18  * *
19  * Copyright (c) 2005: *
20  * CERN, Switzerland *
21  * U. of Victoria, Canada *
22  * MPI-K Heidelberg, Germany *
23  * LAPP, Annecy, France *
24  * *
25  * Redistribution and use in source and binary forms, with or without *
26  * modification, are permitted according to the terms listed in LICENSE *
27  * (http://tmva.sourceforge.net/LICENSE) *
28  **********************************************************************************/
29 
30 //_______________________________________________________________________
31 //
32 // Begin_Html
33 /*
34  Interface to the Clermont-Ferrand artificial neural network
35 
36  <p>
37  The CFMlpANN belongs to the class of Multilayer Perceptrons (MLP), which are
38  feed-forward networks according to the following propagation schema:<br>
39  <center>
40  <img vspace=10 src="gif/tmva_mlp.gif" align="bottom" alt="Schema for artificial neural network">
41  </center>
42  The input layer contains as many neurons as input variables used in the MVA.
43  The output layer contains two neurons for the signal and background
44  event classes. In between the input and output layers are a variable number
45  of <i>k</i> hidden layers with arbitrary numbers of neurons. (While the
46  structure of the input and output layers is determined by the problem, the
47  hidden layers can be configured by the user through the option string
48  of the method booking.) <br>
49 
50  As indicated in the sketch, all neuron inputs to a layer are linear
51  combinations of the neuron outputs of the previous layer. The transfer
52  from input to output within a neuron is performed by means of an "activation
53  function". In general, the activation function of a neuron can be
54  zero (deactivated), one (linear), or non-linear. The above example uses
55  a sigmoid activation function. The transfer function of the output layer
56  is usually linear. As a consequence, an ANN without a hidden layer should
57  give the same discrimination power as a linear discriminant analysis (Fisher).
58  In the case of one hidden layer, the ANN computes a linear combination of
59  sigmoids. <br>
60 
61  The only learning method supported by the CFMlpANN is stochastic learning.
62 */
63 // End_Html
64 //_______________________________________________________________________
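A minimal booking sketch (not part of this file), assuming a TMVA::Factory configured as in the standard TMVAClassification tutorial; the option values shown are examples only:

// Book the Clermont-Ferrand ANN through the Factory. "factory" is assumed to be
// a fully configured TMVA::Factory; NCycles and HiddenLayers are the options
// declared in DeclareOptions() below, with illustrative values.
factory->BookMethod( TMVA::Types::kCFMlpANN, "CFMlpANN",
                     "!H:!V:NCycles=2000:HiddenLayers=N+1,N" );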
65 
66 #include <string>
67 #include <cstdlib>
68 #include <iostream>
69 
70 #include "TMatrix.h"
71 #include "TObjString.h"
72 #include "Riostream.h"
73 #include "TMath.h"
74 
75 #include "TMVA/ClassifierFactory.h"
76 #include "TMVA/DataSet.h"
77 #include "TMVA/DataSetInfo.h"
78 #include "TMVA/MethodBase.h"
79 #include "TMVA/MethodCFMlpANN.h"
81 #include "TMVA/MsgLogger.h"
82 #include "TMVA/Tools.h"
83 #include "TMVA/Types.h"
84 
85 REGISTER_METHOD(CFMlpANN)
86 
87 using std::stringstream;
88 using std::make_pair;
89 using std::atoi;
90 
91 ClassImp(TMVA::MethodCFMlpANN)
92 
93 // initialization of global variable
94 namespace TMVA {
95  Int_t MethodCFMlpANN_nsel = 0;
96 }
97 
98 TMVA::MethodCFMlpANN* TMVA::MethodCFMlpANN::fgThis = 0;
99 
100 ////////////////////////////////////////////////////////////////////////////////
101 /// standard constructor
102 /// option string: "n_training_cycles:n_hidden_layers"
103 /// default is: n_training_cycles = 5000, n_layers = 4
104 ///
105 /// * note that the number of hidden layers in the NN is:
106 /// n_hidden_layers = n_layers - 2
107 ///
108 /// * since there is one input and one output layer. The number of
109 /// nodes (neurons) is predefined to be:
110 /// n_nodes[i] = nvars + 1 - i (where i=1..n_layers)
111 ///
112 /// with nvars being the number of variables used in the NN.
113 ///
114 /// Hence, the default case is: n_neurons(layer 1 (input)) : nvars
115 /// n_neurons(layer 2 (hidden)): nvars-1
116 /// n_neurons(layer 3 (hidden)): nvars-1
117 /// n_neurons(layer 4 (out)) : 2
118 ///
119 /// This artificial neural network usually needs a relatively large
120 /// number of cycles to converge (8000 and more). Overtraining can
121 /// be efficiently tested by comparing the signal and background
122 /// output of the NN for the events that were used for training and
123 /// an independent data sample (with equal properties). If the separation
124 /// performance is significantly better for the training sample, the
125 /// NN has picked up statistical fluctuations, and is hence overtrained. In
126 /// this case, the number of cycles should be reduced, or the size
127 /// of the training sample increased.
128 
129 TMVA::MethodCFMlpANN::MethodCFMlpANN( const TString& jobName,
130  const TString& methodTitle,
131  DataSetInfo& theData,
132  const TString& theOption,
133  TDirectory* theTargetDir ) :
134  TMVA::MethodBase( jobName, Types::kCFMlpANN, methodTitle, theData, theOption, theTargetDir ),
135  fData(0),
136  fClass(0),
137  fNlayers(0),
138  fNcycles(0),
139  fNodes(0),
140  fYNN(0)
141 {
143 
144 }
145 
146 ////////////////////////////////////////////////////////////////////////////////
147 /// constructor from weight file
148 
149 TMVA::MethodCFMlpANN::MethodCFMlpANN( DataSetInfo& theData,
150  const TString& theWeightFile,
151  TDirectory* theTargetDir ):
152  TMVA::MethodBase( Types::kCFMlpANN, theData, theWeightFile, theTargetDir ),
153  fData(0),
154  fClass(0),
155  fNlayers(0),
156  fNcycles(0),
157  fNodes(0),
158  fYNN(0)
159 {
160 }
161 
162 ////////////////////////////////////////////////////////////////////////////////
163 /// CFMlpANN can handle classification with 2 classes
164 
165 Bool_t TMVA::MethodCFMlpANN::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t /*numberTargets*/ )
166 {
167  if (type == Types::kClassification && numberClasses == 2) return kTRUE;
168  return kFALSE;
169 }
170 
171 ////////////////////////////////////////////////////////////////////////////////
172 /// define the options (their key words) that can be set in the option string
173 /// known options: NCycles=xx :the number of training cycles
174 /// HiddenLayers="N-1,N-2" :the specification of the hidden layers
175 
176 void TMVA::MethodCFMlpANN::DeclareOptions()
177 {
178  DeclareOptionRef( fNcycles =3000, "NCycles", "Number of training cycles" );
179  DeclareOptionRef( fLayerSpec="N,N-1", "HiddenLayers", "Specification of hidden layer architecture" );
180 }
181 
182 ////////////////////////////////////////////////////////////////////////////////
183 /// decode the options in the option string
184 
185 void TMVA::MethodCFMlpANN::ProcessOptions()
186 {
187  fNodes = new Int_t[20]; // number of nodes per layer (maximum 20 layers)
188  fNlayers = 2;
189  Int_t currentHiddenLayer = 1;
190  TString layerSpec(fLayerSpec);
191  while(layerSpec.Length()>0) {
192  TString sToAdd = "";
193  if (layerSpec.First(',')<0) {
194  sToAdd = layerSpec;
195  layerSpec = "";
196  }
197  else {
198  sToAdd = layerSpec(0,layerSpec.First(','));
199  layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
200  }
201  Int_t nNodes = 0;
202  if (sToAdd.BeginsWith("N") || sToAdd.BeginsWith("n")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
203  nNodes += atoi(sToAdd);
204  fNodes[currentHiddenLayer++] = nNodes;
205  fNlayers++;
206  }
207  fNodes[0] = GetNvar(); // number of input nodes
208  fNodes[fNlayers-1] = 2; // number of output nodes
209 
210  if (IgnoreEventsWithNegWeightsInTraining()) {
211  Log() << kFATAL << "Mechanism to ignore events with negative weights in training not yet available for method: "
212  << GetMethodTypeName()
213  << " --> please remove \"IgnoreNegWeightsInTraining\" option from booking string."
214  << Endl;
215  }
216 
217  Log() << kINFO << "Use configuration (nodes per layer): in=";
218  for (Int_t i=0; i<fNlayers-1; i++) Log() << kINFO << fNodes[i] << ":";
219  Log() << kINFO << fNodes[fNlayers-1] << "=out" << Endl;
220 
221  // some info
222  Log() << "Use " << fNcycles << " training cycles" << Endl;
223 
224  Int_t nEvtTrain = Data()->GetNTrainingEvents();
225 
226  // note that one variable is type
227  if (nEvtTrain>0) {
228 
229  // Data LUT
230  fData = new TMatrix( nEvtTrain, GetNvar() );
231  fClass = new std::vector<Int_t>( nEvtTrain );
232 
233  // ---- fill LUTs
234 
235  UInt_t ivar;
236  for (Int_t ievt=0; ievt<nEvtTrain; ievt++) {
237  const Event * ev = GetEvent(ievt);
238 
239  // identify signal and background events
240  (*fClass)[ievt] = DataInfo().IsSignal(ev) ? 1 : 2;
241 
242  // use normalized input Data
243  for (ivar=0; ivar<GetNvar(); ivar++) {
244  (*fData)( ievt, ivar ) = ev->GetValue(ivar);
245  }
246  }
247 
248  //Log() << kVERBOSE << Data()->GetNEvtSigTrain() << " Signal and "
249  // << Data()->GetNEvtBkgdTrain() << " background" << " events in trainingTree" << Endl;
250  }
251 
252 }
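For illustration (not part of this file): the standalone sketch below mirrors the HiddenLayers parsing above using plain std::string; ExpandLayerSpec is a hypothetical helper, not a TMVA function.

// Illustration only: expand a HiddenLayers specification such as "N,N-1"
// into the number of nodes per layer, as ProcessOptions() does above.
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

std::vector<int> ExpandLayerSpec( const std::string& spec, int nvar )
{
   std::vector<int> nodes;
   nodes.push_back( nvar );                          // input layer: one node per variable
   std::string rest = spec;
   while (!rest.empty()) {
      std::size_t pos = rest.find( ',' );
      std::string tok = (pos == std::string::npos) ? rest : rest.substr( 0, pos );
      rest = (pos == std::string::npos) ? "" : rest.substr( pos + 1 );
      int n = 0;
      if (!tok.empty() && (tok[0] == 'N' || tok[0] == 'n')) { n = nvar; tok.erase( 0, 1 ); }
      n += std::atoi( tok.c_str() );                 // e.g. "N-1" -> nvar + (-1)
      nodes.push_back( n );                          // one hidden layer per token
   }
   nodes.push_back( 2 );                             // output layer: signal and background
   return nodes;
}

int main()
{
   // with 5 input variables, "N,N-1" gives layers of 5, 5, 4 and 2 nodes
   for (int n : ExpandLayerSpec( "N,N-1", 5 )) std::cout << n << " ";
   std::cout << std::endl;
   return 0;
}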
253 
254 ////////////////////////////////////////////////////////////////////////////////
255 /// default initialisation called by all constructors
256 
257 void TMVA::MethodCFMlpANN::Init( void )
258 {
259  // CFMlpANN prefers normalised input variables
260  SetNormalised( kTRUE );
261 
262  // initialize all pointers
263  fgThis = this;
264 
265  // initialize dimensions
266  TMVA::MethodCFMlpANN_nsel = 0;
267 }
268 
269 ////////////////////////////////////////////////////////////////////////////////
270 /// destructor
271 
272 TMVA::MethodCFMlpANN::~MethodCFMlpANN( void )
273 {
274  delete fData;
275  delete fClass;
276  delete[] fNodes;
277 
278  if (fYNN!=0) {
279  for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
280  delete[] fYNN;
281  fYNN=0;
282  }
283 }
284 
285 ////////////////////////////////////////////////////////////////////////////////
286 /// training of the Clermont-Ferrand NN classifier
287 
288 void TMVA::MethodCFMlpANN::Train( void )
289 {
290  Double_t dumDat(0);
291  Int_t ntrain(Data()->GetNTrainingEvents());
292  Int_t ntest(0);
293  Int_t nvar(GetNvar());
294  Int_t nlayers(fNlayers);
295  Int_t *nodes = new Int_t[nlayers];
296  Int_t ncycles(fNcycles);
297 
298  for (Int_t i=0; i<nlayers; i++) nodes[i] = fNodes[i]; // full copy of class member
299 
300  if (fYNN != 0) {
301  for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
302  delete[] fYNN;
303  fYNN = 0;
304  }
305  fYNN = new Double_t*[nlayers];
306  for (Int_t layer=0; layer<nlayers; layer++)
307  fYNN[layer] = new Double_t[fNodes[layer]];
308 
309  // please check
310 #ifndef R__WIN32
311  Train_nn( &dumDat, &dumDat, &ntrain, &ntest, &nvar, &nlayers, nodes, &ncycles );
312 #else
313  Log() << kWARNING << "<Train> sorry CFMlpANN does not run on Windows" << Endl;
314 #endif
315 
316  delete [] nodes;
317 }
318 
319 ////////////////////////////////////////////////////////////////////////////////
320 /// returns CFMlpANN output (normalised within [0,1])
321 
322 Double_t TMVA::MethodCFMlpANN::GetMvaValue( Double_t* err, Double_t* errUpper )
323 {
324  Bool_t isOK = kTRUE;
325 
326  const Event* ev = GetEvent();
327 
328  // copy of input variables
329  std::vector<Double_t> inputVec( GetNvar() );
330  for (UInt_t ivar=0; ivar<GetNvar(); ivar++) inputVec[ivar] = ev->GetValue(ivar);
331 
332  Double_t myMVA = EvalANN( inputVec, isOK );
333  if (!isOK) Log() << kFATAL << "EvalANN returns (!isOK) for event " << Endl;
334 
335  // cannot determine error
336  NoErrorCalc(err, errUpper);
337 
338  return myMVA;
339 }
340 
341 ////////////////////////////////////////////////////////////////////////////////
342 /// evaluates NN value as function of input variables
343 
344 Double_t TMVA::MethodCFMlpANN::EvalANN( std::vector<Double_t>& inVar, Bool_t& isOK )
345 {
346  // hard copy of input variables (necessary because they are updated later)
347  Double_t* xeev = new Double_t[GetNvar()];
348  for (UInt_t ivar=0; ivar<GetNvar(); ivar++) xeev[ivar] = inVar[ivar];
349 
350  // ---- now apply the weights: get NN output
351  isOK = kTRUE;
352  for (UInt_t jvar=0; jvar<GetNvar(); jvar++) {
353 
354  if (fVarn_1.xmax[jvar] < xeev[jvar]) xeev[jvar] = fVarn_1.xmax[jvar];
355  if (fVarn_1.xmin[jvar] > xeev[jvar]) xeev[jvar] = fVarn_1.xmin[jvar];
356  if (fVarn_1.xmax[jvar] == fVarn_1.xmin[jvar]) {
357  isOK = kFALSE;
358  xeev[jvar] = 0;
359  }
360  else {
361  xeev[jvar] = xeev[jvar] - ((fVarn_1.xmax[jvar] + fVarn_1.xmin[jvar])/2);
362  xeev[jvar] = xeev[jvar] / ((fVarn_1.xmax[jvar] - fVarn_1.xmin[jvar])/2);
363  }
364  }
365 
366  NN_ava( xeev );
367 
368  Double_t retval = 0.5*(1.0 + fYNN[fParam_1.layerm-1][0]);
369 
370  delete [] xeev;
371 
372  return retval;
373 }
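A side illustration (not part of this file) of the two mappings used above: each input is rescaled from its stored [xmin, xmax] range onto [-1, +1], and 0.5*(1 + y) maps the network output from [-1, +1] onto the [0, 1] classifier range. All numbers are made up.

// Input scaling and output mapping as used in EvalANN(); values are illustrative.
#include <cstdio>

int main()
{
   const double xmin = 10.0, xmax = 30.0;                    // stored variable extrema
   const double x    = 25.0;                                 // raw input value
   const double xs   = (x - 0.5*(xmax + xmin)) / (0.5*(xmax - xmin));
   std::printf( "scaled input : %.2f\n", xs );               // -> 0.50, inside [-1,+1]

   const double y = -0.2;                                    // network output in [-1,+1]
   std::printf( "MVA output   : %.2f\n", 0.5*(1.0 + y) );    // -> 0.40, inside [0,1]
   return 0;
}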
374 
375 ////////////////////////////////////////////////////////////////////////////////
376 /// auxiliary functions
377 
378 void TMVA::MethodCFMlpANN::NN_ava( Double_t* xeev )
379 {
380  for (Int_t ivar=0; ivar<fNeur_1.neuron[0]; ivar++) fYNN[0][ivar] = xeev[ivar];
381 
382  for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
383  for (Int_t j=1; j<=fNeur_1.neuron[layer]; j++) {
384 
385  Double_t x = Ww_ref(fNeur_1.ww, layer+1,j); // init with the bias layer
386 
387  for (Int_t k=1; k<=fNeur_1.neuron[layer-1]; k++) { // neurons of originating layer
388  x += fYNN[layer-1][k-1]*W_ref(fNeur_1.w, layer+1, j, k);
389  }
390  fYNN[layer][j-1] = NN_fonc( layer, x );
391  }
392  }
393 }
394 
395 ////////////////////////////////////////////////////////////////////////////////
396 /// activation function
397 
398 Double_t TMVA::MethodCFMlpANN::NN_fonc( Int_t i, Double_t u ) const
399 {
400  Double_t f(0);
401 
402  if (u/fDel_1.temp[i] > 170) f = +1;
403  else if (u/fDel_1.temp[i] < -170) f = -1;
404  else {
405  Double_t yy = TMath::Exp(-u/fDel_1.temp[i]);
406  f = (1 - yy)/(1 + yy);
407  }
408 
409  return f;
410 }
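A side note (not part of this file): the activation implemented above, f(u) = (1 - exp(-u/T)) / (1 + exp(-u/T)) with T = fDel_1.temp[i], equals tanh(u/(2T)); a quick numerical check:

// Numerical check that the activation above is tanh(u/(2T)).
#include <cmath>
#include <cstdio>

int main()
{
   const double T = 1.0;                            // stands in for fDel_1.temp[i]
   const double us[] = { -3.0, -0.5, 0.0, 0.5, 3.0 };
   for (double u : us) {
      const double yy = std::exp( -u/T );
      const double f  = (1 - yy) / (1 + yy);
      std::printf( "u=% .1f  f=% .6f  tanh(u/(2T))=% .6f\n", u, f, std::tanh( u/(2*T) ) );
   }
   return 0;
}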
411 
412 ////////////////////////////////////////////////////////////////////////////////
413 /// read back the weights of the training from file (stream)
414 
415 void TMVA::MethodCFMlpANN::ReadWeightsFromStream( std::istream & istr )
416 {
417  TString var;
418 
419  // read number of variables and classes
420  UInt_t nva(0), lclass(0);
421  istr >> nva >> lclass;
422 
423  if (GetNvar() != nva) // wrong file
424  Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of variables" << Endl;
425 
426  // number of output classes must be 2
427  if (lclass != 2) // wrong file
428  Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of classes" << Endl;
429 
430  // check that we are not at the end of the file
431  if (istr.eof( ))
432  Log() << kFATAL << "<ReadWeightsFromStream> reached EOF prematurely " << Endl;
433 
434  // read extrema of input variables
435  for (UInt_t ivar=0; ivar<GetNvar(); ivar++)
436  istr >> fVarn_1.xmax[ivar] >> fVarn_1.xmin[ivar];
437 
438  // read number of layers (sum of: input + output + hidden)
439  istr >> fParam_1.layerm;
440 
441  if (fYNN != 0) {
442  for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
443  delete[] fYNN;
444  fYNN = 0;
445  }
446  fYNN = new Double_t*[fParam_1.layerm];
447  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
448  // read number of neurons for each layer
449  // coverity[tainted_data_argument]
450  istr >> fNeur_1.neuron[layer];
451  fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];
452  }
453 
454  // to read dummy lines
455  const Int_t nchar( 100 );
456  char* dumchar = new char[nchar];
457 
458  // read weights
459  for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {
460 
461  Int_t nq = fNeur_1.neuron[layer]/10;
462  Int_t nr = fNeur_1.neuron[layer] - nq*10;
463 
464  Int_t kk(0);
465  if (nr==0) kk = nq;
466  else kk = nq+1;
467 
468  for (Int_t k=1; k<=kk; k++) {
469  Int_t jmin = 10*k - 9;
470  Int_t jmax = 10*k;
471  if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
472  for (Int_t j=jmin; j<=jmax; j++) {
473  istr >> Ww_ref(fNeur_1.ww, layer+1, j);
474  }
475  for (Int_t i=1; i<=fNeur_1.neuron[layer-1]; i++) {
476  for (Int_t j=jmin; j<=jmax; j++) {
477  istr >> W_ref(fNeur_1.w, layer+1, j, i);
478  }
479  }
480  // skip two empty lines
481  istr.getline( dumchar, nchar );
482  }
483  }
484 
485  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
486 
487  // skip 2 empty lines
488  istr.getline( dumchar, nchar );
489  istr.getline( dumchar, nchar );
490 
491  istr >> fDel_1.temp[layer];
492  }
493 
494  // sanity check
495  if ((Int_t)GetNvar() != fNeur_1.neuron[0]) {
496  Log() << kFATAL << "<ReadWeightsFromFile> mismatch in zeroth layer:"
497  << GetNvar() << " " << fNeur_1.neuron[0] << Endl;
498  }
499 
500  fNlayers = fParam_1.layerm;
501  delete[] dumchar;
502 }
503 
504 ////////////////////////////////////////////////////////////////////////////////
505 /// data interface function
506 
507 Int_t TMVA::MethodCFMlpANN::DataInterface( Double_t* /*tout2*/, Double_t* /*tin2*/,
508  Int_t* /* icode*/, Int_t* /*flag*/,
509  Int_t* /*nalire*/, Int_t* nvar,
510  Double_t* xpg, Int_t* iclass, Int_t* ikend )
511 {
512  // icode and ikend are dummies needed to match f2c mlpl3 functions
513  *ikend = 0;
514 
515  // retrieve pointer to current object (CFMlpANN must be a singleton class!)
516  MethodCFMlpANN* opt = MethodCFMlpANN::This();
517 
518  // sanity checks
519  if (0 == xpg) {
520  Log() << kFATAL << "ERROR in MethodCFMlpANN_DataInterface zero pointer xpg" << Endl;
521  }
522  if (*nvar != (Int_t)opt->GetNvar()) {
523  Log() << kFATAL << "ERROR in MethodCFMlpANN_DataInterface mismatch in num of variables: "
524  << *nvar << " " << opt->GetNvar() << Endl;
525  }
526 
527  // fill variables
528  *iclass = (int)opt->GetClass( TMVA::MethodCFMlpANN_nsel );
529  for (UInt_t ivar=0; ivar<opt->GetNvar(); ivar++)
530  xpg[ivar] = (double)opt->GetData( TMVA::MethodCFMlpANN_nsel, ivar );
531 
532  ++TMVA::MethodCFMlpANN_nsel;
533 
534  return 0;
535 }
536 
537 ////////////////////////////////////////////////////////////////////////////////
538 /// write weights to xml file
539 
540 void TMVA::MethodCFMlpANN::AddWeightsXMLTo( void* parent ) const
541 {
542  void *wght = gTools().AddChild(parent, "Weights");
543  gTools().AddAttr(wght,"NVars",fParam_1.nvar);
544  gTools().AddAttr(wght,"NClasses",fParam_1.lclass);
545  gTools().AddAttr(wght,"NLayers",fParam_1.layerm);
546  void* minmaxnode = gTools().AddChild(wght, "VarMinMax");
547  stringstream s;
548  s.precision( 16 );
549  for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
550  s << std::scientific << fVarn_1.xmin[ivar] << " " << fVarn_1.xmax[ivar] << " ";
551  gTools().AddRawLine( minmaxnode, s.str().c_str() );
552  void* neurons = gTools().AddChild(wght, "NNeurons");
553  stringstream n;
554  n.precision( 16 );
555  for (Int_t layer=0; layer<fParam_1.layerm; layer++)
556  n << std::scientific << fNeur_1.neuron[layer] << " ";
557  gTools().AddRawLine( neurons, n.str().c_str() );
558  for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
559  void* layernode = gTools().AddChild(wght, "Layer"+gTools().StringFromInt(layer));
560  gTools().AddAttr(layernode,"NNeurons",fNeur_1.neuron[layer]);
561  void* neuronnode=NULL;
562  for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
563  neuronnode = gTools().AddChild(layernode,"Neuron"+gTools().StringFromInt(neuron));
564  stringstream weights;
565  weights.precision( 16 );
566  weights << std::scientific << Ww_ref(fNeur_1.ww, layer+1, neuron+1);
567  for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
568  weights << " " << std::scientific << W_ref(fNeur_1.w, layer+1, neuron+1, i+1);
569  }
570  gTools().AddRawLine( neuronnode, weights.str().c_str() );
571  }
572  }
573  void* tempnode = gTools().AddChild(wght, "LayerTemp");
574  stringstream temp;
575  temp.precision( 16 );
576  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
577  temp << std::scientific << fDel_1.temp[layer] << " ";
578  }
579  gTools().AddRawLine(tempnode, temp.str().c_str() );
580 }
581 ////////////////////////////////////////////////////////////////////////////////
582 /// read weights from xml file
583 
584 void TMVA::MethodCFMlpANN::ReadWeightsFromXML( void* wghtnode )
585 {
586  gTools().ReadAttr( wghtnode, "NLayers",fParam_1.layerm );
587  void* minmaxnode = gTools().GetChild(wghtnode);
588  const char* minmaxcontent = gTools().GetContent(minmaxnode);
589  stringstream content(minmaxcontent);
590  for (UInt_t ivar=0; ivar<GetNvar(); ivar++)
591  content >> fVarn_1.xmin[ivar] >> fVarn_1.xmax[ivar];
592  if (fYNN != 0) {
593  for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
594  delete[] fYNN;
595  fYNN = 0;
596  }
597  fYNN = new Double_t*[fParam_1.layerm];
598  void *layernode=gTools().GetNextChild(minmaxnode);
599  const char* neuronscontent = gTools().GetContent(layernode);
600  stringstream ncontent(neuronscontent);
601  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
602  // read number of neurons for each layer;
603  // coverity[tainted_data_argument]
604  ncontent >> fNeur_1.neuron[layer];
605  fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];
606  }
607  for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
608  layernode=gTools().GetNextChild(layernode);
609  void* neuronnode=NULL;
610  neuronnode = gTools().GetChild(layernode);
611  for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
612  const char* neuronweights = gTools().GetContent(neuronnode);
613  stringstream weights(neuronweights);
614  weights >> Ww_ref(fNeur_1.ww, layer+1, neuron+1);
615  for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
616  weights >> W_ref(fNeur_1.w, layer+1, neuron+1, i+1);
617  }
618  neuronnode=gTools().GetNextChild(neuronnode);
619  }
620  }
621  void* tempnode=gTools().GetNextChild(layernode);
622  const char* temp = gTools().GetContent(tempnode);
623  stringstream t(temp);
624  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
625  t >> fDel_1.temp[layer];
626  }
627  fNlayers = fParam_1.layerm;
628 }
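A usage sketch (not part of this file): the weights written by AddWeightsXMLTo() and read back here are normally handled through the TMVA Reader; the variable names and weight-file path below are hypothetical placeholders.

// Applying a trained CFMlpANN with the TMVA Reader; "var1"/"var2" and the
// weight-file path are placeholders for the variables used in training.
#include <iostream>
#include "TMVA/Reader.h"

void ApplyCFMlpANN()
{
   Float_t var1 = 0, var2 = 0;
   TMVA::Reader reader( "!Color:!Silent" );
   reader.AddVariable( "var1", &var1 );
   reader.AddVariable( "var2", &var2 );
   reader.BookMVA( "CFMlpANN", "weights/TMVAClassification_CFMlpANN.weights.xml" );

   var1 = 0.5; var2 = -1.2;                                   // one event's values
   std::cout << "MVA response: " << reader.EvaluateMVA( "CFMlpANN" ) << std::endl;
}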
629 
630 ////////////////////////////////////////////////////////////////////////////////
631 /// write the weights of the neural net
632 
633 void TMVA::MethodCFMlpANN::PrintWeights( std::ostream & o ) const
634 {
635  // write number of variables and classes
636  o << "Number of vars " << fParam_1.nvar << std::endl;
637  o << "Output nodes " << fParam_1.lclass << std::endl;
638 
639  // write extrema of input variables
640  for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
641  o << "Var " << ivar << " [" << fVarn_1.xmin[ivar] << " - " << fVarn_1.xmax[ivar] << "]" << std::endl;
642 
643  // write number of layers (sum of: input + output + hidden)
644  o << "Number of layers " << fParam_1.layerm << std::endl;
645 
646  o << "Nodes per layer ";
647  for (Int_t layer=0; layer<fParam_1.layerm; layer++)
648  // write number of neurons for each layer
649  o << fNeur_1.neuron[layer] << " ";
650  o << std::endl;
651 
652  // write weights
653  for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {
654 
655  Int_t nq = fNeur_1.neuron[layer]/10;
656  Int_t nr = fNeur_1.neuron[layer] - nq*10;
657 
658  Int_t kk(0);
659  if (nr==0) kk = nq;
660  else kk = nq+1;
661 
662  for (Int_t k=1; k<=kk; k++) {
663  Int_t jmin = 10*k - 9;
664  Int_t jmax = 10*k;
665  Int_t i, j;
666  if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
667  for (j=jmin; j<=jmax; j++) {
668 
669  //o << fNeur_1.ww[j*max_nLayers_ + layer - 6] << " ";
670  o << Ww_ref(fNeur_1.ww, layer+1, j) << " ";
671 
672  }
673  o << std::endl;
674  //for (i=1; i<=fNeur_1.neuron[layer-1]; i++) {
675  for (i=1; i<=fNeur_1.neuron[layer-1]; i++) {
676  for (j=jmin; j<=jmax; j++) {
677  // o << fNeur_1.w[(i*max_nNodes_ + j)*max_nLayers_ + layer - 186] << " ";
678  o << W_ref(fNeur_1.w, layer+1, j, i) << " ";
679  }
680  o << std::endl;
681  }
682 
683  // skip two empty lines
684  o << std::endl;
685  }
686  }
687  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
688  o << "Del.temp in layer " << layer << " : " << fDel_1.temp[layer] << std::endl;
689  }
690 }
691 ////////////////////////////////////////////////////////////////////////////////
692 /// static pointer to this object (required for external functions)
693 
694 TMVA::MethodCFMlpANN* TMVA::MethodCFMlpANN::This( void )
695 {
696  return fgThis;
697 }
698 void TMVA::MethodCFMlpANN::MakeClassSpecific( std::ostream& fout, const TString& className ) const
699 {
700  // write specific classifier response
701  fout << " // not implemented for class: \"" << className << "\"" << std::endl;
702  fout << "};" << std::endl;
703 }
704 
705 ////////////////////////////////////////////////////////////////////////////////
706 /// write specific classifier response for header
707 
708 void TMVA::MethodCFMlpANN::MakeClassSpecificHeader( std::ostream& , const TString& ) const
709 {
710 }
711 
712 ////////////////////////////////////////////////////////////////////////////////
713 /// get help message text
714 ///
715 /// typical length of text line:
716 /// "|--------------------------------------------------------------|"
717 
718 void TMVA::MethodCFMlpANN::GetHelpMessage() const
719 {
720  Log() << Endl;
721  Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
722  Log() << Endl;
723  Log() << "<None>" << Endl;
724  Log() << Endl;
725  Log() << gTools().Color("bold") << "--- Performance optimisation:" << gTools().Color("reset") << Endl;
726  Log() << Endl;
727  Log() << "<None>" << Endl;
728  Log() << Endl;
729  Log() << gTools().Color("bold") << "--- Performance tuning via configuration options:" << gTools().Color("reset") << Endl;
730  Log() << Endl;
731  Log() << "<None>" << Endl;
732 }