using std::stringstream;

MethodCFMlpANN_nsel(0)   // constructor initialiser-list entry (event counter starts at 0)

MethodCFMlpANN_nsel(0)

// DeclareOptions(): the two configurable options of this method
DeclareOptionRef( fNcycles  =3000,    "NCycles",      "Number of training cycles" );
DeclareOptionRef( fLayerSpec="N,N-1", "HiddenLayers", "Specification of hidden layer architecture" );
 
// ProcessOptions(): decode the HiddenLayers specification into node counts per layer
fNodes = new Int_t[20];

Int_t currentHiddenLayer = 1;

while (layerSpec.Length()>0) {

   if (layerSpec.First(',')<0) {

      sToAdd    = layerSpec(0,layerSpec.First(','));
      layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());

   nNodes += atoi(sToAdd);
   fNodes[currentHiddenLayer++] = nNodes;

fNodes[0]          = GetNvar();   // input layer: one node per input variable
fNodes[fNlayers-1] = 2;           // output layer: two nodes (two-class problem)
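
Concretely (the expansion of a leading "N" token to GetNvar() happens in lines not shown in this excerpt): with four input variables and the default HiddenLayers="N,N-1", the two hidden layers get 4 and 3 nodes, so fNodes ends up as {4, 4, 3, 2} — input layer, two hidden layers, two output nodes — and fNlayers is 4.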
 
if (IgnoreEventsWithNegWeightsInTraining()) {
   Log() << kFATAL << "Mechanism to ignore events with negative weights in training not yet available for method: "
         << GetMethodTypeName()
         << " --> please remove \"IgnoreNegWeightsInTraining\" option from booking string."

Log() << kINFO << "Use configuration (nodes per layer): in=";
for (Int_t i=0; i<fNlayers-1; i++) Log() << kINFO << fNodes[i] << ":";
Log() << kINFO << fNodes[fNlayers-1] << "=out" << Endl;

Log() << "Use " << fNcycles << " training cycles" << Endl;
 
Int_t nEvtTrain = Data()->GetNTrainingEvents();

fData  = new TMatrix( nEvtTrain, GetNvar() );
fClass = new std::vector<Int_t>( nEvtTrain );

for (Int_t ievt=0; ievt<nEvtTrain; ievt++) {
   const Event * ev = GetEvent(ievt);

   (*fClass)[ievt] = DataInfo().IsSignal(ev) ? 1 : 2;   // class code: 1 = signal, 2 = background

   for (ivar=0; ivar<GetNvar(); ivar++) {
      (*fData)( ievt, ivar ) = ev->GetValue(ivar);

// Init(): default initialisation called by all constructors
SetNormalised( kTRUE );

MethodCFMlpANN_nsel = 0;
 
for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];

// Train(): set up and call the CFMlpANN training kernel
Int_t ntrain(Data()->GetNTrainingEvents());

Int_t nvar(GetNvar());
Int_t nlayers(fNlayers);

Int_t ncycles(fNcycles);

for (Int_t i=0; i<nlayers; i++) nodes[i] = fNodes[i];

for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];

for (Int_t layer=0; layer<nlayers; layer++)
   fYNN[layer] = new Double_t[fNodes[layer]];

Train_nn( &dumDat, &dumDat, &ntrain, &ntest, &nvar, &nlayers, nodes, &ncycles );

Log() << kWARNING << "<Train> sorry CFMlpANN does not run on Windows" << Endl;
 
// GetMvaValue(): copy the current event into a vector and evaluate the network
const Event* ev = GetEvent();

std::vector<Double_t> inputVec( GetNvar() );
for (UInt_t ivar=0; ivar<GetNvar(); ivar++) inputVec[ivar] = ev->GetValue(ivar);

Double_t myMVA = EvalANN( inputVec, isOK );
if (!isOK) Log() << kFATAL << "EvalANN returns (!isOK) for event " << Endl;

NoErrorCalc(err, errUpper);
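
For orientation, this is roughly how GetMvaValue() is reached from user code (a sketch assuming the standard TMVA::Reader workflow; variable names and the weight-file path are illustrative, not taken from this file):

   // Sketch only: evaluate a trained CFMlpANN on one event
   Float_t var1, var2;
   TMVA::Reader reader( "!Color:!Silent" );
   reader.AddVariable( "var1", &var1 );
   reader.AddVariable( "var2", &var2 );
   reader.BookMVA( "CFMlpANN", "dataset/weights/TMVAClassification_CFMlpANN.weights.xml" );
   var1 = 1.2f;  var2 = -0.3f;                       // fill with the event to classify
   Double_t mva = reader.EvaluateMVA( "CFMlpANN" );  // CFMlpANN output, normalised within [0,1]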
 
// EvalANN(): clamp each input to its training range, then scale it to [-1, 1]
for (UInt_t ivar=0; ivar<GetNvar(); ivar++) xeev[ivar] = inVar[ivar];

for (UInt_t jvar=0; jvar<GetNvar(); jvar++) {

   if (fVarn_1.xmax[jvar] < xeev[jvar]) xeev[jvar] = fVarn_1.xmax[jvar];
   if (fVarn_1.xmin[jvar] > xeev[jvar]) xeev[jvar] = fVarn_1.xmin[jvar];
   if (fVarn_1.xmax[jvar] == fVarn_1.xmin[jvar]) {

      xeev[jvar] = xeev[jvar] - ((fVarn_1.xmax[jvar] + fVarn_1.xmin[jvar])/2);
      xeev[jvar] = xeev[jvar] / ((fVarn_1.xmax[jvar] - fVarn_1.xmin[jvar])/2);

Double_t retval = 0.5*(1.0 + fYNN[fParam_1.layerm-1][0]);
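
In formula form, the clamping and scaling above give

   x'_j = ( x_j - (x_j^max + x_j^min)/2 ) / ( (x_j^max - x_j^min)/2 )   in [-1, 1]

and the value returned to GetMvaValue() maps the last-layer output node from [-1, 1] back to [0, 1]:

   MVA = ( 1 + y_out ) / 2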
 
// NN_ava(): forward pass through the network
for (Int_t ivar=0; ivar<fNeur_1.neuron[0]; ivar++) fYNN[0][ivar] = xeev[ivar];

for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
   for (Int_t j=1; j<=fNeur_1.neuron[layer]; j++) {

      Double_t x = Ww_ref(fNeur_1.ww, layer+1, j);   // bias term of neuron j

      for (Int_t k=1; k<=fNeur_1.neuron[layer-1]; k++) {
         x += fYNN[layer-1][k-1]*W_ref(fNeur_1.w, layer+1, j, k);

      fYNN[layer][j-1] = NN_fonc( layer, x );
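
Written out, neuron j of layer l computes the usual weighted sum of the previous layer's outputs plus the bias, followed by the activation function NN_fonc:

   y_j^(l) = f_l( ww_j^(l) + sum_k w_jk^(l) * y_k^(l-1) )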
 
// NN_fonc(): activation function with per-layer temperature fDel_1.temp[i]
if      (u/fDel_1.temp[i] >  170) f = +1;
else if (u/fDel_1.temp[i] < -170) f = -1;

   f  = (1 - yy)/(1 + yy);
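
The yy used in the last line is the exponential factor exp(-u/T_i), with T_i = fDel_1.temp[i]; the line computing it is not part of this excerpt but is implied by the TMath::Exp cross-reference below. The activation is therefore the sigmoid-shaped

   f_i(u) = ( 1 - e^(-u/T_i) ) / ( 1 + e^(-u/T_i) ) = tanh( u / (2*T_i) ),

saturated to +1 or -1 whenever |u/T_i| exceeds 170, which avoids overflow in the exponential.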
 
// ReadWeightsFromStream(): read weights back from the plain-text stream format
istr >> nva >> lclass;

if (GetNvar() != nva)
   Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of variables" << Endl;

   Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of classes" << Endl;

   Log() << kFATAL << "<ReadWeightsFromStream> reached EOF prematurely " << Endl;

for (UInt_t ivar=0; ivar<GetNvar(); ivar++)
   istr >> fVarn_1.xmax[ivar] >> fVarn_1.xmin[ivar];
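
Note the ordering: this stream format stores xmax before xmin for each variable, whereas the XML writer and reader further down (AddWeightsXMLTo / ReadWeightsFromXML) use the opposite xmin-then-xmax order, so the two formats are not interchangeable line by line.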
 
istr >> fParam_1.layerm;

   for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];

fYNN = new Double_t*[fParam_1.layerm];
for (Int_t layer=0; layer<fParam_1.layerm; layer++) {

   istr >> fNeur_1.neuron[layer];
   fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];

char* dumchar = new char[nchar];

// weights are stored in blocks of (at most) ten columns per line
for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {

   Int_t nq = fNeur_1.neuron[layer]/10;
   Int_t nr = fNeur_1.neuron[layer] - nq*10;

   for (Int_t k=1; k<=kk; k++) {
      Int_t jmin = 10*k - 9;

      if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
      for (Int_t j=jmin; j<=jmax; j++) {
         istr >> Ww_ref(fNeur_1.ww, layer+1, j);

      for (Int_t i=1; i<=fNeur_1.neuron[layer-1]; i++) {
         for (Int_t j=jmin; j<=jmax; j++) {
            istr >> W_ref(fNeur_1.w, layer+1, j, i);

      istr.getline( dumchar, nchar );
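
As a worked example of the blocking above: a layer with 14 neurons gives nq = 1 and nr = 4, so the read proceeds in two blocks, j = 1..10 and j = 11..14. The block count kk and the initial jmax come from lines not included in this excerpt; judging from the capping statement shown, jmax starts at 10*k and is clipped to the number of neurons in the layer.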
 
for (Int_t layer=0; layer<fParam_1.layerm; layer++) {

   istr.getline( dumchar, nchar );
   istr.getline( dumchar, nchar );

   istr >> fDel_1.temp[layer];

// consistency check after reading
if ((Int_t)GetNvar() != fNeur_1.neuron[0]) {
   Log() << kFATAL << "<ReadWeightsFromFile> mismatch in zeroth layer:"
         << GetNvar() << " " << fNeur_1.neuron[0] << Endl;

fNlayers = fParam_1.layerm;

// DataInterface(): event-fetching callback used by the training routine
   Log() << kFATAL << "ERROR in MethodCFMlpANN_DataInterface zero pointer xpg" << Endl;

if (*nvar != (Int_t)this->GetNvar()) {
   Log() << kFATAL << "ERROR in MethodCFMlpANN_DataInterface mismatch in num of variables: "
         << *nvar << " " << this->GetNvar() << Endl;

*iclass = (int)this->GetClass( MethodCFMlpANN_nsel );
for (UInt_t ivar=0; ivar<this->GetNvar(); ivar++)
   xpg[ivar] = (double)this->GetData( MethodCFMlpANN_nsel, ivar );

++MethodCFMlpANN_nsel;
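
MethodCFMlpANN_nsel is the running event index for this callback: it is reset to 0 earlier (see above) and advanced here after every request, while GetClass() and GetData() serve the class codes and variable values cached in fClass and fData in the copy loop shown earlier.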
 
// AddWeightsXMLTo(): write variable ranges, layer sizes, weights and temperatures to XML
for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
   s << std::scientific << fVarn_1.xmin[ivar] << " " << fVarn_1.xmax[ivar] << " ";

for (Int_t layer=0; layer<fParam_1.layerm; layer++)
   n << std::scientific << fNeur_1.neuron[layer] << " ";

for (Int_t layer=1; layer<fParam_1.layerm; layer++) {

   gTools().AddAttr(layernode,"NNeurons",fNeur_1.neuron[layer]);
   void* neuronnode=NULL;
   for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {

      stringstream weights;
      weights.precision( 16 );
      weights << std::scientific << Ww_ref(fNeur_1.ww, layer+1, neuron+1);
      for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
         weights << " " << std::scientific << W_ref(fNeur_1.w, layer+1, neuron+1, i+1);

temp.precision( 16 );
for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
   temp << std::scientific << fDel_1.temp[layer] << " ";
 
// ReadWeightsFromXML(): the inverse of AddWeightsXMLTo()
stringstream content(minmaxcontent);
for (UInt_t ivar=0; ivar<GetNvar(); ivar++)
   content >> fVarn_1.xmin[ivar] >> fVarn_1.xmax[ivar];

   for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];

fYNN = new Double_t*[fParam_1.layerm];

stringstream ncontent(neuronscontent);
for (Int_t layer=0; layer<fParam_1.layerm; layer++) {

   ncontent >> fNeur_1.neuron[layer];
   fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];

for (Int_t layer=1; layer<fParam_1.layerm; layer++) {

   void* neuronnode=NULL;

   for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {

      stringstream weights(neuronweights);
      weights >> Ww_ref(fNeur_1.ww, layer+1, neuron+1);
      for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
         weights >> W_ref(fNeur_1.w, layer+1, neuron+1, i+1);

stringstream t(temp);
for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
   t >> fDel_1.temp[layer];

fNlayers = fParam_1.layerm;
 
// PrintWeights(): human-readable dump of the network
o << "Number of vars " << fParam_1.nvar << std::endl;
o << "Output nodes   " << fParam_1.lclass << std::endl;

for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
   o << "Var " << ivar << " [" << fVarn_1.xmin[ivar] << " - " << fVarn_1.xmax[ivar] << "]" << std::endl;

o << "Number of layers " << fParam_1.layerm << std::endl;

o << "Nodes per layer ";
for (Int_t layer=0; layer<fParam_1.layerm; layer++)

   o << fNeur_1.neuron[layer] << "     ";

// same ten-column blocking as in ReadWeightsFromStream()
for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {

   Int_t nq = fNeur_1.neuron[layer]/10;
   Int_t nr = fNeur_1.neuron[layer] - nq*10;

   for (Int_t k=1; k<=kk; k++) {
      Int_t jmin = 10*k - 9;

      if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
      for (j=jmin; j<=jmax; j++) {

         o << Ww_ref(fNeur_1.ww, layer+1, j) << "   ";

      for (i=1; i<=fNeur_1.neuron[layer-1]; i++) {
         for (j=jmin; j<=jmax; j++) {

            o << W_ref(fNeur_1.w, layer+1, j, i) << "   ";

for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
   o << "Del.temp in layer " << layer << " :  " << fDel_1.temp[layer] << std::endl;
 
// writing the standalone class response: not implemented for this method
fout << "   // not implemented for class: \"" << className << "\"" << std::endl;
fout << "};" << std::endl;

// GetHelpMessage(): no specific help text is provided
Log() << "<None>" << Endl;

Log() << "<None>" << Endl;

Log() << "<None>" << Endl;
 
 
TMatrixT< Float_t > TMatrix
 
Class that contains all the data information.
 
Float_t GetValue(UInt_t ivar) const
return value of i'th variable
 
Virtual base class for all MVA methods.
 
void SetLogger(MsgLogger *l)
 
Interface to the Clermont-Ferrand artificial neural network.
 
Double_t GetMvaValue(Double_t *err=nullptr, Double_t *errUpper=nullptr)
returns CFMlpANN output (normalised within [0,1])
 
void PrintWeights(std::ostream &o) const
write the weights of the neural net
 
void MakeClassSpecific(std::ostream &, const TString &) const
 
Double_t EvalANN(std::vector< Double_t > &, Bool_t &isOK)
evaluates NN value as function of input variables
 
void DeclareOptions()
define the options (their keywords) that can be set in the option string; known options: NCycles=xx : the number of training cycles ...
 
virtual Bool_t HasAnalysisType(Types::EAnalysisType type, UInt_t numberClasses, UInt_t)
CFMlpANN can handle classification with 2 classes.
 
void NN_ava(Double_t *)
auxiliary function: forward propagation of an event through the network
 
void AddWeightsXMLTo(void *parent) const
write weights to xml file
 
void ProcessOptions()
decode the options in the option string
 
void Train(void)
training of the Clermont-Ferrand NN classifier
 
Double_t NN_fonc(Int_t, Double_t) const
activation function
 
void ReadWeightsFromStream(std::istream &istr)
read back the weights from the training file (stream)
 
void MakeClassSpecificHeader(std::ostream &, const TString &="") const
write specific classifier response for header
 
virtual ~MethodCFMlpANN(void)
destructor
 
MethodCFMlpANN(const TString &jobName, const TString &methodTitle, DataSetInfo &theData, const TString &theOption="3000:N-1:N-2")
standard constructor
 
void Init(void)
default initialisation called by all constructors
 
Int_t DataInterface(Double_t *, Double_t *, Int_t *, Int_t *, Int_t *, Int_t *, Double_t *, Int_t *, Int_t *)
data interface function
 
void ReadWeightsFromXML(void *wghtnode)
read weights from xml file
 
void GetHelpMessage() const
get help message text
 
Singleton class for Global types used by TMVA.
 
Ssiz_t First(char c) const
Find first occurrence of a character c.
 
Bool_t BeginsWith(const char *s, ECaseCompare cmp=kExact) const
 
TString & Remove(Ssiz_t pos)
 
 
MsgLogger & Endl(MsgLogger &ml)
 
Double_t Exp(Double_t x)
Returns the base-e exponential function of x, which is e raised to the power x.