using std::stringstream;
 
MethodCFMlpANN_nsel(0)   // constructor initializer list entry

MethodCFMlpANN_nsel(0)   // initializer list entry of the second constructor
 
 
DeclareOptionRef( fNcycles  =3000,      "NCycles",      "Number of training cycles" );
DeclareOptionRef( fLayerSpec="N,N-1",   "HiddenLayers", "Specification of hidden layer architecture" );
 
 
fNodes = new Int_t[20];   // number of nodes per layer (at most 20 layers)

fNodes[0]          = GetNvar();   // number of input nodes
fNodes[fNlayers-1] = 2;           // number of output nodes
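As a worked illustration (the variable count is an assumption for the example): with 4 input variables and the default HiddenLayers="N,N-1", where "N" stands for the number of inputs, the resulting layout is

   fNodes[] = { 4, 4, 3, 2 }   // in, hidden 1, hidden 2, out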
 
if (IgnoreEventsWithNegWeightsInTraining()) {
   Log() << kFATAL << "Mechanism to ignore events with negative weights in training not yet available for method: "
         << GetMethodTypeName()
         << " --> please remove \"IgnoreNegWeightsInTraining\" option from booking string." << Endl;
}

Log() << kINFO << "Use configuration (nodes per layer): in=";
for (Int_t i=0; i<fNlayers-1; i++) Log() << kINFO << fNodes[i] << ":";
Log() << kINFO << fNodes[fNlayers-1] << "=out" << Endl;

Log() << "Use " << fNcycles << " training cycles" << Endl;
 
(*fClass)[ievt] = DataInfo().IsSignal(ev) ? 1 : 2;   // internal class convention: 1 = signal, 2 = background
 
 
SetNormalised( kTRUE );   // the method works with normalised input variables

MethodCFMlpANN_nsel = 0;
 
 
for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
 
 
Int_t nvar(GetNvar());
 
for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
 
Log() << kWARNING << "<Train> sorry CFMlpANN does not run on Windows" << Endl;
 
 
std::vector<Double_t> inputVec( GetNvar() );
 
if (!isOK) Log() << kFATAL << "EvalANN returns (!isOK) for event " << Endl;
 
 
if (fVarn_1.xmax[jvar] == fVarn_1.xmin[jvar]) {   // guard against variables with zero range
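For context, my reading of the surrounding normalisation code (a reconstruction, not quoted from this listing): each input is linearly rescaled onto its training range before entering the net,

   x' = (x - (xmax + xmin)/2) / ((xmax - xmin)/2)   so that x' lies in [-1, +1],

and the guard above avoids the division by zero that would otherwise occur when xmax == xmin.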
 
 
for (Int_t k=1; k<=fNeur_1.neuron[layer-1]; k++) {   // sum over nodes of the previous layer
   x += fYNN[layer-1][k-1]*W_ref(fNeur_1.w, layer+1, j, k);
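Together with the per-neuron bias stored via Ww_ref, this loop implements the usual forward pass; schematically (index convention inferred from the code, where the +1 offsets come from the Fortran-style 1-based arrays):

   x_j(l) = ww(l,j) + sum_k w(l,j,k) * y_k(l-1),   y_j(l) = NN_fonc(x_j(l))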
 
 
if      (u/fDel_1.temp[i] >  170) f = +1;
else if (u/fDel_1.temp[i] < -170) f = -1;
else {
   Double_t yy = TMath::Exp(-u/fDel_1.temp[i]);
   f  = (1 - yy)/(1 + yy);
}
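Algebraically this activation is a scaled hyperbolic tangent (a quick identity check, not from the listing): with T = fDel_1.temp[i],

   f(u) = (1 - e^(-u/T)) / (1 + e^(-u/T)) = tanh( u / (2T) ),

and the cut at |u/T| > 170 merely prevents floating-point overflow in the exponential.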
 
 
if (GetNvar() != nva)   // wrong file
   Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of variables" << Endl;

if (lclass != 2)   // the number of output classes must be 2
   Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of classes" << Endl;

if (istr.eof())   // stream ended before all weights were read
   Log() << kFATAL << "<ReadWeightsFromStream> reached EOF prematurely " << Endl;
 
istr >> fParam_1.layerm;   // read the total number of layers
 
for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
 
fYNN = new Double_t*[fParam_1.layerm];
 
for (Int_t i=1; i<=fNeur_1.neuron[layer-1]; i++) {
 
if ((Int_t)GetNvar() != fNeur_1.neuron[0]) {
   Log() << kFATAL << "<ReadWeightsFromFile> mismatch in zeroth layer:"
         << GetNvar() << " " << fNeur_1.neuron[0] << Endl;
 
fNlayers = fParam_1.layerm;
 
 
Log() << kFATAL << "ERROR in MethodCFMlpANN_DataInterface zero pointer xpg" << Endl;
 
if (*nvar != (Int_t)this->GetNvar()) {
   Log() << kFATAL << "ERROR in MethodCFMlpANN_DataInterface mismatch in num of variables: "
         << *nvar << " " << this->GetNvar() << Endl;
 
*iclass = (int)this->GetClass( MethodCFMlpANN_nsel );

for (UInt_t ivar=0; ivar<this->GetNvar(); ivar++)
   xpg[ivar] = (double)this->GetData( MethodCFMlpANN_nsel, ivar );

++MethodCFMlpANN_nsel;   // advance to the next training event
 
 
s << std::scientific << fVarn_1.xmin[ivar] << " " << fVarn_1.xmax[ivar] << " ";

n << std::scientific << fNeur_1.neuron[layer] << " ";
 
for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
   stringstream weights;
   weights.precision( 16 );
   weights << std::scientific << Ww_ref(fNeur_1.ww, layer+1, neuron+1);   // bias first
   for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
      weights << " " << std::scientific << W_ref(fNeur_1.w, layer+1, neuron+1, i+1);

temp.precision( 16 );
temp << std::scientific << fDel_1.temp[layer] << " ";
 
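For orientation (values purely illustrative, not from a real weight file): one neuron's record as serialised above reads as the bias followed by one incoming weight per node of the previous layer, all in 16-digit scientific notation, e.g.

   2.5000000000000000e-01 -1.2000000000000001e+00 3.4000000000000002e-03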
 
for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];

fYNN = new Double_t*[fParam_1.layerm];

for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
   weights >> Ww_ref(fNeur_1.ww, layer+1, neuron+1);
   for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
      weights >> W_ref(fNeur_1.w, layer+1, neuron+1, i+1);

stringstream t(temp);
t >> fDel_1.temp[layer];

fNlayers = fParam_1.layerm;
 
 
o << "Number of vars " << fParam_1.nvar << std::endl;
o << "Output nodes   " << fParam_1.lclass << std::endl;

o << "Var " << ivar << " [" << fVarn_1.xmin[ivar] << " - " << fVarn_1.xmax[ivar] << "]" << std::endl;

o << "Number of layers " << fParam_1.layerm << std::endl;

o << "Nodes per layer ";
o << fNeur_1.neuron[layer] << "     ";
 
o << Ww_ref(fNeur_1.ww, layer+1, j) << "   ";

for (i=1; i<=fNeur_1.neuron[layer-1]; i++) {
   o << W_ref(fNeur_1.w, layer+1, j, i) << "   ";

o << "Del.temp in layer " << layer << " :  " << fDel_1.temp[layer] << std::endl;
 
 
fout << "   // not implemented for class: \"" << className << "\"" << std::endl;
fout << "};" << std::endl;
 
 
// GetHelpMessage() prints "<None>" for each of its three help sections
Log() << "<None>" << Endl;
Log() << "<None>" << Endl;
Log() << "<None>" << Endl;
 
 
 
Interface to the Clermont-Ferrand artificial neural network.
 
Double_t GetMvaValue(Double_t *err=nullptr, Double_t *errUpper=nullptr)
returns CFMlpANN output (normalised within [0,1])
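An application-side sketch (the weight-file path and variable names are illustrative assumptions) using the standard TMVA::Reader interface to obtain this value per event:

   #include "TMVA/Reader.h"

   TMVA::Reader reader( "!Color:!Silent" );
   Float_t var1 = 0, var2 = 0;
   reader.AddVariable( "var1", &var1 );        // same variables, same order as in training
   reader.AddVariable( "var2", &var2 );
   reader.BookMVA( "CFMlpANN",
                   "dataset/weights/TMVAClassification_CFMlpANN.weights.xml" );
   var1 = 1.2f; var2 = -0.4f;                  // fill with the current event's values
   Double_t mva = reader.EvaluateMVA( "CFMlpANN" );   // output normalised within [0,1]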
 
void PrintWeights(std::ostream &o) const
write the weights of the neural net
 
void MakeClassSpecific(std::ostream &, const TString &) const
write specific classifier response (not implemented for this class, see the snippet above)

Double_t EvalANN(std::vector< Double_t > &, Bool_t &isOK)
evaluates NN value as function of input variables
 
void DeclareOptions()
define the options (their keywords) that can be set in the option string; known options: NCycles=xx ...
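A booking sketch in the style of the TMVA tutorials (factory/dataloader setup omitted; the option values are illustrative):

   factory->BookMethod( dataloader, TMVA::Types::kCFMlpANN, "CFMlpANN",
                        "!H:!V:NCycles=2000:HiddenLayers=N+1,N" );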
 
virtual Bool_t HasAnalysisType(Types::EAnalysisType type, UInt_t numberClasses, UInt_t)
CFMlpANN can handle classification with 2 classes.
 
void NN_ava(Double_t *)
auxiliary function: forward propagation through the network
 
void AddWeightsXMLTo(void *parent) const
write weights to xml file
 
void ProcessOptions()
decode the options in the option string
 
void Train(void)
training of the Clermont-Ferrand NN classifier
 
Double_t NN_fonc(Int_t, Double_t) const
activation function
 
void ReadWeightsFromStream(std::istream &istr)
read back the training weights from file (stream)
 
void MakeClassSpecificHeader(std::ostream &, const TString &="") const
write specific classifier response for header
 
virtual ~MethodCFMlpANN(void)
destructor
 
MethodCFMlpANN(const TString &jobName, const TString &methodTitle, DataSetInfo &theData, const TString &theOption="3000:N-1:N-2")
standard constructor
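The default option string decomposes as <NCycles>:<hidden layer 1>:<hidden layer 2> (my reading, consistent with the HiddenLayers convention above):

   // "3000:N-1:N-2" -> 3000 training cycles, two hidden layers
   //                   with N-1 and N-2 nodes (N = number of input variables)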
 
void Init(void)
default initialisation called by all constructors
 
Int_t DataInterface(Double_t *, Double_t *, Int_t *, Int_t *, Int_t *, Int_t *, Double_t *, Int_t *, Int_t *)
data interface function
 
void ReadWeightsFromXML(void *wghtnode)
read weights from xml file
 
void GetHelpMessage() const
get help message text
 