#ifndef ROOT_TMVA_MethodMLP
#define ROOT_TMVA_MethodMLP
#include <vector>
#ifndef ROOT_TString
#include "TString.h"
#endif
#ifndef ROOT_TTree
#include "TTree.h"
#endif
#ifndef ROOT_TObjArray
#include "TObjArray.h"
#endif
#ifndef ROOT_TRandom3
#include "TRandom3.h"
#endif
#ifndef ROOT_TH1F
#include "TH1F.h"
#endif
#ifndef ROOT_TMatrixDfwd
#include "TMatrixDfwd.h"
#endif
#ifndef ROOT_TMVA_IFitterTarget
#include "TMVA/IFitterTarget.h"
#endif
#ifndef ROOT_TMVA_MethodBase
#include "TMVA/MethodBase.h"
#endif
#ifndef ROOT_TMVA_MethodANNBase
#include "TMVA/MethodANNBase.h"
#endif
#ifndef ROOT_TMVA_TNeuron
#include "TMVA/TNeuron.h"
#endif
#ifndef ROOT_TMVA_TActivation
#include "TMVA/TActivation.h"
#endif
#ifndef ROOT_TMVA_ConvergenceTest
#include "TMVA/ConvergenceTest.h"
#endif
// Define MethodMLP_UseMinuit__ to expose the optional Minuit-based minimiser
// interface below; it is disabled by default via the immediate #undef.
#define MethodMLP_UseMinuit__
#undef MethodMLP_UseMinuit__
namespace TMVA {
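// Multilayer perceptron (MLP): a feed-forward artificial neural network.
// The network weights can be trained by plain back-propagation (BP), by the
// BFGS quasi-Newton method, or by a genetic algorithm (GA); see
// ETrainingMethod below.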
class MethodMLP : public MethodANNBase, public IFitterTarget, public ConvergenceTest {
public:
// standard constructor
MethodMLP( const TString& jobName,
           const TString& methodTitle,
           DataSetInfo& theData,
           const TString& theOption,
           TDirectory* theTargetDir = 0 );
// constructor used when recreating the method from a stored weight file
MethodMLP( DataSetInfo& theData,
           const TString& theWeightFile,
           TDirectory* theTargetDir = 0 );
virtual ~MethodMLP();
// returns kTRUE if the method can handle the requested analysis type
virtual Bool_t HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets );
// starts training with the configured number of cycles
void Train() { Train(NumCycles()); }
// estimator evaluated for a given weight vector; EstimatorFunction is the
// IFitterTarget interface used by the external fitters
Double_t ComputeEstimator ( std::vector<Double_t>& parameters );
Double_t EstimatorFunction( std::vector<Double_t>& parameters );
enum ETrainingMethod { kBP=0, kBFGS, kGA };     // back-propagation, BFGS, genetic algorithm
enum EBPTrainingMode { kSequential=0, kBatch }; // weight update after each event or per batch
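
// A minimal booking sketch via the standard TMVA Factory (the option values
// are illustrative, not a complete list of what DeclareOptions() accepts):
//
//   factory->BookMethod( TMVA::Types::kMLP, "MLP",
//      "NCycles=600:HiddenLayers=N+5:TrainingMethod=BP:"
//      "LearningRate=0.02:DecayRate=0.01:BPMode=sequential:TestRate=5" );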
protected:
// writes the method-specific part of the standalone response class
void MakeClassSpecific( std::ostream&, const TString& ) const;
// prints a help message for this method
void GetHelpMessage() const;
private:
// general helpers
void DeclareOptions();
void ProcessOptions();
void Train( Int_t nEpochs );
void Init();
void InitializeLearningRates();
// estimator used as the measure of success by all minimisation techniques
Double_t CalculateEstimator( Types::ETreeType treeType = Types::kTraining );
// BFGS minimisation
void BFGSMinimize( Int_t nEpochs );
void SetGammaDelta( TMatrixD &Gamma, TMatrixD &Delta, std::vector<Double_t> &Buffer );
void SteepestDir( TMatrixD &Dir );
Bool_t GetHessian( TMatrixD &Hessian, TMatrixD &Gamma, TMatrixD &Delta );
void SetDir( TMatrixD &Hessian, TMatrixD &Dir );
Double_t DerivDir( TMatrixD &Dir );
Bool_t LineSearch( TMatrixD &Dir, std::vector<Double_t> &Buffer );
void ComputeDEDw();
void SimulateEvent( const Event* ev );
void SetDirWeights( std::vector<Double_t> &Origin, TMatrixD &Dir, Double_t alpha );
// error functions: overall estimator and per-event squared error
Double_t GetError();
Double_t GetSqrErr( const Event* ev, UInt_t index = 0 );
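
// One BFGS epoch, schematically (a hedged sketch of the usual quasi-Newton
// sequence built from the functions above; the authoritative ordering lives
// in the implementation file):
//
//   ComputeDEDw();                          // gradient dE/dw over the sample
//   SetGammaDelta( Gamma, Delta, Buffer );  // Gamma: gradient change, Delta: step
//   if ( !GetHessian( Hessian, Gamma, Delta ) )
//      SteepestDir( Dir );                  // fall back to steepest descent
//   else
//      SetDir( Hessian, Dir );              // quasi-Newton search direction
//   LineSearch( Dir, Buffer );              // step size along Dir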
// back-propagation functions
void BackPropagationMinimize( Int_t nEpochs );
void TrainOneEpoch();
void Shuffle( Int_t* index, Int_t n );
void DecaySynapseWeights( Bool_t lateEpoch );
void TrainOneEvent( Int_t ievt );
Double_t GetDesiredOutput( const Event* ev );
void UpdateNetwork( Double_t desired, Double_t eventWeight=1.0 );
void UpdateNetwork( std::vector<Float_t>& desired, Double_t eventWeight=1.0 );
void CalculateNeuronDeltas();
void UpdateSynapses();
void AdjustSynapseWeights();
// faster per-event training path reading the input variables directly
void TrainOneEventFast( Int_t ievt, Float_t*& branchVar, Int_t& type );
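
// Sequential back-propagation, schematically (standard stochastic gradient
// descent; the names are members of this class, the exact bookkeeping lives
// in the implementation file): TrainOneEvent(ievt) runs a forward pass, then
// for every synapse weight w
//
//   w <- w - fLearnRate * dE/dw * eventWeight
//
// with the learning rate annealed across epochs according to fDecayRate
// (cf. DecaySynapseWeights()).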
// genetic algorithm minimisation
void GeneticMinimize();
void SetAnalysisType( Types::EAnalysisType type );
#ifdef MethodMLP_UseMinuit__
// optional Minuit interface: IFCN is the static trampoline with the TMinuit
// FCN signature, forwarding to the FCN of the current instance (fgThis)
void MinuitMinimize();
static MethodMLP* GetThisPtr() { return fgThis; }
static void IFCN( Int_t& npars, Double_t* grad, Double_t &f, Double_t* fitPars, Int_t ifl );
void FCN( Int_t& npars, Double_t* grad, Double_t &f, Double_t* fitPars, Int_t ifl );
#endif
ETrainingMethod fTrainingMethod;   // training method: back-propagation, BFGS or GA
TString         fTrainMethodS;     // training method option string
Float_t         fSamplingFraction; // fraction of events sampled for training
Float_t         fSamplingEpoch;    // fraction of epochs during which sampling is used
Float_t         fSamplingWeight;   // weight change factor for events when sampling is turned off
Bool_t          fSamplingTraining; // sample the training set
Bool_t          fSamplingTesting;  // sample the test set
Double_t        fLastAlpha;        // line-search variable (last step size)
Double_t        fTau;              // line-search variable
Int_t           fResetStep;        // reset interval for the Hessian approximation (BFGS)
Double_t        fLearnRate;        // learning rate for synapse weight adjustments
Double_t        fDecayRate;        // decay rate applied to the learning rate
EBPTrainingMode fBPMode;           // back-propagation mode: sequential or batch
TString         fBpModeS;          // back-propagation mode option string
Int_t           fBatchSize;        // batch size; only used in batch mode
Int_t           fTestRate;         // overtraining test performed every fTestRate epochs
Int_t           fGA_nsteps;        // GA setting: number of steps
Int_t           fGA_preCalc;       // GA setting: number of pre-calculation steps
Int_t           fGA_SC_steps;      // GA setting: SC steps
Int_t           fGA_SC_rate;       // GA setting: SC rate
Double_t        fGA_SC_factor;     // GA setting: SC factor
#ifdef MethodMLP_UseMinuit__
Int_t             fNumberOfWeights; // number of network weights (Minuit fit parameters)
static MethodMLP* fgThis;           // pointer to the current instance, used by IFCN
#endif
// debug/verbosity flags
static const Int_t  fgPRINT_ESTIMATOR_INC = 10;     // print-out increment for the estimator
static const Bool_t fgPRINT_SEQ           = kFALSE; // print sequential-training messages
static const Bool_t fgPRINT_BATCH         = kFALSE; // print batch-training messages
ClassDef(MethodMLP,0) // Multi-layer perceptron implemented for TMVA
};
} // namespace TMVA
#endif // ROOT_TMVA_MethodMLP