#ifndef ROOT_TMVA_MethodMLP
#define ROOT_TMVA_MethodMLP

#define MethodMLP_UseMinuit__
#undef  MethodMLP_UseMinuit__

#ifdef MethodMLP_UseMinuit__
      void MinuitMinimize();

#ifdef MethodMLP_UseMinuit__
      Int_t          fNumberOfWeights;
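The #define immediately followed by #undef above is the compile-time switch that disables the optional Minuit-based minimizer: the #ifdef MethodMLP_UseMinuit__ sections (MinuitMinimize(), fNumberOfWeights) are only compiled if the #undef line is removed. A minimal sketch of the same toggle pattern, for illustration only:

   // Illustrative toggle, mirroring the header above (not a recommendation):
   #define MethodMLP_UseMinuit__   // request the optional Minuit code path ...
   #undef  MethodMLP_UseMinuit__   // ... and disable it; delete this line to enable

   #ifdef MethodMLP_UseMinuit__
   void MinuitMinimize();          // compiled only while the macro stays defined
   #endif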
 
#define ClassDef(name, id)
 
 
Class that contains all the data information.
 
Interface for a fitter 'target'.
 
Base class for all TMVA methods using artificial neural networks.
 
Multilayer Perceptron class built on MethodANNBase.
 
Int_t fResetStep
reset interval (how often the Hessian matrix is cleared)
 
bool fCalculateErrors
compute the inverse Hessian matrix at the end of training
 
std::vector< std::pair< Float_t, Float_t > > * fDeviationsFromTargets
pairs of (deviation from target, event weight)
 
Double_t GetMvaValue(Double_t *err=nullptr, Double_t *errUpper=nullptr)
get the mva value generated by the NN
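For illustration, a trained MLP is normally evaluated through TMVA::Reader, which calls GetMvaValue() internally. A minimal sketch; the variable names and the weight-file path are assumptions (the path follows the usual Factory output layout):

   #include "TMVA/Reader.h"

   void EvaluateSketch()
   {
      float var1 = 0.3f, var2 = -1.2f;        // hypothetical training variables
      TMVA::Reader reader("!Color:!Silent");
      reader.AddVariable("var1", &var1);
      reader.AddVariable("var2", &var2);
      reader.BookMVA("MLP", "dataset/weights/TMVAClassification_MLP.weights.xml");
      double mva = reader.EvaluateMVA("MLP"); // internally calls GetMvaValue()
      (void)mva;
   }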
 
Double_t fTau
line search variable
 
Int_t fGA_SC_rate
GA settings: SC_rate.
 
Float_t fWeightRange
suppress outliers for the estimator calculation
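A sketch of how such outlier suppression can work, assuming fDeviationsFromTargets holds (deviation, event weight) pairs and only the best fWeightRange fraction of the total event weight enters the estimator (illustrative; the ROOT code may differ in detail):

   #include <algorithm>
   #include <utility>
   #include <vector>

   double TrimmedEstimatorSketch(std::vector<std::pair<float, float>> devW,
                                 float weightRange)
   {
      std::sort(devW.begin(), devW.end());        // ascending deviation
      double total = 0;
      for (const auto& p : devW) total += p.second;
      double sum = 0, used = 0;
      for (const auto& p : devW) {
         if (used + p.second > weightRange * total) break; // drop worst outliers
         sum  += p.first * p.second;               // weighted deviation
         used += p.second;
      }
      return used > 0 ? sum / used : 0.0;
   }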
 
Float_t fSamplingWeight
changing factor for event weights when sampling is turned on
 
Int_t fBatchSize
batch size, only matters if in batch learning mode
 
void GetHelpMessage() const
get help message text
 
void BackPropagationMinimize(Int_t nEpochs)
minimize estimator / train network with back propagation algorithm
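A schematic of the loop structure this implies, pieced together from the members on this page (TrainOneEpoch, Shuffle, fTestRate, CalculateEstimator); a sketch, not the verbatim ROOT code:

   #include <algorithm>
   #include <numeric>
   #include <random>
   #include <vector>

   void BackPropSketch(int nEpochs, int nEvents, int testRate)
   {
      std::vector<int> index(nEvents);
      std::iota(index.begin(), index.end(), 0);
      std::mt19937 rng(42);

      for (int epoch = 0; epoch < nEpochs; ++epoch) {
         std::shuffle(index.begin(), index.end(), rng); // cf. Shuffle()
         for (int i = 0; i < nEvents; ++i) {
            // TrainOneEvent(index[i]): forward pass, CalculateNeuronDeltas(),
            // then UpdateSynapses() (sequential) or AdjustSynapseWeights() (batch)
         }
         if (testRate > 0 && epoch % testRate == 0) {
            // CalculateEstimator(Types::kTesting, epoch): overtraining check
         }
      }
   }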
 
Double_t GetMSEErr(const Event *ev, UInt_t index=0)
compute the mean-squared-error contribution of a single event
 
void MakeClassSpecific(std::ostream &, const TString &) const
write specific classifier response
 
void AdjustSynapseWeights()
just adjust the synapse weights (should be called in batch mode)
 
std::vector< Double_t > fPriorDev
deviations entering the Bayesian regulator term
 
TString fBpModeS
backprop learning mode option string (sequential or batch)
 
void SteepestDir(TMatrixD &Dir)
set Dir to the steepest-descent direction (the negative gradient)
 
void TrainOneEpoch()
train network over a single epoch/cycle of events
 
virtual Bool_t HasAnalysisType(Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets)
MLP can handle classification with 2 classes and regression with one regression-target.
 
TString fTrainMethodS
training method option string (BP, BFGS, or GA)
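The training method is selected through the option string when booking the method via the Factory. A hedged example following the standard TMVAClassification tutorial (factory and dataloader are assumed to be set up as in that tutorial; the option values shown are documented TMVA options):

   factory->BookMethod(dataloader, TMVA::Types::kMLP, "MLP",
      "H:!V:NeuronType=tanh:VarTransform=N:NCycles=600:HiddenLayers=N+5:"
      "TestRate=5:TrainingMethod=BFGS:!UseRegulator");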
 
Int_t fGA_nsteps
GA settings: number of steps.
 
Bool_t GetHessian(TMatrixD &Hessian, TMatrixD &Gamma, TMatrixD &Delta)
perform the BFGS update of the approximate Hessian from Gamma and Delta
 
Double_t ComputeEstimator(std::vector< Double_t > &parameters)
this function is called by GeneticANN for GA optimization
 
static const Bool_t fgPRINT_BATCH
debug flags
 
void InitializeLearningRates()
initialize learning rates of synapses, used only by back propagation
 
Int_t fGA_preCalc
GA settings: number of pre-calc steps.
 
void CalculateNeuronDeltas()
have each neuron calculate its delta by back propagation
 
Int_t fTestRate
interval (in epochs) between overtraining tests
 
Double_t fDecayRate
decay rate for the learning rate
 
ETrainingMethod fTrainingMethod
method of training (BP, BFGS, or GA)
 
EBPTrainingMode fBPMode
backprop learning mode (sequential or batch)
 
Double_t DerivDir(TMatrixD &Dir)
compute the derivative of the estimator along the direction Dir
 
Double_t fGA_SC_factor
GA settings: SC_factor.
 
Double_t GetCEErr(const Event *ev, UInt_t index=0)
compute the cross-entropy error contribution of a single event
 
virtual ~MethodMLP()
destructor; nothing to be done
 
Int_t fGA_SC_steps
GA settings: SC_steps.
 
void SetDir(TMatrixD &Hessian, TMatrixD &Dir)
set the search direction Dir from the approximate Hessian and the gradient
 
void Shuffle(Int_t *index, Int_t n)
shuffle the n entries of the index array, randomizing the order in which events are visited during sequential training
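A plausible implementation matching this signature is a Fisher-Yates shuffle (a sketch; the ROOT version draws from the method's own random generator):

   #include <utility>
   #include "TRandom3.h"

   void ShuffleSketch(Int_t* index, Int_t n)
   {
      TRandom3 rng(0);
      for (Int_t i = n - 1; i > 0; --i) {
         Int_t j = (Int_t) rng.Integer(i + 1);  // uniform in [0, i]
         std::swap(index[i], index[j]);
      }
   }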
 
Bool_t fSamplingTraining
The training sample is sampled.
 
void SimulateEvent(const Event *ev)
propagate the event through the network and accumulate the error derivatives
 
void SetDirWeights(std::vector< Double_t > &Origin, TMatrixD &Dir, Double_t alpha)
set the synapse weights to Origin + alpha * Dir (a step along the search direction)
 
void SetGammaDelta(TMatrixD &Gamma, TMatrixD &Delta, std::vector< Double_t > &Buffer)
compute Gamma (change of the gradient) and Delta (change of the weights) for the BFGS update
 
Double_t EstimatorFunction(std::vector< Double_t > &parameters)
interface to the estimator
 
Double_t fLearnRate
learning rate for synapse weight adjustments
 
void GetApproxInvHessian(TMatrixD &InvHessian, bool regulate=true)
rank-1 (outer-product) approximation that neglects second derivatives
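In the outer-product approximation, the Hessian of a squared-error loss is accumulated from per-event gradients only, each contributing a rank-1 term; a sketch under that assumption (the regulate flag would add a diagonal regulator before inversion):

   #include <cstddef>
   #include <vector>

   using Mat = std::vector<std::vector<double>>;

   // H += g g^T for one event's gradient g (rank-1 contribution)
   void AccumulateOuterProduct(Mat& H, const std::vector<double>& g)
   {
      for (std::size_t i = 0; i < g.size(); ++i)
         for (std::size_t j = 0; j < g.size(); ++j)
            H[i][j] += g[i] * g[j];
   }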
 
void BFGSMinimize(Int_t nEpochs)
train network with BFGS algorithm
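The members above (SetGammaDelta, GetHessian, SetDir, LineSearch) map onto the standard BFGS scheme: with Delta = w_new - w_old and Gamma = g_new - g_old, the approximate inverse Hessian H is updated and the next search direction is Dir = -H * g. A sketch of the textbook inverse-Hessian update (the ROOT code keeps these as TMatrixD objects and may differ in detail):

   #include <cstddef>
   #include <vector>

   using Vec = std::vector<double>;
   using Mat = std::vector<Vec>;

   // H <- (I - rho s y^T) H (I - rho y s^T) + rho s s^T, with rho = 1/(y^T s),
   // s = Delta (weight change), y = Gamma (gradient change); H assumed symmetric.
   void BfgsUpdateSketch(Mat& H, const Vec& y, const Vec& s)
   {
      const std::size_t n = s.size();
      double sy = 0;
      for (std::size_t i = 0; i < n; ++i) sy += s[i] * y[i];
      const double rho = 1.0 / sy;

      Vec Hy(n, 0.0);                 // Hy = H * y, yHy = y^T H y
      double yHy = 0;
      for (std::size_t i = 0; i < n; ++i) {
         for (std::size_t j = 0; j < n; ++j) Hy[i] += H[i][j] * y[j];
         yHy += y[i] * Hy[i];
      }
      for (std::size_t i = 0; i < n; ++i)
         for (std::size_t j = 0; j < n; ++j)
            H[i][j] += rho * ((1.0 + rho * yHy) * s[i] * s[j]
                              - s[i] * Hy[j] - Hy[i] * s[j]);
   }

If the update becomes ill-conditioned (y^T s near zero), falling back to the steepest-descent direction is the usual safeguard, which matches the Bool_t return of GetHessian.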
 
void UpdateSynapses()
update synapse error fields and adjust the weights (if in sequential mode)
 
void Init()
default initializations
 
static const Int_t fgPRINT_ESTIMATOR_INC
debug flags
 
void ProcessOptions()
process user options
 
Float_t fSamplingFraction
fraction of events which is sampled for training
 
void TrainOneEvent(Int_t ievt)
train the network on a single event; this uses the new event model
 
Double_t GetDesiredOutput(const Event *ev)
get the desired output of this event
 
void GeneticMinimize()
create a genetics class similar to GeneticCut and give it a vector of parameter ranges (parameters = weights)...
 
Bool_t fSamplingTesting
The testing sample is sampled.
 
Double_t fLastAlpha
line search variable
 
Float_t fSamplingEpoch
fraction of epochs where sampling is used
 
void DecaySynapseWeights(Bool_t lateEpoch)
decay the synapse weights; in the last 10 epochs the learning rate is lowered even further to find a good minimum
 
void TrainOneEventFast(Int_t ievt, Float_t *&branchVar, Int_t &type)
fast per-event training
 
Bool_t fEpochMon
create and fill epoch-wise monitoring histograms (makes the output file big!)
 
void UpdateNetwork(Double_t desired, Double_t eventWeight=1.0)
update the network based on how closely the output matched the desired output
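For a single sigmoid output neuron and squared-error loss, the update this describes reduces to the textbook delta rule; a self-contained numeric illustration (not the ROOT code, which loops over all neurons and synapses):

   #include <cmath>
   #include <cstdio>

   int main()
   {
      double x[2] = {0.5, -1.0};           // inputs
      double w[2] = {0.1, 0.4}, b = 0.0;   // weights and bias
      double learnRate = 0.02, desired = 1.0, eventWeight = 1.0;

      double net   = w[0] * x[0] + w[1] * x[1] + b;
      double out   = 1.0 / (1.0 + std::exp(-net));              // sigmoid
      double delta = (out - desired) * eventWeight * out * (1.0 - out);

      for (int i = 0; i < 2; ++i) w[i] -= learnRate * delta * x[i];
      b -= learnRate * delta;
      std::printf("out=%.4f  new w=(%.4f, %.4f)\n", out, w[0], w[1]);
      return 0;
   }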
 
void UpdateRegulators()
update the Bayesian regulators
 
Bool_t LineSearch(TMatrixD &Dir, std::vector< Double_t > &Buffer, Double_t *dError=nullptr)
perform a line search along the direction Dir
 
void DeclareOptions()
define the options (their key words) that can be set in the option string
 
Double_t CalculateEstimator(Types::ETreeType treeType=Types::kTraining, Int_t iEpoch=-1)
calculate the estimator that training is attempting to minimize
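For mean-squared-error loss this amounts to a weighted average of squared deviations over the sample; a simplified sketch (the real function also supports cross-entropy, multiclass, and the fWeightRange trimming documented above):

   #include <cstddef>
   #include <vector>

   double MseEstimatorSketch(const std::vector<double>& out,
                             const std::vector<double>& target,
                             const std::vector<double>& weight)
   {
      double sum = 0, sumW = 0;
      for (std::size_t i = 0; i < out.size(); ++i) {
         const double d = out[i] - target[i];
         sum  += weight[i] * d * d;   // weighted squared deviation
         sumW += weight[i];
      }
      return sumW > 0 ? sum / sumW : 0.0;
   }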
 
static const Bool_t fgPRINT_SEQ
debug flags
 
create variable transformations