#include "TString.h"
#include <vector>
#include "TTree.h"
#include "Riostream.h"
#include "TRandom3.h"
#include "TFitter.h"
#ifndef ROOT_TMVA_MethodMLP
#include "TMVA/MethodMLP.h"
#endif
#ifndef ROOT_TMVA_TNeuron
#include "TMVA/TNeuron.h"
#endif
#ifndef ROOT_TMVA_TSynapse
#include "TMVA/TSynapse.h"
#endif
#ifndef ROOT_TMVA_Timer
#include "TMVA/Timer.h"
#endif
#ifndef ROOT_TMVA_Types
#include "TMVA/Types.h"
#endif
#ifndef ROOT_TMVA_Tools
#include "TMVA/Tools.h"
#endif
#ifndef ROOT_TMVA_GeneticANN
#include "TMVA/GeneticANN.h"
#endif
#ifdef MethodMLP_UseMinuit__
TMVA::MethodMLP* TMVA::MethodMLP::fgThis = 0;
Bool_t MethodMLP_UseMinuit = kTRUE;
#endif
using std::vector;
ClassImp(TMVA::MethodMLP);
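//_______________________________________________________________________
// standard constructor: declare, parse and process the booking options,
// set the learning rate on every synapse, and in batch mode clamp the
// batch size to the number of training events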
TMVA::MethodMLP::MethodMLP( TString jobName, TString methodTitle, DataSet& theData,
TString theOption, TDirectory* theTargetDir )
: TMVA::MethodANNBase( jobName, methodTitle, theData, theOption, theTargetDir )
{
InitMLP();
DeclareOptions();
ParseOptions();
ProcessOptions();
InitializeLearningRates();
if (fBPMode == kBatch) {
Int_t numEvents = Data().GetNEvtTrain();
if (fBatchSize < 1 || fBatchSize > numEvents) fBatchSize = numEvents;
}
}
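//_______________________________________________________________________
// constructor used when reading a trained network back from a weight file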
TMVA::MethodMLP::MethodMLP( DataSet& theData, TString theWeightFile, TDirectory* theTargetDir )
: TMVA::MethodANNBase( theData, theWeightFile, theTargetDir )
{
InitMLP();
DeclareOptions();
}
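//_______________________________________________________________________
// destructor: nothing to be done here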
TMVA::MethodMLP::~MethodMLP()
{
}
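//_______________________________________________________________________
// default initialisations specific to the MLP: method name, method type
// and test-variable name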
void TMVA::MethodMLP::InitMLP()
{
SetMethodName( "MLP" );
SetMethodType( TMVA::Types::kMLP );
SetTestvarName();
}
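//_______________________________________________________________________
// define the options that can be set in the booking string:
//   TrainingMethod  "BP" (back-propagation) or "GA" (genetic algorithm)
//   LearningRate    NN learning rate parameter
//   DecayRate       decay rate of the learning parameter
//   TestRate        overtraining test performed every TestRate epochs
//   BPMode          back-propagation mode: "sequential" or "batch"
//   BatchSize       events per batch (batch mode only), -1 = all training events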
void TMVA::MethodMLP::DeclareOptions()
{
DeclareOptionRef(fTrainMethodS="BP", "TrainingMethod",
"Train with Back-Propagation (BP) or Genetic Algorithm (GA) (takes a LONG time)");
AddPreDefVal(TString("BP"));
AddPreDefVal(TString("GA"));
DeclareOptionRef(fLearnRate=0.02, "LearningRate", "NN learning rate parameter");
DeclareOptionRef(fDecayRate=0.01, "DecayRate", "Decay rate for learning parameter");
DeclareOptionRef(fTestRate=10, "TestRate", "Test for overtraining is performed at each #th epoch");
DeclareOptionRef(fBpModeS="sequential", "BPMode",
"Back-propagation learning mode: sequential or batch");
AddPreDefVal(TString("sequential"));
AddPreDefVal(TString("batch"));
DeclareOptionRef(fBatchSize=-1, "BatchSize",
"Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events");
}
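//_______________________________________________________________________
// process the options: after the base-class processing, map the
// TrainingMethod and BPMode strings onto the corresponding enums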
void TMVA::MethodMLP::ProcessOptions()
{
MethodANNBase::ProcessOptions();
if (fTrainMethodS == "BP") fTrainingMethod = kBP;
else if (fTrainMethodS == "GA") fTrainingMethod = kGA;
if (fBpModeS == "sequential") fBPMode = kSequential;
else if (fBpModeS == "batch") fBPMode = kBatch;
}
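//_______________________________________________________________________
// initialise the learning rate of every synapse to the global LearningRate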
void TMVA::MethodMLP::InitializeLearningRates()
{
TSynapse *synapse;
Int_t numSynapses = fSynapses->GetEntriesFast();
for (Int_t i = 0; i < numSynapses; i++) {
synapse = (TSynapse*)fSynapses->At(i);
synapse->SetLearningRate(fLearnRate);
}
}
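//_______________________________________________________________________
// mean-squared-error estimator over the requested tree (training or test):
//   E = 1/(2N) * sum_i (output_i - desired_i)^2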
Double_t TMVA::MethodMLP::CalculateEstimator( TMVA::Types::ETreeType treeType )
{
Int_t nEvents = ( (treeType == TMVA::Types::kTesting ) ? Data().GetNEvtTest() :
(treeType == TMVA::Types::kTraining) ? Data().GetNEvtTrain() : -1 );
if (nEvents <=0)
fLogger << kFATAL << "<CalculateEstimator> fatal error: wrong tree type: " << treeType << Endl;
Double_t estimator = 0;
for (Int_t i = 0; i < nEvents; i++) {
if (treeType == TMVA::Types::kTesting )
ReadTestEvent(i);
else
ReadTrainingEvent(i);
Double_t desired = GetDesiredOutput();
ForceNetworkInputs();
ForceNetworkCalculations();
Double_t d = GetOutputNeuron()->GetActivationValue() - desired;
estimator += (d*d);
}
estimator = estimator*0.5/Float_t(nEvents);
return estimator;
}
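//_______________________________________________________________________
// train the network: with Minuit if compiled in, otherwise with the
// genetic algorithm or back-propagation, as selected by TrainingMethod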
void TMVA::MethodMLP::Train(Int_t nEpochs)
{
PrintMessage("Training Network");
TMVA::Timer timer( nEpochs, GetName() );
#ifdef MethodMLP_UseMinuit__
if (MethodMLP_UseMinuit) MinuitMinimize();
#else
if (fTrainingMethod == kGA) GeneticMinimize();
else BackPropagationMinimize(nEpochs);
#endif
PrintMessage("Train: elapsed time: " + timer.GetElapsedTime() + " ", kTRUE);
}
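//_______________________________________________________________________
// minimise the estimator with back-propagation: loop over the epochs,
// decay the synapse learning rates (with the squared decay rate in the
// final ~5% of epochs) and, every TestRate epochs, fill the training and
// test estimator histograms to monitor overtraining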
void TMVA::MethodMLP::BackPropagationMinimize(Int_t nEpochs)
{
TMVA::Timer timer( nEpochs, GetName() );
Int_t lateEpoch = (Int_t)(nEpochs*0.95) - 1;
Int_t nbinTest = Int_t(nEpochs/fTestRate);
fEstimatorHistTrain = new TH1F( "estimatorHistTrain", "training estimator",
nbinTest, Int_t(fTestRate/2), nbinTest*fTestRate+Int_t(fTestRate/2) );
fEstimatorHistTest = new TH1F( "estimatorHistTest", "test estimator",
nbinTest, Int_t(fTestRate/2), nbinTest*fTestRate+Int_t(fTestRate/2) );
for (Int_t i = 0; i < nEpochs; i++) {
timer.DrawProgressBar(i);
TrainOneEpoch();
DecaySynapseWeights(i >= lateEpoch);
if ((i+1)%fTestRate == 0) {
Double_t trainE = CalculateEstimator( TMVA::Types::kTraining );
Double_t testE = CalculateEstimator( TMVA::Types::kTesting );
fEstimatorHistTrain->Fill( i+1, trainE );
fEstimatorHistTest ->Fill( i+1, testE );
}
}
}
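//_______________________________________________________________________
// train one epoch: present all training events once, in random order;
// in batch mode the weights are adjusted after every BatchSize events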
void TMVA::MethodMLP::TrainOneEpoch()
{
Int_t nEvents = Data().GetNEvtTrain();
Int_t* index = new Int_t[nEvents];
for (Int_t i = 0; i < nEvents; i++) index[i] = i;
Shuffle(index, nEvents);
for (Int_t i = 0; i < nEvents; i++) {
TrainOneEvent(index[i]);
if (fBPMode == kBatch && (i+1)%fBatchSize == 0) {
AdjustSynapseWeights();
if (fgPRINT_BATCH) {
PrintNetwork();
WaitForKeyboard();
}
}
if (fgPRINT_SEQ) {
PrintNetwork();
WaitForKeyboard();
}
}
delete[] index;
}
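//_______________________________________________________________________
// produce a random permutation of the event indices, so that each epoch
// of sequential training presents the events in a different order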
void TMVA::MethodMLP::Shuffle(Int_t* index, Int_t n)
{
Int_t j, k;
Int_t a = n - 1;
for (Int_t i = 0; i < n; i++) {
j = (Int_t) (frgen->Rndm() * a);
k = index[j];
index[j] = index[i];
index[i] = k;
}
}
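//_______________________________________________________________________
// decay the learning rate of every synapse by DecayRate; in the late
// epochs the squared decay rate is used instead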
void TMVA::MethodMLP::DecaySynapseWeights(Bool_t lateEpoch)
{
TSynapse* synapse;
Int_t numSynapses = fSynapses->GetEntriesFast();
for (Int_t i = 0; i < numSynapses; i++) {
synapse = (TSynapse*)fSynapses->At(i);
if (lateEpoch) synapse->DecayLearningRate(fDecayRate*fDecayRate);
else synapse->DecayLearningRate(fDecayRate);
}
}
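//_______________________________________________________________________
// fast variant of TrainOneEvent: the input values are taken directly from
// the supplied branch buffer (type == 0 means background, otherwise
// signal) and a unit event weight is used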
void TMVA::MethodMLP::TrainOneEventFast(Int_t ievt, Float_t*& branchVar, Int_t& type)
{
ReadTrainingEvent(ievt);
Double_t eventWeight = 1.0;
Double_t desired;
if (type == 0) desired = fActivation->GetMin();
else desired = fActivation->GetMax();
Double_t x;
TNeuron* neuron;
for (Int_t j = 0; j < GetNvar(); j++) {
x = branchVar[j];
if (Normalize()) x = Norm(j, x);
neuron = GetInputNeuron(j);
neuron->ForceValue(x);
}
ForceNetworkCalculations();
UpdateNetwork(desired, eventWeight);
}
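//_______________________________________________________________________
// train the network on a single event: forward pass, then weight update
// via back-propagation, using the event weight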
void TMVA::MethodMLP::TrainOneEvent(Int_t ievt)
{
ReadTrainingEvent(ievt);
Double_t eventWeight = Data().Event().GetWeight();
Double_t desired = GetDesiredOutput();
ForceNetworkInputs();
ForceNetworkCalculations();
UpdateNetwork(desired, eventWeight);
}
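//_______________________________________________________________________
// desired network output: activation maximum for signal events,
// activation minimum for background events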
Double_t TMVA::MethodMLP::GetDesiredOutput()
{
Double_t desired;
if (Data().Event().IsSignal()) desired = fActivation->GetMax();
else desired = fActivation->GetMin();
return desired;
}
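//_______________________________________________________________________
// set the (event-weighted) error on the output neuron, back-propagate the
// neuron deltas and update the synapses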
void TMVA::MethodMLP::UpdateNetwork(Double_t desired, Double_t eventWeight)
{
Double_t error = GetOutputNeuron()->GetActivationValue() - desired;
error *= eventWeight;
GetOutputNeuron()->SetError(error);
CalculateNeuronDeltas();
UpdateSynapses();
}
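//_______________________________________________________________________
// calculate the error delta of every neuron, iterating backwards from the
// output layer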
void TMVA::MethodMLP::CalculateNeuronDeltas()
{
TNeuron* neuron;
Int_t numNeurons;
TObjArray* curLayer;
Int_t numLayers = fNetwork->GetEntriesFast();
for (Int_t i = numLayers-1; i >= 0; i--) {
curLayer = (TObjArray*)fNetwork->At(i);
numNeurons = curLayer->GetEntriesFast();
for (Int_t j = 0; j < numNeurons; j++) {
neuron = (TNeuron*) curLayer->At(j);
neuron->CalculateDelta();
}
}
}
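//_______________________________________________________________________
// minimise the estimator with a genetic algorithm: each synapse weight is
// a gene constrained to [-3, 3]; after the preliminary GA passes, a final
// pass seeded with the best population is run until the fitness converges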
void TMVA::MethodMLP::GeneticMinimize()
{
PrintMessage("Minimizing Estimator with GA");
fGA_preCalc = 1;
fGA_SC_steps = 10;
fGA_SC_offsteps = 5;
fGA_SC_factor = 0.95;
fGA_nsteps = 30;
vector<LowHigh_t*> ranges;
Int_t numWeights = fSynapses->GetEntriesFast();
for (Int_t ivar=0; ivar< numWeights; ivar++) {
ranges.push_back( new LowHigh_t( -3.0, 3.0 ) );
}
GeneticANN *bestResultsStore = new GeneticANN( 0, ranges, this );
GeneticANN *bestResults = new GeneticANN( 0, ranges, this );
Timer timer1( fGA_preCalc, GetName() );
for (Int_t preCalc = 0; preCalc < fGA_preCalc; preCalc++) {
GeneticANN ga( ranges.size() * 10, ranges, this );
ga.GetGeneticPopulation().AddPopulation( &bestResults->GetGeneticPopulation() );
ga.CalculateFitness();
ga.GetGeneticPopulation().TrimPopulation();
while (true) {
ga.Init();
ga.CalculateFitness();
ga.SpreadControl( fGA_SC_steps, fGA_SC_offsteps, fGA_SC_factor );
if (ga.HasConverged( Int_t(fGA_nsteps*0.67), 0.0001 )) break;
}
bestResultsStore->GetGeneticPopulation().GiveHint( ga.GetGeneticPopulation().GetGenes( 0 )->GetFactors() );
delete bestResults;
bestResults = bestResultsStore;
bestResultsStore = new GeneticANN( 0, ranges, this );
}
Double_t estimator = CalculateEstimator();
bestResults->Init();
fLogger << kINFO << "GA: starting main course " << Endl;
vector<Double_t> par(numWeights);   // one entry per synapse weight
TMVA::GeneticANN ga( ranges.size() * 10, ranges, this );
ga.SetSpread( 0.1 );
ga.GetGeneticPopulation().AddPopulation( &bestResults->GetGeneticPopulation() );
ga.CalculateFitness();
ga.GetGeneticPopulation().TrimPopulation();
while(true) {
ga.Init();
ga.CalculateFitness();
ga.SpreadControl( fGA_SC_steps, fGA_SC_offsteps, fGA_SC_factor );
if (ga.HasConverged( fGA_nsteps, 0.00001 )) break;
}
Int_t n = 0;
vector< Double_t >::iterator vec = ga.GetGeneticPopulation().GetGenes( 0 )->GetFactors().begin();
for (; vec < ga.GetGeneticPopulation().GetGenes( 0 )->GetFactors().end(); vec++ ) {
par[n] = (*vec);
n++;
}
fLogger << kINFO << "GA: elapsed time: " << timer1.GetElapsedTime() << Endl;
estimator = CalculateEstimator();
fLogger << kINFO << "GA: stimator after optimization: " << estimator << Endl;
}
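//_______________________________________________________________________
// interface for the genetic algorithm: set the synapse weights from the
// parameter vector and return the resulting estimator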
Double_t TMVA::MethodMLP::ComputeEstimator(const vector<Double_t>& parameters)
{
TSynapse* synapse;
Int_t numSynapses = fSynapses->GetEntriesFast();
for (Int_t i = 0; i < numSynapses; i++) {
synapse = (TSynapse*)fSynapses->At(i);
synapse->SetWeight(parameters.at(i));
}
Double_t estimator = CalculateEstimator();
return estimator;
}
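//_______________________________________________________________________
// let every neuron update its incoming synapses: immediately in
// sequential mode, or by accumulating the corrections in batch mode
// (applied later by AdjustSynapseWeights)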
void TMVA::MethodMLP::UpdateSynapses()
{
TNeuron* neuron;
Int_t numNeurons;
TObjArray* curLayer;
Int_t numLayers = fNetwork->GetEntriesFast();
for (Int_t i = 0; i < numLayers; i++) {
curLayer = (TObjArray*)fNetwork->At(i);
numNeurons = curLayer->GetEntriesFast();
for (Int_t j = 0; j < numNeurons; j++) {
neuron = (TNeuron*) curLayer->At(j);
if (fBPMode == kBatch) neuron->UpdateSynapsesBatch();
else neuron->UpdateSynapsesSequential();
}
}
}
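//_______________________________________________________________________
// batch mode only: apply the accumulated weight corrections to the
// synapses of every neuron, starting from the output layer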
void TMVA::MethodMLP::AdjustSynapseWeights()
{
TNeuron* neuron;
Int_t numNeurons;
TObjArray* curLayer;
Int_t numLayers = fNetwork->GetEntriesFast();
for (Int_t i = numLayers-1; i >= 0; i--) {
curLayer = (TObjArray*)fNetwork->At(i);
numNeurons = curLayer->GetEntriesFast();
for (Int_t j = 0; j < numNeurons; j++) {
neuron = (TNeuron*) curLayer->At(j);
neuron->AdjustSynapseWeights();
}
}
}
#ifdef MethodMLP_UseMinuit__
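//_______________________________________________________________________
// alternative minimisation with TFitter/MIGRAD, compiled only when
// MethodMLP_UseMinuit__ is defined; every synapse weight is a fit parameter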
void TMVA::MethodMLP::MinuitMinimize()
{
fgThis = this;  // let the static Minuit callback (IFCN) reach this instance
fNumberOfWeights = fSynapses->GetEntriesFast();
TFitter* tfitter = new TFitter( fNumberOfWeights );
// seed each fit parameter with the current weight of the corresponding synapse
for (Int_t ipar=0; ipar < fNumberOfWeights; ipar++) {
TString parName = Form("w%i", ipar);
Double_t w = ((TSynapse*)fSynapses->At(ipar))->GetWeight();
tfitter->SetParameter( ipar, parName, w, 0.1, 0, 0 );
}
tfitter->SetFCN( &IFCN );
Double_t args[10];
args[0] = 2;
tfitter->ExecuteCommand( "SET PRINTOUT", args, 1 );
tfitter->ExecuteCommand( "SET NOWARNINGS", args, 0 );
args[0] = 2;
tfitter->ExecuteCommand( "SET STRATEGY", args, 1 );
args[0] = 1e-04;
tfitter->ExecuteCommand( "MIGRAD", args, 1 );
Bool_t doBetter = kFALSE;
Bool_t doEvenBetter = kFALSE;
if (doBetter) {
args[0] = 1e-04;
tfitter->ExecuteCommand( "IMPROVE", args, 1 );
if (doEvenBetter) {
args[0] = 500;
tfitter->ExecuteCommand( "MINOS", args, 1 );
}
}
}
//_______________________________________________________________________
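// static FCN interface required by Minuit; forwards the call to the FCN
// member function of the current instance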
void TMVA::MethodMLP::IFCN( Int_t& npars, Double_t* grad, Double_t &f, Double_t* fitPars, Int_t iflag )
{
((MethodMLP*)GetThisPtr())->FCN( npars, grad, f, fitPars, iflag );
}
static Int_t nc = 0;
static double minf = 1000000;
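//_______________________________________________________________________
// Minuit estimator function: copy the fit parameters into the synapse
// weights, evaluate the estimator, and log the call in the file-static
// counters nc and minf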
void TMVA::MethodMLP::FCN( Int_t& npars, Double_t* grad, Double_t &f, Double_t* fitPars, Int_t iflag )
{
for (Int_t ipar=0; ipar<fNumberOfWeights; ipar++) {
TSynapse* synapse = (TSynapse*)fSynapses->At(ipar);
synapse->SetWeight(fitPars[ipar]);
}
f = CalculateEstimator();
nc++;
if (f < minf) minf = f;
for (Int_t ipar=0; ipar<fNumberOfWeights; ipar++) fLogger << kVERBOSE << fitPars[ipar] << " ";
fLogger << kVERBOSE << Endl;
fLogger << kVERBOSE << "***** new estimator: " << f << " min: " << minf << " --> ncalls: " << nc << Endl;
}
#endif