#include <cstdlib>
#include <cstring>
#include <iostream>
#include <fstream>
#include <sstream>
#include "Riostream.h"
#include "TLeaf.h"
#include "TEventList.h"
#include "TObjString.h"
#include "TROOT.h"
#include "TMultiLayerPerceptron.h"
#include "TMVA/Config.h"
#include "TMVA/MethodTMlpANN.h"
#include "TMVA/ClassifierFactory.h"
#ifndef ROOT_TMVA_Tools
#include "TMVA/Tools.h"
#endif
using std::atoi;
const Bool_t EnforceNormalization__ = kTRUE; // prefix input variables with "@" so that TMultiLayerPerceptron normalises them
REGISTER_METHOD(TMlpANN)
ClassImp(TMVA::MethodTMlpANN)
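////////////////////////////////////////////////////////////////////////////////
/// standard constructor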
TMVA::MethodTMlpANN::MethodTMlpANN( const TString& jobName,
const TString& methodTitle,
DataSetInfo& theData,
const TString& theOption,
TDirectory* theTargetDir) :
TMVA::MethodBase( jobName, Types::kTMlpANN, methodTitle, theData, theOption, theTargetDir ),
fMLP(0),
fLocalTrainingTree(0),
fNcycles(100),
fValidationFraction(0.5),
fLearningMethod( "" )
{
}
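////////////////////////////////////////////////////////////////////////////////
/// constructor from weight file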
TMVA::MethodTMlpANN::MethodTMlpANN( DataSetInfo& theData,
const TString& theWeightFile,
TDirectory* theTargetDir ) :
TMVA::MethodBase( Types::kTMlpANN, theData, theWeightFile, theTargetDir ),
fMLP(0),
fLocalTrainingTree(0),
fNcycles(100),
fValidationFraction(0.5),
fLearningMethod( "" )
{
}
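////////////////////////////////////////////////////////////////////////////////
/// TMlpANN can handle classification with two classes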
Bool_t TMVA::MethodTMlpANN::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses,
UInt_t )
{
if (type == Types::kClassification && numberClasses == 2) return kTRUE;
return kFALSE;
}
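////////////////////////////////////////////////////////////////////////////////
/// default initialisations (nothing to be done here)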
void TMVA::MethodTMlpANN::Init( void )
{
}
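////////////////////////////////////////////////////////////////////////////////
/// destructor: delete the network if one has been built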
TMVA::MethodTMlpANN::~MethodTMlpANN( void )
{
if (fMLP) delete fMLP;
}
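////////////////////////////////////////////////////////////////////////////////
/// translate the "HiddenLayers" specification into the TMultiLayerPerceptron
/// build string; "N" expands to the number of input variables, so for example
/// (illustrative, for a dataset with 4 inputs) the spec "N,N-1" yields the
/// hidden-layer string ":4:3:"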
void TMVA::MethodTMlpANN::CreateMLPOptions( TString layerSpec )
{
fHiddenLayer = ":";
while (layerSpec.Length()>0) {
TString sToAdd="";
if (layerSpec.First(',')<0) {
sToAdd = layerSpec;
layerSpec = "";
}
else {
sToAdd = layerSpec(0,layerSpec.First(','));
layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
}
int nNodes = 0;
if (sToAdd.BeginsWith("N")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
nNodes += atoi(sToAdd);
fHiddenLayer = Form( "%s%i:", (const char*)fHiddenLayer, nNodes );
}
// assemble the TMultiLayerPerceptron build string: a comma-separated list of
// input variables (each prefixed with "@" to request normalisation), followed
// by the hidden-layer specification and the name of the output branch
std::vector<TString>::iterator itrVar = (*fInputVars).begin();
std::vector<TString>::iterator itrVarEnd = (*fInputVars).end();
fMLPBuildOptions = "";
for (; itrVar != itrVarEnd; ++itrVar) {
if (EnforceNormalization__) fMLPBuildOptions += "@";
fMLPBuildOptions += *itrVar;
fMLPBuildOptions += ",";
}
fMLPBuildOptions.Chop(); // remove the trailing comma
fMLPBuildOptions += fHiddenLayer;
fMLPBuildOptions += "type"; // the output neuron is trained against the "type" branch
Log() << kINFO << "Use " << fNcycles << " training cycles" << Endl;
Log() << kINFO << "Use configuration (nodes per hidden layer): " << fHiddenLayer << Endl;
}
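////////////////////////////////////////////////////////////////////////////////
/// define the configurable options of this method; a typical booking string
/// could look like this (illustrative example, assuming the standard TMVA
/// Factory):
///
///   factory->BookMethod( TMVA::Types::kTMlpANN, "TMlpANN",
///      "!H:!V:NCycles=200:HiddenLayers=N+1,N:LearningMethod=BFGS:ValidationFraction=0.3" );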
void TMVA::MethodTMlpANN::DeclareOptions()
{
DeclareOptionRef( fNcycles = 200, "NCycles", "Number of training cycles" );
DeclareOptionRef( fLayerSpec = "N,N-1", "HiddenLayers", "Specification of hidden layer architecture (N stands for number of variables; any integers may also be used)" );
DeclareOptionRef( fValidationFraction = 0.5, "ValidationFraction",
"Fraction of events in training tree used for cross validation" );
DeclareOptionRef( fLearningMethod = "Stochastic", "LearningMethod", "Learning method" );
AddPreDefVal( TString("Stochastic") );
AddPreDefVal( TString("Batch") );
AddPreDefVal( TString("SteepestDescent") );
AddPreDefVal( TString("RibierePolak") );
AddPreDefVal( TString("FletcherReeves") );
AddPreDefVal( TString("BFGS") );
}
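////////////////////////////////////////////////////////////////////////////////
/// process the user options: build the TMultiLayerPerceptron option string;
/// events with negative weights are not supported by this method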
void TMVA::MethodTMlpANN::ProcessOptions()
{
CreateMLPOptions(fLayerSpec);
if (IgnoreEventsWithNegWeightsInTraining()) {
Log() << kFATAL << "Mechanism to ignore events with negative weights in training not available for method"
<< GetMethodTypeName()
<< " --> please remove \"IgnoreNegWeightsInTraining\" option from booking string."
<< Endl;
}
}
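////////////////////////////////////////////////////////////////////////////////
/// calculate the network response for the current event; no error estimate
/// is computed (NoErrorCalc)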
Double_t TMVA::MethodTMlpANN::GetMvaValue( Double_t* err, Double_t* errUpper )
{
const Event* ev = GetEvent();
// thread-local input buffer, allocated once per thread and reused across calls
TTHREAD_TLS_DECL_ARG(Double_t*, d, new Double_t[Data()->GetNVariables()]);
for (UInt_t ivar = 0; ivar<Data()->GetNVariables(); ivar++) {
d[ivar] = (Double_t)ev->GetValue(ivar);
}
Double_t mvaVal = fMLP->Evaluate(0,d); // response of the single output neuron
NoErrorCalc(err, errUpper);
return mvaVal;
}
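////////////////////////////////////////////////////////////////////////////////
/// performs the training: the TMVA events are copied into a local tree, which
/// is split into training and validation subsets and handed to
/// TMultiLayerPerceptron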
void TMVA::MethodTMlpANN::Train( void )
{
Int_t type;
Float_t weight;
const Long_t basketsize = 128000;
Float_t* vArr = new Float_t[GetNvar()];
// TMultiLayerPerceptron takes its training data from a TTree: build a local
// tree holding the event type (1 = signal, 0 = background), the event weight,
// and one branch per input variable
TTree *localTrainingTree = new TTree( "TMLPtrain", "Local training tree for TMlpANN" );
localTrainingTree->Branch( "type", &type, "type/I", basketsize );
localTrainingTree->Branch( "weight", &weight, "weight/F", basketsize );
for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
const char* myVar = GetInternalVarName(ivar).Data();
localTrainingTree->Branch( myVar, &vArr[ivar], Form("Var%02i/F", ivar), basketsize );
}
for (UInt_t ievt=0; ievt<Data()->GetNEvents(); ievt++) {
const Event *ev = GetEvent(ievt);
for (UInt_t i=0; i<GetNvar(); i++) {
vArr[i] = ev->GetValue( i );
}
type = DataInfo().IsSignal( ev ) ? 1 : 0;
weight = ev->GetWeight();
localTrainingTree->Fill();
}
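// entry-list selections splitting the local tree into training and validation
// subsets: the first (1-fValidationFraction) of the signal block and of the
// background block are used for training (the selection assumes that all
// signal events precede the background events in the tree)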
TString trainList = "Entry$<";
trainList += 1.0-fValidationFraction;
trainList += "*";
trainList += (Int_t)Data()->GetNEvtSigTrain();
trainList += " || (Entry$>";
trainList += (Int_t)Data()->GetNEvtSigTrain();
trainList += " && Entry$<";
trainList += (Int_t)(Data()->GetNEvtSigTrain() + (1.0 - fValidationFraction)*Data()->GetNEvtBkgdTrain());
trainList += ")";
TString testList = TString("!(") + trainList + ")";
Log() << kINFO << "Requirement for training events: \"" << trainList << "\"" << Endl;
Log() << kINFO << "Requirement for validation events: \"" << testList << "\"" << Endl;
if (fMLP != 0) { delete fMLP; fMLP = 0; }
fMLP = new TMultiLayerPerceptron( fMLPBuildOptions.Data(),
localTrainingTree,
trainList,
testList );
fMLP->SetEventWeight( "weight" );
#if ROOT_VERSION_CODE > ROOT_VERSION(5,13,06)
TMultiLayerPerceptron::ELearningMethod learningMethod = TMultiLayerPerceptron::kStochastic;
#else // the enum type carried a different name in older ROOT versions
TMultiLayerPerceptron::LearningMethod learningMethod = TMultiLayerPerceptron::kStochastic;
#endif
fLearningMethod.ToLower();
if (fLearningMethod == "stochastic" ) learningMethod = TMultiLayerPerceptron::kStochastic;
else if (fLearningMethod == "batch" ) learningMethod = TMultiLayerPerceptron::kBatch;
else if (fLearningMethod == "steepestdescent" ) learningMethod = TMultiLayerPerceptron::kSteepestDescent;
else if (fLearningMethod == "ribierepolak" ) learningMethod = TMultiLayerPerceptron::kRibierePolak;
else if (fLearningMethod == "fletcherreeves" ) learningMethod = TMultiLayerPerceptron::kFletcherReeves;
else if (fLearningMethod == "bfgs" ) learningMethod = TMultiLayerPerceptron::kBFGS;
else {
Log() << kFATAL << "Unknown Learning Method: \"" << fLearningMethod << "\"" << Endl;
}
fMLP->SetLearningMethod( learningMethod );
// train the network; "text,update=50" prints the current error every 50 epochs
fMLP->Train(fNcycles, "text,update=50" );
delete localTrainingTree;
delete [] vArr;
}
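////////////////////////////////////////////////////////////////////////////////
/// write the network weights to the XML node: TMultiLayerPerceptron can only
/// dump its weights to a text file, so the file is written, read back, and its
/// "#"-tagged blocks are copied into XML children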
void TMVA::MethodTMlpANN::AddWeightsXMLTo( void* parent ) const
{
void *wght = gTools().AddChild(parent, "Weights");
void* arch = gTools().AddChild( wght, "Architecture" );
gTools().AddAttr( arch, "BuildOptions", fMLPBuildOptions.Data() );
fMLP->DumpWeights( "weights/TMlp.nn.weights.temp" );
std::ifstream inf( "weights/TMlp.nn.weights.temp" );
char temp[256];
TString data("");
void *ch=NULL;
// the dump file consists of blocks introduced by "#<tag> ..." header lines;
// each block becomes one XML child node carrying the numbers as raw text
while (inf.getline(temp,256)) {
TString dummy(temp);
if (dummy.BeginsWith('#')) { // a new block starts
if (ch!=0) gTools().AddRawLine( ch, data.Data() ); // write out the previous block
dummy = dummy.Strip(TString::kLeading, '#');
dummy = dummy(0,dummy.First(' ')); // the block tag, e.g. "input" or "synapses"
ch = gTools().AddChild(wght, dummy);
data.Resize(0);
continue;
}
data += (dummy + " ");
}
if (ch != 0) gTools().AddRawLine( ch, data.Data() ); // write out the last block
inf.close();
}
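////////////////////////////////////////////////////////////////////////////////
/// rebuild the temporary text weight file from the XML content, recreate the
/// network from the stored build options, and load the weights from that file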
void TMVA::MethodTMlpANN::ReadWeightsFromXML( void* wghtnode )
{
void* ch = gTools().GetChild(wghtnode);
gTools().ReadAttr( ch, "BuildOptions", fMLPBuildOptions );
ch = gTools().GetNextChild(ch);
const char* fname = "weights/TMlp.nn.weights.temp";
std::ofstream fout( fname );
double temp1=0,temp2=0;
while (ch) {
const char* nodecontent = gTools().GetContent(ch);
std::stringstream content(nodecontent);
if (strcmp(gTools().GetName(ch),"input")==0) {
fout << "#input normalization" << std::endl;
while ((content >> temp1) &&(content >> temp2)) {
fout << temp1 << " " << temp2 << std::endl;
}
}
if (strcmp(gTools().GetName(ch),"output")==0) {
fout << "#output normalization" << std::endl;
while ((content >> temp1) &&(content >> temp2)) {
fout << temp1 << " " << temp2 << std::endl;
}
}
if (strcmp(gTools().GetName(ch),"neurons")==0) {
fout << "#neurons weights" << std::endl;
while (content >> temp1) {
fout << temp1 << std::endl;
}
}
if (strcmp(gTools().GetName(ch),"synapses")==0) {
fout << "#synapses weights" ;
while (content >> temp1) {
fout << std::endl << temp1 ;
}
}
ch = gTools().GetNextChild(ch);
}
fout.close();
// TMultiLayerPerceptron needs a tree to define its input layer even when the
// weights come from file: build an empty dummy tree with one branch per variable
TTHREAD_TLS_DECL_ARG(Double_t*, d, new Double_t[Data()->GetNVariables()]);
TTHREAD_TLS(Int_t) type;
gROOT->cd();
TTree * dummyTree = new TTree("dummy","Empty dummy tree", 1);
for (UInt_t ivar = 0; ivar<Data()->GetNVariables(); ivar++) {
TString vn = DataInfo().GetVariableInfo(ivar).GetInternalName();
dummyTree->Branch(Form("%s",vn.Data()), d+ivar, Form("%s/D",vn.Data()));
}
dummyTree->Branch("type", &type, "type/I");
if (fMLP != 0) { delete fMLP; fMLP = 0; }
fMLP = new TMultiLayerPerceptron( fMLPBuildOptions.Data(), dummyTree );
fMLP->LoadWeights( fname );
}
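////////////////////////////////////////////////////////////////////////////////
/// read the weights from a plain-text stream (legacy weight-file format)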
void TMVA::MethodTMlpANN::ReadWeightsFromStream( std::istream& istr )
{
std::ofstream fout( "./TMlp.nn.weights.temp" );
fout << istr.rdbuf();
fout.close();
Log() << kINFO << "Load TMLP weights into " << fMLP << Endl;
Double_t* d = new Double_t[Data()->GetNVariables()] ;
Int_t type;
gROOT->cd();
TTree * dummyTree = new TTree("dummy","Empty dummy tree", 1);
for (UInt_t ivar = 0; ivar<Data()->GetNVariables(); ivar++) {
TString vn = DataInfo().GetVariableInfo(ivar).GetLabel();
dummyTree->Branch(Form("%s",vn.Data()), d+ivar, Form("%s/D",vn.Data()));
}
dummyTree->Branch("type", &type, "type/I");
if (fMLP != 0) { delete fMLP; fMLP = 0; }
fMLP = new TMultiLayerPerceptron( fMLPBuildOptions.Data(), dummyTree );
fMLP->LoadWeights( "./TMlp.nn.weights.temp" );
delete [] d;
}
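////////////////////////////////////////////////////////////////////////////////
/// create the standalone response class, delegating the work to
/// TMultiLayerPerceptron::Export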
void TMVA::MethodTMlpANN::MakeClass( const TString& theClassFileName ) const
{
TString classFileName = "";
if (theClassFileName == "")
classFileName = GetWeightFileDir() + "/" + GetJobName() + "_" + GetMethodName() + ".class";
else
classFileName = theClassFileName;
classFileName.ReplaceAll(".class",""); // Export() appends its own file extensions
Log() << kINFO << "Creating specific (TMultiLayerPerceptron) standalone response class: " << classFileName << Endl;
fMLP->Export( classFileName.Data() );
}
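////////////////////////////////////////////////////////////////////////////////
/// nothing to do here: the standalone class is written by Export in MakeClass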
void TMVA::MethodTMlpANN::MakeClassSpecific( std::ostream& , const TString& ) const
{
}
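////////////////////////////////////////////////////////////////////////////////
/// get help message text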
void TMVA::MethodTMlpANN::GetHelpMessage() const
{
Log() << Endl;
Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
Log() << Endl;
Log() << "This feed-forward multilayer perceptron neural network is the " << Endl;
Log() << "standard implementation distributed with ROOT (class TMultiLayerPerceptron)." << Endl;
Log() << Endl;
Log() << "Detailed information is available here:" << Endl;
if (gConfig().WriteOptionsReference()) {
Log() << "<a href=\"http://root.cern.ch/root/html/TMultiLayerPerceptron.html\">";
Log() << "http://root.cern.ch/root/html/TMultiLayerPerceptron.html</a>" << Endl;
}
else Log() << "http://root.cern.ch/root/html/TMultiLayerPerceptron.html" << Endl;
Log() << Endl;
}