TMVA_Higgs_Classification.C
/// \file
/// \ingroup tutorial_tmva
/// \notebook
/// Classification example of TMVA based on the public Higgs UCI dataset
///
/// The UCI data set is a public HIGGS data set, see http://archive.ics.uci.edu/ml/datasets/HIGGS
/// used in this paper: Baldi, P., P. Sadowski, and D. Whiteson. "Searching for Exotic Particles in High-energy Physics
/// with Deep Learning." Nature Communications 5 (July 2, 2014).
///
/// \macro_image
/// \macro_output
/// \macro_code
///
/// \author Lorenzo Moneta

/***
## Declare Factory

Create the Factory class. Later you can choose the methods
whose performance you'd like to investigate.

The factory is the main TMVA object you have to interact with. Here is the list of parameters you need to pass:

 - The first argument is the base of the name of all the output
weight files in the directory weights/ that will be created with the
method parameters.

 - The second argument is the output file for the training results.

 - The third argument is a string option defining some general configuration for the TMVA session.
   For example, all TMVA output can be suppressed by removing the "!" (not) in front of the "Silent" argument in the option string.

**/

void TMVA_Higgs_Classification()
{

   // options to control used methods

   bool useLikelihood = true;     // likelihood based discriminant
   bool useLikelihoodKDE = false; // likelihood based discriminant with kernel density estimation
   bool useFisher = true;         // Fisher discriminant
   bool useMLP = false;           // Multi Layer Perceptron (old TMVA NN implementation)
   bool useBDT = true;            // Boosted Decision Tree
   bool useDL = true;             // TMVA Deep Learning (CPU or GPU)
   bool useKeras = true;          // Keras deep learning
   bool usePyTorch = true;        // PyTorch deep learning

   TMVA::Tools::Instance();

#ifdef R__HAS_PYMVA
   gSystem->Setenv("KERAS_BACKEND", "tensorflow");
   // for using Keras
   TMVA::PyMethodBase::PyInitialize();
#else
   useKeras = false;
   usePyTorch = false;
#endif

   auto outputFile = TFile::Open("Higgs_ClassificationOutput.root", "RECREATE");

   TMVA::Factory factory("TMVA_Higgs_Classification", outputFile,
                         "!V:ROC:!Silent:Color:AnalysisType=Classification");

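// Note: as described above, removing the "!" in front of "Silent" suppresses all
// TMVA screen output. A minimal sketch of such a quiet session (same parameters
// otherwise; shown commented out, not part of the original tutorial):
//
//    TMVA::Factory quietFactory("TMVA_Higgs_Classification", outputFile,
//                               "!V:ROC:Silent:Color:AnalysisType=Classification");
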
/**

## Setup Dataset(s)

Define now the input data file and the signal and background trees.

 **/

   TString inputFileName = gROOT->GetTutorialDir() + "/tmva/data/Higgs_data.root";

   TFile *inputFile = nullptr;
   if (!gSystem->AccessPathName(inputFileName)) {
      // file exists
      inputFile = TFile::Open(inputFileName);
   }
   if (!inputFile) {
      Error("TMVA_Higgs_Classification", "Input file is not found - exit");
      return;
   }

// --- Register the training and test trees

   TTree *signalTree = (TTree*)inputFile->Get("sig_tree");
   TTree *backgroundTree = (TTree*)inputFile->Get("bkg_tree");

   signalTree->Print();

/***
## Declare DataLoader(s)

The next step is to declare the DataLoader class that deals with input variables.

Define the input variables that shall be used for the MVA training.
Note that you may also use variable expressions, which can be parsed by TTree::Draw("expression").

***/

   TMVA::DataLoader *loader = new TMVA::DataLoader("dataset");

   loader->AddVariable("m_jj");
   loader->AddVariable("m_jjj");
   loader->AddVariable("m_lv");
   loader->AddVariable("m_jlv");
   loader->AddVariable("m_bb");
   loader->AddVariable("m_wbb");
   loader->AddVariable("m_wwbb");

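// As noted above, variable expressions parsed by TTree::Draw are also accepted.
// A hypothetical sketch (the ratio below is illustrative, not part of the original
// variable list):
//
//    loader->AddVariable("m_bb/m_wbb", 'F');
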
/// We now set the input data trees in the TMVA DataLoader class

// global event weights per tree (see below for setting event-wise weights)
   Double_t signalWeight     = 1.0;
   Double_t backgroundWeight = 1.0;

// You can add an arbitrary number of signal or background trees
   loader->AddSignalTree    ( signalTree,     signalWeight     );
   loader->AddBackgroundTree( backgroundTree, backgroundWeight );


// Set individual event weights (the variables must exist in the original TTree)
// for signal    : loader->SetSignalWeightExpression    ("weight1*weight2");
// for background: loader->SetBackgroundWeightExpression("weight1*weight2");
// loader->SetBackgroundWeightExpression( "weight" );

// Apply additional cuts on the signal and background samples (can be different)
   TCut mycuts = ""; // for example: TCut mycuts = "abs(var1)<0.5 && abs(var2-0.5)<1";
   TCut mycutb = ""; // for example: TCut mycutb = "abs(var1)<0.5";

// Tell the factory how to use the training and testing events
//
// If no numbers of events are given, half of the events in the tree are used
// for training, and the other half for testing:
//    loader->PrepareTrainingAndTestTree( mycut, "SplitMode=Random:!V" );
// To specify the number of training events explicitly, use:

   loader->PrepareTrainingAndTestTree( mycuts, mycutb,
      "nTrain_Signal=7000:nTrain_Background=7000:SplitMode=Random:NormMode=NumEvents:!V" );

/***
## Booking Methods

Here we book the TMVA methods. We book first a Likelihood based on KDE (Kernel Density Estimation), a Fisher discriminant, a BDT
and a shallow neural network.

 */


// Likelihood ("naive Bayes estimator")
if (useLikelihood) {
   factory.BookMethod(loader, TMVA::Types::kLikelihood, "Likelihood",
                      "H:!V:TransformOutput:PDFInterpol=Spline2:NSmoothSig[0]=20:NSmoothBkg[0]=20:NSmoothBkg[1]=10:NSmooth=1:NAvEvtPerBin=50" );
}
// Use a kernel density estimator to approximate the PDFs
if (useLikelihoodKDE) {
   factory.BookMethod(loader, TMVA::Types::kLikelihood, "LikelihoodKDE",
                      "!H:!V:!TransformOutput:PDFInterpol=KDE:KDEtype=Gauss:KDEiter=Adaptive:KDEFineFactor=0.3:KDEborder=None:NAvEvtPerBin=50" );
}

// Fisher discriminant (same as LD)
if (useFisher) {
   factory.BookMethod(loader, TMVA::Types::kFisher, "Fisher", "H:!V:Fisher:VarTransform=None:CreateMVAPdfs:PDFInterpolMVAPdf=Spline2:NbinsMVAPdf=50:NsmoothMVAPdf=10" );
}

// Boosted Decision Trees
if (useBDT) {
   factory.BookMethod(loader, TMVA::Types::kBDT, "BDT",
                      "!V:NTrees=200:MinNodeSize=2.5%:MaxDepth=2:BoostType=AdaBoost:AdaBoostBeta=0.5:UseBaggedBoost:BaggedSampleFraction=0.5:SeparationType=GiniIndex:nCuts=20" );
}

// Multi-Layer Perceptron (Neural Network)
if (useMLP) {
   factory.BookMethod(loader, TMVA::Types::kMLP, "MLP",
                      "!H:!V:NeuronType=tanh:VarTransform=N:NCycles=100:HiddenLayers=N+5:TestRate=5:!UseRegulator" );
}


/// Here we book the new DNN of TMVA if we have support in ROOT. We will use the GPU version if ROOT is built with GPU support.


 /***

## Booking Deep Neural Network

Here we define the option string for building the Deep Neural Network model.

#### 1. Define DNN layout

The DNN configuration is defined using a string. Note that whitespaces between characters are not allowed.

We define first the DNN layout:

- **input layout** : this defines the input data format for the DNN as ``input depth | height | width``.
   In case of a dense layer as first layer the input layout should be ``1 | 1 | number of input variables`` (features)
- **batch layout** : this defines how the input batch is arranged. It is related to the input layout but not the same.
   If the first layer is dense it should be ``1 | batch size | number of variables`` (features)

   *(note the use of the character `|` as separator of input parameters for DNN layout)*

Note that in case of only dense layers the input layout can be omitted, but it is required when defining more
complex architectures.

- **layer layout** string defining the layer architecture. The syntax is
   - layer type (e.g. DENSE, CONV, RNN)
   - layer parameters (e.g. number of units)
   - activation function (e.g. TANH, RELU, ...)

   *(the different layers are separated by the ``","`` separator)*

#### 2. Define Training Strategy

We define here the training strategy parameters for the DNN. The parameters are separated by the ``","`` separator.
One can then concatenate different training strategies with different parameters. The training strategies are separated by
the ``"|"`` separator:

 - Optimizer
 - Learning rate
 - Momentum (valid for SGD and RMSPROP)
 - Regularization and Weight Decay
 - Dropout
 - Max number of epochs
 - Convergence steps: if the test error does not decrease for this number of steps, the training stops
 - Batch size (this value must be the same as the one specified in the batch layout)
 - Test Repetitions (the interval at which the test error is computed)


#### 3. Define general DNN options

We define the general DNN options by concatenating in the final string the previously defined layout and training strategy.
Note we use the ``":"`` separator to separate the different higher-level options, as in the other TMVA methods.
In addition to the input layout, batch layout and training strategy we now add:

- Type of Loss function (e.g. CROSSENTROPY)
- Weight Initialization (e.g. XAVIER, XAVIERUNIFORM, NORMAL)
- Variable Transformation
- Type of Architecture (e.g. CPU, GPU, Standard)

We can then book the DL method using the built option string.

 ***/

   if (useDL) {

      bool useDLGPU = false;
#ifdef R__HAS_TMVAGPU
      useDLGPU = true;
#endif

      // Define DNN layout
      TString inputLayoutString = "InputLayout=1|1|7";
      TString batchLayoutString = "BatchLayout=1|128|7";
      TString layoutString("Layout=DENSE|64|TANH,DENSE|64|TANH,DENSE|64|TANH,DENSE|64|TANH,DENSE|1|LINEAR");
      // Define Training strategies
      // one can concatenate several training strategies
      TString training1("LearningRate=1e-3,Momentum=0.9,"
                        "ConvergenceSteps=10,BatchSize=128,TestRepetitions=1,"
                        "MaxEpochs=30,WeightDecay=1e-4,Regularization=None,"
                        "Optimizer=ADAM,ADAM_beta1=0.9,ADAM_beta2=0.999,ADAM_eps=1.E-7," // ADAM default parameters
                        "DropConfig=0.0+0.0+0.0+0.");
      // TString training2("LearningRate=1e-3,Momentum=0.9,"
      //                   "ConvergenceSteps=10,BatchSize=128,TestRepetitions=1,"
      //                   "MaxEpochs=20,WeightDecay=1e-4,Regularization=None,"
      //                   "Optimizer=SGD,DropConfig=0.0+0.0+0.0+0.");

      TString trainingStrategyString("TrainingStrategy=");
      trainingStrategyString += training1; // + "|" + training2;

      // General Options.

      TString dnnOptions("!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=G:"
                         "WeightInitialization=XAVIER");
      dnnOptions.Append(":"); dnnOptions.Append(inputLayoutString);
      dnnOptions.Append(":"); dnnOptions.Append(batchLayoutString);
      dnnOptions.Append(":"); dnnOptions.Append(layoutString);
      dnnOptions.Append(":"); dnnOptions.Append(trainingStrategyString);

      TString dnnMethodName = "DNN_CPU";
      if (useDLGPU) {
         dnnOptions += ":Architecture=GPU";
         dnnMethodName = "DNN_GPU";
      } else {
         dnnOptions += ":Architecture=CPU";
      }

      factory.BookMethod(loader, TMVA::Types::kDL, dnnMethodName, dnnOptions);
   }

   // Keras deep learning
   if (useKeras) {

      Info("TMVA_Higgs_Classification", "Building deep neural network with keras ");
      // create python script which can be executed,
      // building a model with 4 dense hidden layers and a 2-unit sigmoid output
      TMacro m;
      m.AddLine("import tensorflow");
      m.AddLine("from tensorflow.keras.models import Sequential");
      m.AddLine("from tensorflow.keras.optimizers import Adam");
      m.AddLine("from tensorflow.keras.layers import Input, Dense");
      m.AddLine("");
      m.AddLine("model = Sequential() ");
      m.AddLine("model.add(Dense(64, activation='relu',input_dim=7))");
      m.AddLine("model.add(Dense(64, activation='relu'))");
      m.AddLine("model.add(Dense(64, activation='relu'))");
      m.AddLine("model.add(Dense(64, activation='relu'))");
      m.AddLine("model.add(Dense(2, activation='sigmoid'))");
      m.AddLine("model.compile(loss = 'binary_crossentropy', optimizer = Adam(learning_rate = 0.001), weighted_metrics = ['accuracy'])");
      m.AddLine("model.save('Higgs_model.h5')");
      m.AddLine("model.summary()");

      m.SaveSource("make_higgs_model.py");
      // execute the script with the Python interpreter configured for TMVA
      auto ret = (TString *)gROOT->ProcessLine("TMVA::Python_Executable()");
      TString python_exe = (ret) ? *(ret) : "python";
      gSystem->Exec(python_exe + " make_higgs_model.py");

      if (gSystem->AccessPathName("Higgs_model.h5")) {
         Warning("TMVA_Higgs_Classification", "Error creating Keras model file - skip using Keras");
      } else {
         // book PyKeras method only if the Keras model could be created
         Info("TMVA_Higgs_Classification", "Booking tf.Keras Dense model");
         factory.BookMethod(
            loader, TMVA::Types::kPyKeras, "PyKeras",
            "H:!V:VarTransform=None:FilenameModel=Higgs_model.h5:tf.keras:"
            "FilenameTrainedModel=Higgs_trained_model.h5:NumEpochs=20:BatchSize=100:"
            "GpuOptions=allow_growth=True"); // needed for RTX NVidia cards, to avoid TF allocating all GPU memory
      }
   }

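   // The usePyTorch flag declared above is not used in this version of the tutorial:
   // no PyTorch method is booked here. As a hedged sketch, a booking analogous to
   // PyKeras would use TMVA::Types::kPyTorch with a serialized model file (the file
   // names below are illustrative assumptions, not part of the original tutorial):
   //
   // factory.BookMethod(loader, TMVA::Types::kPyTorch, "PyTorch",
   //    "H:!V:VarTransform=None:FilenameModel=PyTorchModelHiggs.pt:"
   //    "FilenameTrainedModel=PyTorchTrainedModelHiggs.pt:NumEpochs=20:BatchSize=100");
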
 /**
## Train Methods

Here we train all the previously booked methods.

 */

   factory.TrainAllMethods();

/**
 ## Test all methods

 Now we test and evaluate all methods using the test data set
*/

   factory.TestAllMethods();

   factory.EvaluateAllMethods();

/// After training we retrieve the ROC curve and display it

   auto c1 = factory.GetROCCurve(loader);
   c1->Draw();

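// Optionally the ROC canvas could also be written to an image file; a small sketch
// (the file name is an illustrative assumption):
//
//    c1->SaveAs("Higgs_ROC_curves.png");
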
/// At the end we close the output file, which contains the evaluation results of all methods and can be used by TMVAGUI
/// to display additional plots

   outputFile->Close();

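// To browse these results interactively one could launch the TMVA GUI on the output
// file; a minimal sketch (requires a graphical ROOT session):
//
//    TMVA::TMVAGui("Higgs_ClassificationOutput.root");
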

}