/// \file
/// \ingroup tutorial_tmva
/// \notebook
/// TMVA Classification Example Using a Convolutional Neural Network
///
/// This is an example of using a CNN in TMVA. We perform the classification on a toy image data set
/// that is generated when running the macro.
///
/// \macro_image
/// \macro_output
/// \macro_code
///
/// \author Lorenzo Moneta

/***

 # TMVA Classification Example Using a Convolutional Neural Network


**/

/// Helper function to create the input image data.
/// We create signal and background 2D histograms from 2D gaussians
/// whose location (means in X and Y) is different for each event.
/// The difference between signal and background lies in the width of the gaussians:
/// the background width differs from the signal one by a few percent.
///
///
void MakeImagesTree(int n, int nh, int nw)
{

   // image size (nh x nw)
   const int ntot = nh * nw;
   const TString fileOutName = TString::Format("images_data_%dx%d.root", nh, nw);

   const int nRndmEvts = 10000; // number of events we use to fill each image
   double delta_sigma = 0.1;    // ~3% relative difference in the gaussian sigma (0.1 out of 3)
   double pixelNoise = 5;

   double sX1 = 3;
   double sY1 = 3;
   double sX2 = sX1 + delta_sigma;
   double sY2 = sY1 - delta_sigma;

   auto h1 = new TH2D("h1", "h1", nh, 0, 10, nw, 0, 10);
   auto h2 = new TH2D("h2", "h2", nh, 0, 10, nw, 0, 10);

   auto f1 = new TF2("f1", "xygaus");
   auto f2 = new TF2("f2", "xygaus");
   TTree sgn("sig_tree", "signal_tree");
   TTree bkg("bkg_tree", "background_tree");

   TFile f(fileOutName, "RECREATE");

   std::vector<float> x1(ntot);
   std::vector<float> x2(ntot);

   // create signal and background trees with a single branch:
   // an std::vector<float> of size nh x nw containing the image data

   std::vector<float> *px1 = &x1;
   std::vector<float> *px2 = &x2;

   bkg.Branch("vars", "std::vector<float>", &px1);
   sgn.Branch("vars", "std::vector<float>", &px2);

   // std::cout << "create tree " << std::endl;

   sgn.SetDirectory(&f);
   bkg.SetDirectory(&f);

   f1->SetParameters(1, 5, sX1, 5, sY1);
   f2->SetParameters(1, 5, sX2, 5, sY2);
   gRandom->SetSeed(0);
   std::cout << "Filling ROOT tree " << std::endl;
   for (int i = 0; i < n; ++i) {
      if (i % 1000 == 0)
         std::cout << "Generating image event ... " << i << std::endl;
      h1->Reset();
      h2->Reset();
      // generate random means in the range [3,7], so the gaussians are not too close to the border
      f1->SetParameter(1, gRandom->Uniform(3, 7));
      f1->SetParameter(3, gRandom->Uniform(3, 7));
      f2->SetParameter(1, gRandom->Uniform(3, 7));
      f2->SetParameter(3, gRandom->Uniform(3, 7));

      h1->FillRandom("f1", nRndmEvts);
      h2->FillRandom("f2", nRndmEvts);

      for (int k = 0; k < nh; ++k) {
         for (int l = 0; l < nw; ++l) {
            int m = k * nw + l;
            // add some noise to each bin
            x1[m] = h1->GetBinContent(k + 1, l + 1) + gRandom->Gaus(0, pixelNoise);
            x2[m] = h2->GetBinContent(k + 1, l + 1) + gRandom->Gaus(0, pixelNoise);
         }
      }
      sgn.Fill();
      bkg.Fill();
   }
   sgn.Write();
   bkg.Write();

   Info("MakeImagesTree", "Signal and background trees with image data written to the file %s", f.GetName());
   sgn.Print();
   bkg.Print();
   f.Close();
}
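
// Example standalone use of this helper (the classification macro below calls it
// automatically when the input file is missing): generate 5000 signal and 5000
// background 16x16 images,
//
//    MakeImagesTree(5000, 16, 16); // -> images_data_16x16.root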

void TMVA_CNN_Classification(std::vector<bool> opt = {1, 1, 1, 1, 1})
{

   bool useTMVACNN = (opt.size() > 0) ? opt[0] : false;
   bool useKerasCNN = (opt.size() > 1) ? opt[1] : false;
   bool useTMVADNN = (opt.size() > 2) ? opt[2] : false;
   bool useTMVABDT = (opt.size() > 3) ? opt[3] : false;
   bool usePyTorchCNN = (opt.size() > 4) ? opt[4] : false;
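   // The flags in opt enable the methods in the order
   // {TMVA CNN, Keras CNN, TMVA DNN, TMVA BDT, PyTorch CNN}. For example, to run
   // only the BDT one could invoke the macro as (illustrative command line):
   //
   //    root -l 'TMVA_CNN_Classification.C({0, 0, 0, 1, 0})'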
#ifndef R__HAS_TMVACPU
#ifndef R__HAS_TMVAGPU
   Warning("TMVA_CNN_Classification",
           "TMVA is not built with GPU or CPU multi-thread support. Cannot use TMVA Deep Learning for CNN");
   useTMVACNN = false;
#endif
#endif

   bool writeOutputFile = true;
   int num_threads = 0; // use default threads

   TMVA::Tools::Instance();

   // enable implicit multi-threading with the requested number of threads
   if (num_threads >= 0) {
      ROOT::EnableImplicitMT(num_threads);
      if (num_threads > 0)
         gSystem->Setenv("OMP_NUM_THREADS", TString::Format("%d", num_threads));
   } else
      gSystem->Setenv("OMP_NUM_THREADS", "1");

   std::cout << "Running with nthreads = " << ROOT::GetThreadPoolSize() << std::endl;

#ifdef R__HAS_PYMVA
   gSystem->Setenv("KERAS_BACKEND", "tensorflow");
   // initialize the Python interpreter for using Keras
   TMVA::PyMethodBase::PyInitialize();
#else
   useKerasCNN = false;
   usePyTorchCNN = false;
#endif

   TFile *outputFile = nullptr;
   if (writeOutputFile)
      outputFile = TFile::Open("TMVA_CNN_ClassificationOutput.root", "RECREATE");

   /***
    ## Create TMVA Factory

    Create the Factory class. Later you can choose the methods
    whose performance you'd like to investigate.

    The factory is the major TMVA object you have to interact with. Here is the list of parameters you need to pass:

    - The first argument is the base of the name of all the output
      weight files in the directory weight/ that will be created with the
      method parameters.

    - The second argument is the output file for the training results.

    - The third argument is a string option defining some general configuration for the TMVA session.
      For example, all TMVA output can be suppressed by removing the "!" (not) in front of the "Silent" argument in the
      option string.

    - Note that we disable any pre-transformation of the input variables and we avoid computing correlations between
      the input variables.
   ***/

   TMVA::Factory factory(
      "TMVA_CNN_Classification", outputFile,
      "!V:ROC:!Silent:Color:AnalysisType=Classification:Transformations=None:!Correlations");

   /***

    ## Declare DataLoader(s)

    The next step is to declare the DataLoader class that deals with input variables.

    Define the input variables that shall be used for the MVA training;
    note that you may also use variable expressions, which can be parsed by TTree::Draw( "expression" ).

    In this case the input data consists of images of 16x16 pixels, each stored in the ROOT TTree
    as a single branch holding an std::vector<float> of size 256.

   **/
   TMVA::DataLoader *loader = new TMVA::DataLoader("dataset");

   /***

    ## Setup Dataset(s)

    Define the input data file and the signal and background trees.

   **/

   int imgSize = 16 * 16;
   TString inputFileName = "images_data_16x16.root";

   bool fileExist = !gSystem->AccessPathName(inputFileName);

   // if the file does not exist, create it
   if (!fileExist) {
      MakeImagesTree(5000, 16, 16);
   }

   // TString inputFileName = "tmva_class_example.root";

   auto inputFile = TFile::Open(inputFileName);
   if (!inputFile) {
      Error("TMVA_CNN_Classification", "Error opening input file %s - exit", inputFileName.Data());
      return;
   }

   // --- Register the training and test trees

   TTree *signalTree = (TTree *)inputFile->Get("sig_tree");
   TTree *backgroundTree = (TTree *)inputFile->Get("bkg_tree");

   int nEventsSig = signalTree->GetEntries();
   int nEventsBkg = backgroundTree->GetEntries();

   // global event weights per tree (see below for setting event-wise weights)
   Double_t signalWeight = 1.0;
   Double_t backgroundWeight = 1.0;

   // You can add an arbitrary number of signal or background trees
   loader->AddSignalTree(signalTree, signalWeight);
   loader->AddBackgroundTree(backgroundTree, backgroundWeight);

   /// add event variables (image)
   /// use the new method (available since ROOT 6.20) to add a variable array holding all the image data
   loader->AddVariablesArray("vars", imgSize);
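
   // Before AddVariablesArray was available, each pixel had to be registered as an
   // individual variable; a rough equivalent (shown only for illustration, not
   // executed here) would be:
   //
   //    for (int i = 0; i < imgSize; ++i)
   //       loader->AddVariable(TString::Format("vars[%d]", i), 'F');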

   // Set individual event weights (the variables must exist in the original TTree):
   // for signal    : loader->SetSignalWeightExpression("weight1*weight2");
   // for background: loader->SetBackgroundWeightExpression("weight1*weight2");
   // loader->SetBackgroundWeightExpression( "weight" );

   // Apply additional cuts on the signal and background samples (can be different)
   TCut mycuts = ""; // for example: TCut mycuts = "abs(var1)<0.5 && abs(var2-0.5)<1";
   TCut mycutb = ""; // for example: TCut mycutb = "abs(var1)<0.5";

   // Tell the factory how to use the training and testing events.
   //
   // If no numbers of events are given, half of the events in the tree are used
   // for training, and the other half for testing:
   //    loader->PrepareTrainingAndTestTree( mycut, "SplitMode=random:!V" );
   // It is also possible to specify the number of training and testing events;
   // note we disable the computation of the correlation matrix of the input variables

   int nTrainSig = 0.8 * nEventsSig;
   int nTrainBkg = 0.8 * nEventsBkg;

   // build the string options for DataLoader::PrepareTrainingAndTestTree
   TString prepareOptions = TString::Format(
      "nTrain_Signal=%d:nTrain_Background=%d:SplitMode=Random:SplitSeed=100:NormMode=NumEvents:!V:!CalcCorrelations",
      nTrainSig, nTrainBkg);

   loader->PrepareTrainingAndTestTree(mycuts, mycutb, prepareOptions);

   /***

    DataSetInfo              : [dataset] : Added class "Signal"
                             : Add Tree sig_tree of type Signal with 5000 events
    DataSetInfo              : [dataset] : Added class "Background"
                             : Add Tree bkg_tree of type Background with 5000 events

   **/

   // signalTree->Print();

   /***
    ## Booking Methods

    Here we book the TMVA methods. We start with a Boosted Decision Tree (BDT).

   **/

   // Boosted Decision Trees
   if (useTMVABDT) {
      factory.BookMethod(loader, TMVA::Types::kBDT, "BDT",
                         "!V:NTrees=400:MinNodeSize=2.5%:MaxDepth=2:BoostType=AdaBoost:AdaBoostBeta=0.5:"
                         "UseBaggedBoost:BaggedSampleFraction=0.5:SeparationType=GiniIndex:nCuts=20");
   }
   /**
    ### Booking Deep Neural Network

    Here we book the DNN of TMVA. See the example TMVA_Higgs_Classification.C for a detailed description of the
    options.

   **/

   if (useTMVADNN) {

      TString layoutString(
         "Layout=DENSE|100|RELU,BNORM,DENSE|100|RELU,BNORM,DENSE|100|RELU,BNORM,DENSE|100|RELU,DENSE|1|LINEAR");

      // Training strategies:
      // one can concatenate several training strings with different parameters (e.g. learning rates or
      // regularization parameters); the training strings must be joined with the `|` delimiter
      TString trainingString1("LearningRate=1e-3,Momentum=0.9,Repetitions=1,"
                              "ConvergenceSteps=5,BatchSize=100,TestRepetitions=1,"
                              "MaxEpochs=20,WeightDecay=1e-4,Regularization=None,"
                              "Optimizer=ADAM,DropConfig=0.0+0.0+0.0+0.0");

      TString trainingStrategyString("TrainingStrategy=");
      trainingStrategyString += trainingString1; // + "|" + trainingString2 + ....
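
      // For illustration only (not executed): a second training phase with a lower
      // learning rate could be appended, assuming a hypothetical trainingString2
      // defined along the lines of trainingString1:
      //
      //    TString trainingString2("LearningRate=1e-4,Momentum=0.9, ...");
      //    trainingStrategyString += "|" + trainingString2;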

      // Build now the full DNN option string

      TString dnnOptions("!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=None:"
                         "WeightInitialization=XAVIER");
      dnnOptions.Append(":");
      dnnOptions.Append(layoutString);
      dnnOptions.Append(":");
      dnnOptions.Append(trainingStrategyString);

      TString dnnMethodName = "TMVA_DNN_CPU";
// use GPU if available
#ifdef R__HAS_TMVAGPU
      dnnOptions += ":Architecture=GPU";
      dnnMethodName = "TMVA_DNN_GPU";
#elif defined(R__HAS_TMVACPU)
      dnnOptions += ":Architecture=CPU";
#endif

      factory.BookMethod(loader, TMVA::Types::kDL, dnnMethodName, dnnOptions);
   }

   /***
    ### Book Convolutional Neural Network in TMVA

    For building a CNN one needs to define

    -  Input Layout : number of channels (in this case = 1) | image height | image width
    -  Batch Layout : batch size | number of channels | image size = (height*width)

    Then one adds Convolutional layers and MaxPool layers.

    -  For a Convolutional layer the option string has to be:
       - CONV | number of units | filter height | filter width | stride height | stride width | padding height |
         padding width | activation function

       - note that in this case we use a 3x3 filter with padding=1 and stride=1, so the output dimension of the
         conv layer is equal to the input

       - note that after the first convolutional layer we add a batch normalization layer, which seems to help the
         convergence significantly

    -  For the MaxPool layer:
       - MAXPOOL | pool height | pool width | stride height | stride width

    The RESHAPE layer is needed to flatten the output before the Dense layer.

    The layout strings used for the 16x16 images of this example are illustrated in the comment right after this
    block.

    Note that running the TMVA CNN requires a build with CPU or GPU support.

   ***/
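
   // Illustration (assumed values, matching the 16x16 single-channel images and the
   // BatchSize=100 used below; the code only sets InputLayout explicitly and relies
   // on the default batch layout):
   //
   //    InputLayout=1|16|16    // channels | height | width
   //    BatchLayout=100|1|256  // batch size | channels | height*width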

   if (useTMVACNN) {

      TString inputLayoutString("InputLayout=1|16|16");

      // Layer layout
      TString layoutString("Layout=CONV|10|3|3|1|1|1|1|RELU,BNORM,CONV|10|3|3|1|1|1|1|RELU,MAXPOOL|2|2|1|1,"
                           "RESHAPE|FLAT,DENSE|100|RELU,DENSE|1|LINEAR");

      // Training strategies.
      TString trainingString1("LearningRate=1e-3,Momentum=0.9,Repetitions=1,"
                              "ConvergenceSteps=5,BatchSize=100,TestRepetitions=1,"
                              "MaxEpochs=20,WeightDecay=1e-4,Regularization=None,"
                              "Optimizer=ADAM,DropConfig=0.0+0.0+0.0+0.0");

      TString trainingStrategyString("TrainingStrategy=");
      trainingStrategyString +=
         trainingString1; // + "|" + trainingString2 + "|" + trainingString3; for concatenating more training strings

      // Build the full CNN option string.
      TString cnnOptions("!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=None:"
                         "WeightInitialization=XAVIER");

      cnnOptions.Append(":");
      cnnOptions.Append(inputLayoutString);
      cnnOptions.Append(":");
      cnnOptions.Append(layoutString);
      cnnOptions.Append(":");
      cnnOptions.Append(trainingStrategyString);

      // New DL (CNN) method
      TString cnnMethodName = "TMVA_CNN_CPU";
// use GPU if available
#ifdef R__HAS_TMVAGPU
      cnnOptions += ":Architecture=GPU";
      cnnMethodName = "TMVA_CNN_GPU";
#else
      cnnOptions += ":Architecture=CPU";
      cnnMethodName = "TMVA_CNN_CPU";
#endif

      factory.BookMethod(loader, TMVA::Types::kDL, cnnMethodName, cnnOptions);
   }

   /**
    ### Book Convolutional Neural Network in Keras using a generated model

   **/

   if (useKerasCNN) {

      Info("TMVA_CNN_Classification", "Building convolutional keras model");
      // create a python script which can be executed
      // create 2 conv2d layers + maxpool + dense
      TMacro m;
      m.AddLine("import tensorflow");
      m.AddLine("from tensorflow.keras.models import Sequential");
      m.AddLine("from tensorflow.keras.optimizers import Adam");
      m.AddLine(
         "from tensorflow.keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, Reshape, BatchNormalization");
      m.AddLine("");
      m.AddLine("model = Sequential() ");
      m.AddLine("model.add(Reshape((16, 16, 1), input_shape = (256, )))");
      m.AddLine("model.add(Conv2D(10, kernel_size = (3, 3), kernel_initializer = 'glorot_normal', activation = "
                "'relu', padding = 'same'))");
      m.AddLine("model.add(BatchNormalization())");
      m.AddLine("model.add(Conv2D(10, kernel_size = (3, 3), kernel_initializer = 'glorot_normal', activation = "
                "'relu', padding = 'same'))");
      // m.AddLine("model.add(BatchNormalization())");
      m.AddLine("model.add(MaxPooling2D(pool_size = (2, 2), strides = (1,1))) ");
      m.AddLine("model.add(Flatten())");
      m.AddLine("model.add(Dense(256, activation = 'relu')) ");
      m.AddLine("model.add(Dense(2, activation = 'sigmoid')) ");
      m.AddLine("model.compile(loss = 'binary_crossentropy', optimizer = Adam(learning_rate = 0.001), metrics = "
                "['accuracy'])");
      m.AddLine("model.save('model_cnn.h5')");
      m.AddLine("model.summary()");

      m.SaveSource("make_cnn_model.py");
      // execute the script to create the Keras model file
      gSystem->Exec(TMVA::Python_Executable() + " make_cnn_model.py");

      if (gSystem->AccessPathName("model_cnn.h5")) {
         Warning("TMVA_CNN_Classification", "Error creating Keras model file - skip using Keras");
      } else {
         // book the PyKeras method only if the Keras model could be created
         Info("TMVA_CNN_Classification", "Booking tf.Keras CNN model");
         factory.BookMethod(
            loader, TMVA::Types::kPyKeras, "PyKeras",
            "H:!V:VarTransform=None:FilenameModel=model_cnn.h5:tf.keras:"
            "FilenameTrainedModel=trained_model_cnn.h5:NumEpochs=20:BatchSize=100:"
            "GpuOptions=allow_growth=True"); // needed for RTX NVidia cards and to avoid TF allocating all GPU memory
      }
   }

   if (usePyTorchCNN) {

      Info("TMVA_CNN_Classification", "Using Convolutional PyTorch Model");
      TString pyTorchFileName = gROOT->GetTutorialDir() + TString("/tmva/PyTorch_Generate_CNN_Model.py");
      // check that PyTorch can be imported and that the file defining the model
      // (used later when booking the method) exists
      if (gSystem->Exec(TMVA::Python_Executable() + " -c 'import torch'") || gSystem->AccessPathName(pyTorchFileName)) {
         Warning("TMVA_CNN_Classification",
                 "PyTorch is not installed or the model-building file does not exist - skip using PyTorch");
      } else {
         // book the PyTorch method only if the PyTorch model could be created
         Info("TMVA_CNN_Classification", "Booking PyTorch CNN model");
         TString methodOpt = "H:!V:VarTransform=None:FilenameModel=PyTorchModelCNN.pt:"
                             "FilenameTrainedModel=PyTorchTrainedModelCNN.pt:NumEpochs=20:BatchSize=100";
         methodOpt += TString(":UserCode=") + pyTorchFileName;
         factory.BookMethod(loader, TMVA::Types::kPyTorch, "PyTorch", methodOpt);
      }
   }

   /// ## Train Methods

   factory.TrainAllMethods();

   /// ## Test and Evaluate Methods

   factory.TestAllMethods();

   factory.EvaluateAllMethods();

   /// ## Plot ROC Curve

   auto c1 = factory.GetROCCurve(loader);
   c1->Draw();

   // close the output file to save the results
   outputFile->Close();
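
   // Once the file is closed, the results can also be explored interactively with
   // the TMVA GUI, e.g. (assuming a ROOT build with the TMVA GUI; not executed here):
   //
   //    TMVA::TMVAGui("TMVA_CNN_ClassificationOutput.root");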
}