TMVA_RNN_Classification.C
/// \file
/// \ingroup tutorial_tmva
/// \notebook
/// TMVA Classification Example Using a Recurrent Neural Network
///
/// This is an example of using a RNN in TMVA. We do classification using a toy time-dependent
/// data set that is generated when running this example macro.
///
/// \macro_image
/// \macro_output
/// \macro_code
///
/// \author Lorenzo Moneta
/***

 # TMVA Classification Example Using a Recurrent Neural Network

 This is an example of using a RNN in TMVA.
 We do the classification using a toy data set containing a time series of `ntime` samples,
 each of dimension `ndim`, that is generated when running the provided function
 `MakeTimeData(nevents, ntime, ndim)`.

**/

#include <TROOT.h>
#include <TSystem.h>
#include <TError.h>
#include <TMath.h>
#include <TRandom.h>
#include <TF1.h>
#include <TH1D.h>
#include <TCanvas.h>
#include <TMacro.h>
#include <TCut.h>

#include "TMVA/Factory.h"
#include "TMVA/DataLoader.h"
#include "TMVA/DataSetInfo.h"
#include "TMVA/Config.h"
#include "TMVA/MethodDL.h"

#include "TFile.h"
#include "TTree.h"

#include <iostream>
#include <string>
#include <vector>

/// Helper function to generate the time data set:
/// for each event it creates `ntime` histograms with `ndim` bins, filled from
/// Gaussian distributions whose mean and width vary slowly with the time step
/// (with a different phase for signal and background), plus extra Gaussian noise.
/// For example, `MakeTimeData(10000, 10, 30)` writes the file `time_data_t10_d30.root`.
///
void MakeTimeData(int n, int ntime, int ndim)
{

   // const int ntime = 10;
   // const int ndim = 30; // number of dim/time
   TString fname = TString::Format("time_data_t%d_d%d.root", ntime, ndim);
   std::vector<TH1 *> v1(ntime);
   std::vector<TH1 *> v2(ntime);
   for (int i = 0; i < ntime; ++i) {
      v1[i] = new TH1D(TString::Format("h1_%d", i), "h1", ndim, 0, 10);
      v2[i] = new TH1D(TString::Format("h2_%d", i), "h2", ndim, 0, 10);
   }

   auto f1 = new TF1("f1", "gaus");
   auto f2 = new TF1("f2", "gaus");

   TFile f(fname, "RECREATE");
   TTree sgn("sgn", "sgn");
   TTree bkg("bkg", "bkg");

   std::vector<std::vector<float>> x1(ntime);
   std::vector<std::vector<float>> x2(ntime);

   for (int i = 0; i < ntime; ++i) {
      x1[i] = std::vector<float>(ndim);
      x2[i] = std::vector<float>(ndim);
   }

   for (auto i = 0; i < ntime; i++) {
      bkg.Branch(Form("vars_time%d", i), "std::vector<float>", &x1[i]);
      sgn.Branch(Form("vars_time%d", i), "std::vector<float>", &x2[i]);
   }

   sgn.SetDirectory(&f);
   bkg.SetDirectory(&f);
   gRandom->SetSeed(0);

   // the mean and width of the Gaussians vary with the time step, following a
   // sine for the background and a cosine for the signal: this phase difference
   // is what the classifier has to learn
   std::vector<double> mean1(ntime);
   std::vector<double> mean2(ntime);
   std::vector<double> sigma1(ntime);
   std::vector<double> sigma2(ntime);
   for (int j = 0; j < ntime; ++j) {
      mean1[j] = 5. + 0.2 * sin(TMath::Pi() * j / double(ntime));
      mean2[j] = 5. + 0.2 * cos(TMath::Pi() * j / double(ntime));
      sigma1[j] = 4 + 0.3 * sin(TMath::Pi() * j / double(ntime));
      sigma2[j] = 4 + 0.3 * cos(TMath::Pi() * j / double(ntime));
   }
   for (int i = 0; i < n; ++i) {

      if (i % 1000 == 0)
         std::cout << "Generating event ... " << i << std::endl;

      for (int j = 0; j < ntime; ++j) {
         auto h1 = v1[j];
         auto h2 = v2[j];
         h1->Reset();
         h2->Reset();

         f1->SetParameters(1, mean1[j], sigma1[j]);
         f2->SetParameters(1, mean2[j], sigma2[j]);

         h1->FillRandom("f1", 1000);
         h2->FillRandom("f2", 1000);

         for (int k = 0; k < ndim; ++k) {
            // std::cout << j*10+k << " ";
            x1[j][k] = h1->GetBinContent(k + 1) + gRandom->Gaus(0, 10);
            x2[j][k] = h2->GetBinContent(k + 1) + gRandom->Gaus(0, 10);
         }
      }
      // std::cout << std::endl;
      sgn.Fill();
      bkg.Fill();

      if (n == 1) {
         auto c1 = new TCanvas();
         c1->Divide(ntime, 2);
         for (int j = 0; j < ntime; ++j) {
            c1->cd(j + 1);
            v1[j]->Draw();
         }
         for (int j = 0; j < ntime; ++j) {
            c1->cd(ntime + j + 1);
            v2[j]->Draw();
         }
         gPad->Update();
      }
   }
   if (n > 1) {
      sgn.Write();
      bkg.Write();
      sgn.Print();
      bkg.Print();
      f.Close();
   }
}

/// Macro for performing a classification using a Recurrent Neural Network
/// @param nevts number of events used for signal and for background (default 2000; increase it for better classification results)
/// @param use_type type of recurrent network:
///   - use_type = 0: use a simple RNN network
///   - use_type = 1: use a LSTM network
///   - use_type = 2: use a GRU network
///   - use_type = 3: build 3 different networks with RNN, LSTM and GRU
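///
/// For example, to train only the LSTM network on 4000 events, one can run the macro
/// from the ROOT prompt as follows (a usage sketch; the argument values are illustrative):
///
///     root [0] .x TMVA_RNN_Classification.C(4000, 1)
///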
void TMVA_RNN_Classification(int nevts = 2000, int use_type = 1)
{

   const int ninput = 30;
   const int ntime = 10;
   const int batchSize = 100;
   const int maxepochs = 20;

   int nTotEvts = nevts; // total events to be generated for signal or background

   bool useKeras = true;

   bool useTMVA_RNN = true;
   bool useTMVA_DNN = true;
   bool useTMVA_BDT = false;

   std::vector<std::string> rnn_types = {"RNN", "LSTM", "GRU"};
   std::vector<bool> use_rnn_type = {1, 1, 1};
   if (use_type >= 0 && use_type < 3) {
      use_rnn_type = {0, 0, 0};
      use_rnn_type[use_type] = 1;
   }
   bool useGPU = true; // use GPU for TMVA if available

#ifndef R__HAS_TMVAGPU
   useGPU = false;
#ifndef R__HAS_TMVACPU
   Warning("TMVA_RNN_Classification",
           "TMVA is not built with GPU or CPU multi-thread support. Cannot use TMVA Deep Learning for RNN");
   useTMVA_RNN = false;
#endif
#endif

   TString archString = (useGPU) ? "GPU" : "CPU";

   bool writeOutputFile = true;

   const char *rnn_type = "RNN";

#ifdef R__HAS_PYMVA
   // initialize the Python interpreter used by the PyKeras methods
   TMVA::PyMethodBase::PyInitialize();
#else
   useKeras = false;
#endif

   int num_threads = 4; // number of threads to use (0 would let ROOT use all available threads)
   gSystem->Setenv("OMP_NUM_THREADS", "1"); // switch off MT in OpenBLAS
   // enable multi-threaded running
   if (num_threads >= 0) {
      ROOT::EnableImplicitMT(num_threads);
   }

   TMVA::Config::Instance(); // make sure the TMVA configuration singleton is created

   std::cout << "Running with nthreads = " << ROOT::GetThreadPoolSize() << std::endl;

   TString inputFileName = "time_data_t10_d30.root";

   bool fileExist = !gSystem->AccessPathName(inputFileName);

   // if the file does not exist, create it
   if (!fileExist) {
      MakeTimeData(nTotEvts, ntime, ninput);
   }

   auto inputFile = TFile::Open(inputFileName);
   if (!inputFile) {
      Error("TMVA_RNN_Classification", "Error opening input file %s - exit", inputFileName.Data());
      return;
   }

   std::cout << "--- RNNClassification : Using input file: " << inputFile->GetName() << std::endl;

   // Create a ROOT output file where TMVA will store ntuples, histograms, etc.
   TString outfileName(TString::Format("data_RNN_%s.root", archString.Data()));
   TFile *outputFile = nullptr;
   if (writeOutputFile)
      outputFile = TFile::Open(outfileName, "RECREATE");

   /**
   ## Declare Factory

   Create the Factory class. Later you can choose the methods
   whose performance you'd like to investigate.

   The factory is the major TMVA object you have to interact with. Here is the list of parameters you
   need to pass:

   - The first argument is the base of the name of all the output
     weight files in the directory weight/ that will be created with the
     method parameters.

   - The second argument is the output file for the training results.

   - The third argument is a string option defining some general configuration for the TMVA session.
     For example all TMVA output can be suppressed by removing the "!" (not) in front of the "Silent"
     argument in the option string.

   **/

   // Creating the factory object
   TMVA::Factory *factory = new TMVA::Factory("TMVAClassification", outputFile,
                                              "!V:!Silent:Color:DrawProgressBar:Transformations=None:!Correlations:"
                                              "AnalysisType=Classification:ModelPersistence");
   TMVA::DataLoader *dataloader = new TMVA::DataLoader("dataset");

   TTree *signalTree = (TTree *)inputFile->Get("sgn");
   TTree *background = (TTree *)inputFile->Get("bkg");

   const int nvar = ninput * ntime;

   /// add variables - use the new AddVariablesArray function
   for (auto i = 0; i < ntime; i++) {
      dataloader->AddVariablesArray(Form("vars_time%d", i), ninput);
   }
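   // this books ntime x ninput = 10 x 30 = 300 input variables in total,
   // named vars_time0[0] ... vars_time9[29] (see also nvar above and the printout below)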

   dataloader->AddSignalTree(signalTree, 1.0);
   dataloader->AddBackgroundTree(background, 1.0);

   // check the given input
   auto &datainfo = dataloader->GetDataSetInfo();
   auto vars = datainfo.GetListOfVariables();
   std::cout << "number of variables is " << vars.size() << std::endl;
   for (auto &v : vars)
      std::cout << v << ",";
   std::cout << std::endl;

   int nTrainSig = 0.8 * nTotEvts;
   int nTrainBkg = 0.8 * nTotEvts;

   // build the string options for DataLoader::PrepareTrainingAndTestTree
   TString prepareOptions = TString::Format(
      "nTrain_Signal=%d:nTrain_Background=%d:SplitMode=Random:SplitSeed=100:NormMode=NumEvents:!V:!CalcCorrelations",
      nTrainSig, nTrainBkg);
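   // for the default nevts = 2000 this expands to
   // "nTrain_Signal=1600:nTrain_Background=1600:SplitMode=Random:SplitSeed=100:NormMode=NumEvents:!V:!CalcCorrelations"
   // (80% of the events are used for training, the remaining 20% for testing)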

   // Apply additional cuts on the signal and background samples (can be different)
   TCut mycuts = ""; // for example: TCut mycuts = "abs(var1)<0.5 && abs(var2-0.5)<1";
   TCut mycutb = "";

   dataloader->PrepareTrainingAndTestTree(mycuts, mycutb, prepareOptions);

   std::cout << "prepared DATA LOADER " << std::endl;

   /**
   ## Book TMVA recurrent models

   Book the different types of recurrent models in TMVA (SimpleRNN, LSTM or GRU).

   **/

   if (useTMVA_RNN) {

      for (int i = 0; i < 3; ++i) {

         if (!use_rnn_type[i])
            continue;

         const char *rnn_type = rnn_types[i].c_str();

         /// define the input layout string for the RNN:
         /// the input data should be organized as (time steps) x (input dimension),
         /// i.e. ntime x ninput
         TString inputLayoutString = TString::Format("InputLayout=%d|%d", ntime, ninput);
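         // e.g. with ntime = 10 and ninput = 30 this expands to "InputLayout=10|30"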

         /// Define the RNN layer layout; the format is:
         /// LayerType (RNN, LSTM or GRU) | number of units | number of inputs |
         /// time steps | remember state (typically 0) | return full output sequence (1)
         TString rnnLayout = TString::Format("%s|10|%d|%d|0|1", rnn_type, ninput, ntime);
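         // e.g. for the LSTM case this expands to "LSTM|10|30|10|0|1"
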
         /// after the RNN, add a reshape layer (needed to flatten the output), a dense layer
         /// with 64 units, and a final linear layer.
         /// Note the last layer is linear because, when using CrossEntropy, a Sigmoid is applied already
         TString layoutString = TString("Layout=") + rnnLayout + TString(",RESHAPE|FLAT,DENSE|64|TANH,LINEAR");
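         // e.g. "Layout=LSTM|10|30|10|0|1,RESHAPE|FLAT,DENSE|64|TANH,LINEAR"
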
         /// Defining Training strategies. Different training strings can be concatenated
         /// with a "|" separator; here we use only one.
         TString trainingString1 = TString::Format("LearningRate=1e-3,Momentum=0.0,Repetitions=1,"
                                                   "ConvergenceSteps=5,BatchSize=%d,TestRepetitions=1,"
                                                   "WeightDecay=1e-2,Regularization=None,MaxEpochs=%d,"
                                                   "Optimizer=ADAM,DropConfig=0.0+0.+0.+0.",
                                                   batchSize, maxepochs);

         TString trainingStrategyString("TrainingStrategy=");
         trainingStrategyString += trainingString1; // + "|" + trainingString2

         /// Define the full RNN option string, adding the final options for the network
         TString rnnOptions("!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=None:"
                            "WeightInitialization=XAVIERUNIFORM:ValidationSize=0.2:RandomSeed=1234");

         rnnOptions.Append(":");
         rnnOptions.Append(inputLayoutString);
         rnnOptions.Append(":");
         rnnOptions.Append(layoutString);
         rnnOptions.Append(":");
         rnnOptions.Append(trainingStrategyString);
         rnnOptions.Append(":");
         rnnOptions.Append(TString::Format("Architecture=%s", archString.Data()));

         TString rnnName = "TMVA_" + TString(rnn_type);
         factory->BookMethod(dataloader, TMVA::Types::kDL, rnnName, rnnOptions);
      }
   }

   /**
   ## Book TMVA fully connected dense layer models

   **/

   if (useTMVA_DNN) {
      // Method DL with a dense layer; the flat input is treated as a 1 x 1 x (ntime * ninput) tensor
      TString inputLayoutString = TString::Format("InputLayout=1|1|%d", ntime * ninput);

      TString layoutString("Layout=DENSE|64|TANH,DENSE|64|TANH,DENSE|64|TANH,LINEAR");
      // Training strategies.
      TString trainingString1("LearningRate=1e-3,Momentum=0.0,Repetitions=1,"
                              "ConvergenceSteps=10,BatchSize=256,TestRepetitions=1,"
                              "WeightDecay=1e-4,Regularization=None,MaxEpochs=20,"
                              "DropConfig=0.0+0.+0.+0.,Optimizer=ADAM");
      TString trainingStrategyString("TrainingStrategy=");
      trainingStrategyString += trainingString1; // + "|" + trainingString2

      // General Options.
      TString dnnOptions("!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=None:"
                         "WeightInitialization=XAVIER:RandomSeed=0");

      dnnOptions.Append(":");
      dnnOptions.Append(inputLayoutString);
      dnnOptions.Append(":");
      dnnOptions.Append(layoutString);
      dnnOptions.Append(":");
      dnnOptions.Append(trainingStrategyString);
      dnnOptions.Append(":");
      dnnOptions.Append(TString::Format("Architecture=%s", archString.Data()));

      TString dnnName = "TMVA_DNN";
      factory->BookMethod(dataloader, TMVA::Types::kDL, dnnName, dnnOptions);
   }

   /**
   ## Book Keras recurrent models

   Book the different types of recurrent models in Keras (SimpleRNN, LSTM or GRU).

   **/

   if (useKeras) {

      for (int i = 0; i < 3; i++) {

         if (use_rnn_type[i]) {

            TString modelName = TString::Format("model_%s.h5", rnn_types[i].c_str());
            TString trainedModelName = TString::Format("trained_model_%s.h5", rnn_types[i].c_str());

            Info("TMVA_RNN_Classification", "Building recurrent keras model using a %s layer", rnn_types[i].c_str());
            // create a python script which builds the Keras model: a recurrent
            // layer followed by a flatten layer and two dense layers
            TMacro m;
            m.AddLine("import tensorflow");
            m.AddLine("from tensorflow.keras.models import Sequential");
            m.AddLine("from tensorflow.keras.optimizers import Adam");
            m.AddLine("from tensorflow.keras.layers import Input, Dense, Dropout, Flatten, SimpleRNN, GRU, LSTM, Reshape, "
                      "BatchNormalization");
            m.AddLine("");
            m.AddLine("model = Sequential() ");
            m.AddLine("model.add(Reshape((10, 30), input_shape = (10*30, )))");
            // add the recurrent layer depending on the type; use the option to return the full output sequence
            if (rnn_types[i] == "LSTM")
               m.AddLine("model.add(LSTM(units=10, return_sequences=True) )");
            else if (rnn_types[i] == "GRU")
               m.AddLine("model.add(GRU(units=10, return_sequences=True) )");
            else
               m.AddLine("model.add(SimpleRNN(units=10, return_sequences=True) )");

            // m.AddLine("model.add(BatchNormalization())");
            m.AddLine("model.add(Flatten())"); // needed when returning the full time output sequence
            m.AddLine("model.add(Dense(64, activation = 'tanh')) ");
            m.AddLine("model.add(Dense(2, activation = 'sigmoid')) ");
            m.AddLine(
               "model.compile(loss = 'binary_crossentropy', optimizer = Adam(learning_rate = 0.001), weighted_metrics = ['accuracy'])");
            m.AddLine(TString::Format("modelName = '%s'", modelName.Data()));
            m.AddLine("model.save(modelName)");
            m.AddLine("model.summary()");
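
            // the resulting model maps the flat 300-value input to a (10, 30) sequence,
            // runs the recurrent layer (output shape (10, 10)), flattens it to 100 values
            // and finishes with Dense(64) and a two-node Dense output, one node per class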

            m.SaveSource("make_rnn_model.py");
            // execute the python script to make the model
            auto ret = (TString *)gROOT->ProcessLine("TMVA::Python_Executable()");
            TString python_exe = (ret) ? *(ret) : "python";
            gSystem->Exec(python_exe + " make_rnn_model.py");

            if (gSystem->AccessPathName(modelName)) {
               Warning("TMVA_RNN_Classification", "Error creating Keras recurrent model file - Skip using Keras");
               useKeras = false;
            } else {
               // book PyKeras method only if the Keras model could be created
               Info("TMVA_RNN_Classification", "Booking Keras %s model", rnn_types[i].c_str());
               factory->BookMethod(dataloader, TMVA::Types::kPyKeras,
                                   TString::Format("PyKeras_%s", rnn_types[i].c_str()),
                                   TString::Format("!H:!V:VarTransform=None:FilenameModel=%s:tf.keras:"
                                                   "FilenameTrainedModel=%s:GpuOptions=allow_growth=True:"
                                                   "NumEpochs=%d:BatchSize=%d",
                                                   modelName.Data(), trainedModelName.Data(), maxepochs, batchSize));
            }
         }
      }
   }

   // use a BDT in case Keras or the TMVA deep-learning methods are not available
   if (!useKeras || !useTMVA_RNN)
      useTMVA_BDT = true;

   /**
   ## Book TMVA BDT
   **/

   if (useTMVA_BDT) {

      factory->BookMethod(dataloader, TMVA::Types::kBDT, "BDTG",
                          "!H:!V:NTrees=100:MinNodeSize=2.5%:BoostType=Grad:Shrinkage=0.10:UseBaggedBoost:"
                          "BaggedSampleFraction=0.5:nCuts=20:"
                          "MaxDepth=2");
   }

   /// Train all methods
   factory->TrainAllMethods();

   std::cout << "nthreads = " << ROOT::GetThreadPoolSize() << std::endl;

   // ---- Evaluate all MVAs using the set of test events
   factory->TestAllMethods();

   // ----- Evaluate and compare performance of all configured MVAs
   factory->EvaluateAllMethods();

   // plot the ROC curve comparing all booked methods
   auto c1 = factory->GetROCCurve(dataloader);
   if (c1)
      c1->Draw();

   if (outputFile)
      outputFile->Close();
}