TMVA_CNN_Classification.C
/// \file
/// \ingroup tutorial_tmva
/// \notebook
/// TMVA Classification Example Using a Convolutional Neural Network
///
/// This is an example of using a CNN in TMVA. We do classification using a toy image data set
/// that is generated when running the example macro.
///
/// \macro_image
/// \macro_output
/// \macro_code
///
/// \author Lorenzo Moneta

/***

 # TMVA Classification Example Using a Convolutional Neural Network

**/

/// Helper function to create the input image data:
/// we fill signal and background 2D histograms from 2D Gaussians
/// whose location (means in X and Y) is different for each event.
/// Signal and background differ only in the widths of the Gaussians,
/// which differ between the two classes by a few percent.
///
void MakeImagesTree(int n, int nh, int nw)
{

   // image size (nh x nw)
   const int ntot = nh * nw;
   const TString fileOutName = TString::Format("images_data_%dx%d.root", nh, nw);

   const int nRndmEvts = 10000; // number of events we use to fill each image
   double delta_sigma = 0.1;    // difference in the Gaussian widths (~3% of the nominal width of 3)
   double pixelNoise = 5;

   double sX1 = 3;
   double sY1 = 3;
   double sX2 = sX1 + delta_sigma;
   double sY2 = sY1 - delta_sigma;

   auto h1 = new TH2D("h1", "h1", nh, 0, 10, nw, 0, 10);
   auto h2 = new TH2D("h2", "h2", nh, 0, 10, nw, 0, 10);

   auto f1 = new TF2("f1", "xygaus");
   auto f2 = new TF2("f2", "xygaus");
   TTree sgn("sig_tree", "signal_tree");
   TTree bkg("bkg_tree", "background_tree");

   TFile f(fileOutName, "RECREATE");

   std::vector<float> x1(ntot);
   std::vector<float> x2(ntot);

   // create the signal and background trees with a single branch:
   // a std::vector<float> of size nh x nw containing the image data

   std::vector<float> *px1 = &x1;
   std::vector<float> *px2 = &x2;

   bkg.Branch("vars", "std::vector<float>", &px1);
   sgn.Branch("vars", "std::vector<float>", &px2);

   sgn.SetDirectory(&f);
   bkg.SetDirectory(&f);

   f1->SetParameters(1, 5, sX1, 5, sY1);
   f2->SetParameters(1, 5, sX2, 5, sY2);
   gRandom->SetSeed(0);
   std::cout << "Filling ROOT tree " << std::endl;
   for (int i = 0; i < n; ++i) {
      if (i % 1000 == 0)
         std::cout << "Generating image event ... " << i << std::endl;
      h1->Reset();
      h2->Reset();
      // generate random means in the range [3,7] to stay away from the borders
      f1->SetParameter(1, gRandom->Uniform(3, 7));
      f1->SetParameter(3, gRandom->Uniform(3, 7));
      f2->SetParameter(1, gRandom->Uniform(3, 7));
      f2->SetParameter(3, gRandom->Uniform(3, 7));

      h1->FillRandom("f1", nRndmEvts);
      h2->FillRandom("f2", nRndmEvts);

      for (int k = 0; k < nh; ++k) {
         for (int l = 0; l < nw; ++l) {
            int m = k * nw + l;
            // add some noise in each bin
            x1[m] = h1->GetBinContent(k + 1, l + 1) + gRandom->Gaus(0, pixelNoise);
            x2[m] = h2->GetBinContent(k + 1, l + 1) + gRandom->Gaus(0, pixelNoise);
         }
      }
      sgn.Fill();
      bkg.Fill();
   }
   sgn.Write();
   bkg.Write();

   Info("MakeImagesTree", "Signal and background trees with image data written to the file %s", f.GetName());
   sgn.Print();
   bkg.Print();
   f.Close();
}

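/// Main macro. The `opt` vector selects which methods are booked, in the order
/// { TMVA CNN, Keras CNN, TMVA DNN, TMVA BDT, PyTorch CNN }.
/// For example, to run only the BDT and the TMVA DNN one could invoke the macro
/// from the shell as follows (a usage sketch):
///
///     root -l -q 'TMVA_CNN_Classification.C({false, false, true, true, false})'
///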
void TMVA_CNN_Classification(std::vector<bool> opt = {1, 1, 1, 1, 1})
{

   bool useTMVACNN = (opt.size() > 0) ? opt[0] : false;
   bool useKerasCNN = (opt.size() > 1) ? opt[1] : false;
   bool useTMVADNN = (opt.size() > 2) ? opt[2] : false;
   bool useTMVABDT = (opt.size() > 3) ? opt[3] : false;
   bool usePyTorchCNN = (opt.size() > 4) ? opt[4] : false;

#ifndef R__HAS_TMVACPU
#ifndef R__HAS_TMVAGPU
   Warning("TMVA_CNN_Classification",
           "TMVA is not built with GPU or CPU multi-thread support. Cannot use TMVA Deep Learning for CNN");
   useTMVACNN = false;
#endif
#endif

   bool writeOutputFile = true;

   int num_threads = 0; // use default number of threads

   TMVA::Tools::Instance();

   // enable implicit multi-threading
   if (num_threads >= 0) {
      ROOT::EnableImplicitMT(num_threads);
      if (num_threads > 0)
         gSystem->Setenv("OMP_NUM_THREADS", TString::Format("%d", num_threads));
   } else
      gSystem->Setenv("OMP_NUM_THREADS", "1");

   std::cout << "Running with nthreads = " << ROOT::GetThreadPoolSize() << std::endl;

#ifdef R__HAS_PYMVA
   gSystem->Setenv("KERAS_BACKEND", "tensorflow");
   // initialize the Python interpreter (needed for using Keras)
   TMVA::PyMethodBase::PyInitialize();
#else
   useKerasCNN = false;
#endif

   TFile *outputFile = nullptr;
   if (writeOutputFile)
      outputFile = TFile::Open("TMVA_CNN_ClassificationOutput.root", "RECREATE");

   /***
    ## Create TMVA Factory

    Create the Factory class. Later you can choose the methods
    whose performance you'd like to investigate.

    The factory is the major TMVA object you have to interact with. Here is the list of parameters you need to pass:

    - The first argument is the base of the name of all the output
      weight files in the directory weight/ that will be created with the
      method parameters.

    - The second argument is the output file for the training results.

    - The third argument is a string option defining some general configuration for the TMVA session.
      For example, all TMVA output can be suppressed by removing the "!" (not) in front of the "Silent" argument in
      the option string.

    - Note that we disable any pre-transformation of the input variables and we avoid computing correlations between
      input variables.
   ***/

   TMVA::Factory factory(
      "TMVA_CNN_Classification", outputFile,
      "!V:ROC:!Silent:Color:AnalysisType=Classification:Transformations=None:!Correlations");

   /***

    ## Declare DataLoader(s)

    The next step is to declare the DataLoader class that deals with the input variables.

    Define the input variables that shall be used for the MVA training.
    Note that you may also use variable expressions, which can be parsed by TTree::Draw("expression").

    In this case the input data consist of an image of 16x16 pixels. Each single pixel is a branch in a ROOT TTree.

   **/

   TMVA::DataLoader *loader = new TMVA::DataLoader("dataset");

   /***

    ## Setup Dataset(s)

    Define the input data file and the signal and background trees.

   **/

   int imgSize = 16 * 16;
   TString inputFileName = "images_data_16x16.root";

   bool fileExist = !gSystem->AccessPathName(inputFileName);

   // if the file does not exist, create it
   if (!fileExist) {
      MakeImagesTree(5000, 16, 16);
   }

   auto inputFile = TFile::Open(inputFileName);
   if (!inputFile) {
      Error("TMVA_CNN_Classification", "Error opening input file %s - exit", inputFileName.Data());
      return;
   }

   // --- Register the training and test trees

   TTree *signalTree = (TTree *)inputFile->Get("sig_tree");
   TTree *backgroundTree = (TTree *)inputFile->Get("bkg_tree");

   int nEventsSig = signalTree->GetEntries();
   int nEventsBkg = backgroundTree->GetEntries();

   // global event weights per tree (see below for setting event-wise weights)
   Double_t signalWeight = 1.0;
   Double_t backgroundWeight = 1.0;

   // You can add an arbitrary number of signal or background trees
   loader->AddSignalTree(signalTree, signalWeight);
   loader->AddBackgroundTree(backgroundTree, backgroundWeight);

   /// add event variables (image)
   /// use the new method (available since ROOT 6.20) to add a variable array for all image data
   loader->AddVariablesArray("vars", imgSize);
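
   // Before ROOT 6.20 each pixel had to be registered as an individual input variable;
   // a sketch of the older, equivalent approach (illustrative loop, not part of the original macro):
   //    for (int i = 0; i < imgSize; ++i)
   //       loader->AddVariable(TString::Format("vars[%d]", i), 'F');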

   // Set individual event weights (the variables must exist in the original TTree):
   //    for signal    : loader->SetSignalWeightExpression("weight1*weight2");
   //    for background: loader->SetBackgroundWeightExpression("weight1*weight2");
   // loader->SetBackgroundWeightExpression("weight");

   // Apply additional cuts on the signal and background samples (can be different)
   TCut mycuts = ""; // for example: TCut mycuts = "abs(var1)<0.5 && abs(var2-0.5)<1";
   TCut mycutb = ""; // for example: TCut mycutb = "abs(var1)<0.5";

   // Tell the factory how to use the training and testing events
   //
   // If no numbers of events are given, half of the events in the tree are used
   // for training, and the other half for testing:
   //    loader->PrepareTrainingAndTestTree(mycut, "SplitMode=random:!V");
   // It is also possible to specify the number of training and testing events;
   // note we disable the computation of the correlation matrix of the input variables

   int nTrainSig = 0.8 * nEventsSig;
   int nTrainBkg = 0.8 * nEventsBkg;

   // build the string options for DataLoader::PrepareTrainingAndTestTree
   TString prepareOptions = TString::Format(
      "nTrain_Signal=%d:nTrain_Background=%d:SplitMode=Random:SplitSeed=100:NormMode=NumEvents:!V:!CalcCorrelations",
      nTrainSig, nTrainBkg);

   loader->PrepareTrainingAndTestTree(mycuts, mycutb, prepareOptions);

   /***
    Expected output:

        DataSetInfo              : [dataset] : Added class "Signal"
                                 : Add Tree sig_tree of type Signal with 5000 events
        DataSetInfo              : [dataset] : Added class "Background"
                                 : Add Tree bkg_tree of type Background with 5000 events

   **/

   /****
    # Booking Methods

    Here we book the TMVA methods. We book a Boosted Decision Tree method (BDT).

   **/

   // Boosted Decision Trees
   if (useTMVABDT) {
      factory.BookMethod(loader, TMVA::Types::kBDT, "BDT",
                         "!V:NTrees=400:MinNodeSize=2.5%:MaxDepth=2:BoostType=AdaBoost:AdaBoostBeta=0.5:"
                         "UseBaggedBoost:BaggedSampleFraction=0.5:SeparationType=GiniIndex:nCuts=20");
   }

   /**
    #### Booking Deep Neural Network

    Here we book the DNN of TMVA. See the example TMVA_Higgs_Classification.C for a detailed description of the
    options.

   **/

   if (useTMVADNN) {

      TString layoutString(
         "Layout=DENSE|100|RELU,BNORM,DENSE|100|RELU,BNORM,DENSE|100|RELU,BNORM,DENSE|100|RELU,DENSE|1|LINEAR");

      // Training strategies
      // One can concatenate several training strings with different parameters (e.g. learning rates or
      // regularization parameters); the training strings must be joined with the `|` delimiter.
      TString trainingString1("LearningRate=1e-3,Momentum=0.9,Repetitions=1,"
                              "ConvergenceSteps=5,BatchSize=100,TestRepetitions=1,"
                              "MaxEpochs=20,WeightDecay=1e-4,Regularization=None,"
                              "Optimizer=ADAM,DropConfig=0.0+0.0+0.0+0.0");

      TString trainingStrategyString("TrainingStrategy=");
      trainingStrategyString += trainingString1; // + "|" + trainingString2 + ....

      // Build now the full DNN option string
      TString dnnOptions("!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=None:"
                         "WeightInitialization=XAVIER");
      dnnOptions.Append(":");
      dnnOptions.Append(layoutString);
      dnnOptions.Append(":");
      dnnOptions.Append(trainingStrategyString);

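      // Schematically, the assembled option string now has the form
      // (shown for illustration; the exact content is built above):
      //   "!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=None:WeightInitialization=XAVIER:
      //    Layout=DENSE|100|RELU,...:TrainingStrategy=LearningRate=1e-3,..."
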
      TString dnnMethodName = "TMVA_DNN_CPU";
// use GPU if available
#ifdef R__HAS_TMVAGPU
      dnnOptions += ":Architecture=GPU";
      dnnMethodName = "TMVA_DNN_GPU";
#elif defined(R__HAS_TMVACPU)
      dnnOptions += ":Architecture=CPU";
#endif

      factory.BookMethod(loader, TMVA::Types::kDL, dnnMethodName, dnnOptions);
   }

   /***
    ### Book Convolutional Neural Network in TMVA

    For building a CNN one needs to define

    - Input Layout : number of channels (in this case = 1) | image height | image width
    - Batch Layout : batch size | number of channels | image size = (height*width)

    Then one adds convolutional and max-pooling layers.

    - For a convolutional layer the option string has to be:
      - CONV | number of units | filter height | filter width | stride height | stride width | padding height |
        padding width | activation function

    - Note that in this case we are using a 3x3 filter with padding=1 and stride=1, so the output dimension of the
      conv layer is equal to the input (see the dimension check after this comment block).

    - Note that we use a batch normalization layer after the first convolutional layer. This seems to help the
      convergence significantly.

    - For the max-pooling layer:
      - MAXPOOL | pool height | pool width | stride height | stride width

    The RESHAPE layer is needed to flatten the output before the Dense layer.

    Note that running the TMVA CNN requires CPU or GPU architecture support.

   ***/
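
   // A quick dimension check for the layout used below, applying the usual
   // convolution size formula out = (in - filter + 2*padding)/stride + 1:
   //   16x16 input, 3x3 filter, stride 1, padding 1 -> (16 - 3 + 2)/1 + 1 = 16 (size preserved)
   //   MAXPOOL 2x2 with stride 1                    -> (16 - 2)/1 + 1 = 15
   //   RESHAPE|FLAT then feeds 10 * 15 * 15 = 2250 values to the first dense layer.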

   if (useTMVACNN) {

      TString inputLayoutString("InputLayout=1|16|16");

      // Layout of the network (convolutional, batch-normalization, pooling, and dense layers)
      TString layoutString("Layout=CONV|10|3|3|1|1|1|1|RELU,BNORM,CONV|10|3|3|1|1|1|1|RELU,MAXPOOL|2|2|1|1,"
                           "RESHAPE|FLAT,DENSE|100|RELU,DENSE|1|LINEAR");

      // Training strategies.
      TString trainingString1("LearningRate=1e-3,Momentum=0.9,Repetitions=1,"
                              "ConvergenceSteps=5,BatchSize=100,TestRepetitions=1,"
                              "MaxEpochs=20,WeightDecay=1e-4,Regularization=None,"
                              "Optimizer=ADAM,DropConfig=0.0+0.0+0.0+0.0");

      TString trainingStrategyString("TrainingStrategy=");
      trainingStrategyString +=
         trainingString1; // + "|" + trainingString2 + "|" + trainingString3; for concatenating more training strings

      // Build the full CNN option string.
      TString cnnOptions("!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=None:"
                         "WeightInitialization=XAVIER");

      cnnOptions.Append(":");
      cnnOptions.Append(inputLayoutString);
      cnnOptions.Append(":");
      cnnOptions.Append(layoutString);
      cnnOptions.Append(":");
      cnnOptions.Append(trainingStrategyString);

      //// New DL (CNN)
      TString cnnMethodName = "TMVA_CNN_CPU";
// use GPU if available
#ifdef R__HAS_TMVAGPU
      cnnOptions += ":Architecture=GPU";
      cnnMethodName = "TMVA_CNN_GPU";
#else
      cnnOptions += ":Architecture=CPU";
      cnnMethodName = "TMVA_CNN_CPU";
#endif

      factory.BookMethod(loader, TMVA::Types::kDL, cnnMethodName, cnnOptions);
   }

   /**
    ### Book Convolutional Neural Network in Keras using a generated model

   **/

   if (useKerasCNN) {

      Info("TMVA_CNN_Classification", "Building convolutional Keras model");
      // create a Python script which can be executed:
      // 2 Conv2D layers + MaxPooling + Dense
      TMacro m;
      m.AddLine("import tensorflow");
      m.AddLine("from tensorflow.keras.models import Sequential");
      m.AddLine("from tensorflow.keras.optimizers import Adam");
      m.AddLine(
         "from tensorflow.keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, Reshape, BatchNormalization");
      m.AddLine("");
      m.AddLine("model = Sequential()");
      m.AddLine("model.add(Reshape((16, 16, 1), input_shape = (256, )))");
      m.AddLine("model.add(Conv2D(10, kernel_size = (3, 3), kernel_initializer = 'glorot_normal', activation = "
                "'relu', padding = 'same'))");
      m.AddLine("model.add(BatchNormalization())");
      m.AddLine("model.add(Conv2D(10, kernel_size = (3, 3), kernel_initializer = 'glorot_normal', activation = "
                "'relu', padding = 'same'))");
      // m.AddLine("model.add(BatchNormalization())");
      m.AddLine("model.add(MaxPooling2D(pool_size = (2, 2), strides = (1, 1)))");
      m.AddLine("model.add(Flatten())");
      m.AddLine("model.add(Dense(256, activation = 'relu'))");
      m.AddLine("model.add(Dense(2, activation = 'sigmoid'))");
      m.AddLine("model.compile(loss = 'binary_crossentropy', optimizer = Adam(lr = 0.001), metrics = ['accuracy'])");
      m.AddLine("model.save('model_cnn.h5')");
      m.AddLine("model.summary()");

      m.SaveSource("make_cnn_model.py");
      // execute the script to generate and save the model
      gSystem->Exec("python make_cnn_model.py");

      if (gSystem->AccessPathName("model_cnn.h5")) {
         Warning("TMVA_CNN_Classification", "Error creating Keras model file - skip using Keras");
      } else {
         // book the PyKeras method only if the Keras model could be created
         Info("TMVA_CNN_Classification", "Booking tf.Keras CNN model");
         factory.BookMethod(
            loader, TMVA::Types::kPyKeras, "PyKeras",
            "H:!V:VarTransform=None:FilenameModel=model_cnn.h5:tf.keras:"
            "FilenameTrainedModel=trained_model_cnn.h5:NumEpochs=20:BatchSize=100:"
            "GpuOptions=allow_growth=True"); // needed for RTX NVidia cards, to prevent TF from allocating all GPU memory
      }
   }

   if (usePyTorchCNN) {

      Info("TMVA_CNN_Classification", "Using Convolutional PyTorch Model");
      TString pyTorchFileName = gROOT->GetTutorialDir() + TString("/tmva/PyTorch_Generate_CNN_Model.py");
      // check that PyTorch can be imported and that the file defining the model
      // (used later when booking the method) exists
      if (gSystem->Exec("python -c 'import torch'") || gSystem->AccessPathName(pyTorchFileName)) {
         Warning("TMVA_CNN_Classification",
                 "PyTorch is not installed or the model-building file does not exist - skip using PyTorch");
      } else {
         // book the PyTorch method only if the PyTorch model could be created
         Info("TMVA_CNN_Classification", "Booking PyTorch CNN model");
         TString methodOpt = "H:!V:VarTransform=None:FilenameModel=PyTorchModelCNN.pt:"
                             "FilenameTrainedModel=PyTorchTrainedModelCNN.pt:NumEpochs=20:BatchSize=100";
         methodOpt += TString(":UserCode=") + pyTorchFileName;
         factory.BookMethod(loader, TMVA::Types::kPyTorch, "PyTorch", methodOpt);
      }
   }

   /// ## Train Methods

   factory.TrainAllMethods();

   /// ## Test and Evaluate Methods

   factory.TestAllMethods();

   factory.EvaluateAllMethods();

   /// ## Plot ROC Curve

   auto c1 = factory.GetROCCurve(loader);
   c1->Draw();

   // close the output file to save the results
   outputFile->Close();
}
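
/***
 After running the macro, the training results stored in
 TMVA_CNN_ClassificationOutput.root can be inspected with the TMVA GUI,
 e.g. (a usage sketch) from the ROOT prompt:

     root [0] TMVA::TMVAGui("TMVA_CNN_ClassificationOutput.root")
***/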