ROOT 6.16/01 Reference Guide
MethodPyKeras.cxx
// @(#)root/tmva/pymva $Id$
// Author: Stefan Wunsch, 2016

#include <Python.h>
#include "TMVA/MethodPyKeras.h"

#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <numpy/arrayobject.h>

#include "TMVA/Types.h"
#include "TMVA/Config.h"
#include "TMVA/ClassifierFactory.h"
#include "TMVA/Results.h"
#include "TMVA/TransformationHandler.h"
#include "TMVA/VariableTransformBase.h"
#include "TMVA/Tools.h"
#include "TMVA/Timer.h"

using namespace TMVA;

REGISTER_METHOD(PyKeras)

ClassImp(MethodPyKeras);

MethodPyKeras::MethodPyKeras(const TString &jobName, const TString &methodTitle, DataSetInfo &dsi, const TString &theOption)
   : PyMethodBase(jobName, Types::kPyKeras, methodTitle, dsi, theOption) {
   fNumEpochs = 10;
   fBatchSize = 100;
   fVerbose = 1;
   fContinueTraining = false;
   fSaveBestOnly = true;
   fTriesEarlyStopping = -1;   // negative value deactivates early stopping
   fLearningRateSchedule = ""; // empty string deactivates learning rate scheduler
   fFilenameTrainedModel = ""; // empty string sets output model filename to default (in weights/)
   fTensorBoard = "";          // empty string deactivates TensorBoard callback
}

MethodPyKeras::MethodPyKeras(DataSetInfo &theData, const TString &theWeightFile)
   : PyMethodBase(Types::kPyKeras, theData, theWeightFile) {
   fNumEpochs = 10;
   fBatchSize = 100;
   fVerbose = 1;
   fContinueTraining = false;
   fSaveBestOnly = true;
   fTriesEarlyStopping = -1;   // negative value deactivates early stopping
   fLearningRateSchedule = ""; // empty string deactivates learning rate scheduler
   fFilenameTrainedModel = ""; // empty string sets output model filename to default (in weights/)
   fTensorBoard = "";          // empty string deactivates TensorBoard callback
}

MethodPyKeras::~MethodPyKeras() {
}

Bool_t MethodPyKeras::HasAnalysisType(Types::EAnalysisType type, UInt_t numberClasses, UInt_t) {
   if (type == Types::kRegression) return kTRUE;
   if (type == Types::kClassification && numberClasses == 2) return kTRUE;
   if (type == Types::kMulticlass && numberClasses >= 2) return kTRUE;
   return kFALSE;
}

///////////////////////////////////////////////////////////////////////////////

void MethodPyKeras::DeclareOptions() {
   DeclareOptionRef(fFilenameModel, "FilenameModel", "Filename of the initial Keras model");
   DeclareOptionRef(fFilenameTrainedModel, "FilenameTrainedModel", "Filename of the trained output Keras model");
   DeclareOptionRef(fBatchSize, "BatchSize", "Training batch size");
   DeclareOptionRef(fNumEpochs, "NumEpochs", "Number of training epochs");
   DeclareOptionRef(fVerbose, "Verbose", "Keras verbosity during training");
   DeclareOptionRef(fContinueTraining, "ContinueTraining", "Load weights from previous training");
   DeclareOptionRef(fSaveBestOnly, "SaveBestOnly", "Store only weights with smallest validation loss");
   DeclareOptionRef(fTriesEarlyStopping, "TriesEarlyStopping", "Number of epochs with no improvement in validation loss after which training will be stopped. The default or a negative number deactivates this option.");
   DeclareOptionRef(fLearningRateSchedule, "LearningRateSchedule", "Set new learning rate during training at specific epochs, e.g., \"50,0.01;70,0.005\"");
   DeclareOptionRef(fTensorBoard, "TensorBoard",
                    "Write a log during training to visualize and monitor the training performance with TensorBoard");

   DeclareOptionRef(fNumValidationString = "20%", "ValidationSize", "Part of the training data to use for validation. "
                    "Specify as 0.2 or 20% to use a fifth of the data set as validation set. "
                    "Specify as 100 to use exactly 100 events. (Default: 20%)");
}
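// Usage note (illustrative): these options are passed through the usual TMVA booking
// option string, e.g.
//    "H:!V:FilenameModel=model.h5:NumEpochs=20:BatchSize=32:SaveBestOnly=true:ValidationSize=20%"
// where model.h5 stands for a hypothetical Keras model file saved beforehand.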

////////////////////////////////////////////////////////////////////////////////
/// Validation of the ValidationSize option. Allowed formats are 20%, 0.2 and
/// 100 etc.
/// - 20% and 0.2 select 20% of the training set as validation data.
/// - 100 selects 100 events as the validation data.
///
/// @return number of samples in validation set
///
UInt_t MethodPyKeras::GetNumValidationSamples()
{
   Int_t nValidationSamples = 0;
   UInt_t trainingSetSize = GetEventCollection(Types::kTraining).size();

   // Parsing + Validation
   // --------------------
   if (fNumValidationString.EndsWith("%")) {
      // Relative spec. format 20%
      TString intValStr = TString(fNumValidationString.Strip(TString::kTrailing, '%'));

      if (intValStr.IsFloat()) {
         Double_t valSizeAsDouble = fNumValidationString.Atof() / 100.0;
         nValidationSamples = GetEventCollection(Types::kTraining).size() * valSizeAsDouble;
      } else {
         Log() << kFATAL << "Cannot parse number \"" << fNumValidationString
               << "\". Expected string like \"20%\" or \"20.0%\"." << Endl;
      }
   } else if (fNumValidationString.IsFloat()) {
      Double_t valSizeAsDouble = fNumValidationString.Atof();

      if (valSizeAsDouble < 1.0) {
         // Relative spec. format 0.2
         nValidationSamples = GetEventCollection(Types::kTraining).size() * valSizeAsDouble;
      } else {
         // Absolute spec format 100 or 100.0
         nValidationSamples = valSizeAsDouble;
      }
   } else {
      Log() << kFATAL << "Cannot parse number \"" << fNumValidationString << "\". Expected string like \"0.2\" or \"100\"."
            << Endl;
   }

   // Value validation
   // ----------------
   if (nValidationSamples < 0) {
      Log() << kFATAL << "Validation size \"" << fNumValidationString << "\" is negative." << Endl;
   }

   if (nValidationSamples == 0) {
      Log() << kFATAL << "Validation size \"" << fNumValidationString << "\" is zero." << Endl;
   }

   if (nValidationSamples >= (Int_t)trainingSetSize) {
      Log() << kFATAL << "Validation size \"" << fNumValidationString
            << "\" is larger than or equal in size to training set (size=\"" << trainingSetSize << "\")." << Endl;
   }

   return nValidationSamples;
}

void MethodPyKeras::ProcessOptions() {
   // Set default filename for trained model if option is not used
   if (fFilenameTrainedModel.IsNull()) {
      fFilenameTrainedModel = GetWeightFileDir() + "/TrainedModel_" + GetName() + ".h5";
   }
   // Setup model, either the initial model from `fFilenameModel` or
   // the trained model from `fFilenameTrainedModel`
   if (fContinueTraining) Log() << kINFO << "Continue training with trained model" << Endl;
   SetupKerasModel(fContinueTraining);
}

void MethodPyKeras::SetupKerasModel(bool loadTrainedModel) {
   /*
    * Load Keras model from file
    */

   // Load initial model or already trained model
   TString filenameLoadModel;
   if (loadTrainedModel) {
      filenameLoadModel = fFilenameTrainedModel;
   }
   else {
      filenameLoadModel = fFilenameModel;
   }
   PyRunString("model = keras.models.load_model('"+filenameLoadModel+"')",
               "Failed to load Keras model from file: "+filenameLoadModel);
   Log() << kINFO << "Load model from file: " << filenameLoadModel << Endl;

   /*
    * Init variables and weights
    */

   // Get variables, classes and target numbers
   fNVars = GetNVariables();
   if (GetAnalysisType() == Types::kClassification || GetAnalysisType() == Types::kMulticlass) fNOutputs = DataInfo().GetNClasses();
   else if (GetAnalysisType() == Types::kRegression) fNOutputs = DataInfo().GetNTargets();
   else Log() << kFATAL << "Selected analysis type is not implemented" << Endl;

   // Init evaluation (needed for getMvaValue)
   fVals = new float[fNVars]; // holds values used for classification and regression
   npy_intp dimsVals[2] = {(npy_intp)1, (npy_intp)fNVars};
   PyArrayObject* pVals = (PyArrayObject*)PyArray_SimpleNewFromData(2, dimsVals, NPY_FLOAT, (void*)fVals);
   PyDict_SetItemString(fLocalNS, "vals", (PyObject*)pVals);

   fOutput.resize(fNOutputs); // holds classification probabilities or regression output
   npy_intp dimsOutput[2] = {(npy_intp)1, (npy_intp)fNOutputs};
   PyArrayObject* pOutput = (PyArrayObject*)PyArray_SimpleNewFromData(2, dimsOutput, NPY_FLOAT, (void*)&fOutput[0]);
   PyDict_SetItemString(fLocalNS, "output", (PyObject*)pOutput);
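   // NOTE: PyArray_SimpleNewFromData wraps the existing C buffers without copying,
   // so filling fVals before a prediction and reading fOutput afterwards exchanges
   // data directly with the numpy arrays "vals" and "output" on the Python side.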

   // Mark the model as setup
   fModelIsSetup = true;
}

void MethodPyKeras::Init() {
   if (!PyIsInitialized()) {
      Log() << kFATAL << "Python is not initialized" << Endl;
   }
   _import_array(); // required to use numpy arrays

   // Import Keras
   // NOTE: sys.argv has to be cleared because otherwise TensorFlow breaks
   PyRunString("import sys; sys.argv = ['']", "Set sys.argv failed");
   PyRunString("import keras", "Import Keras failed");

   // Set flag that model is not setup
   fModelIsSetup = false;
}

void MethodPyKeras::Train() {
   if (!fModelIsSetup) Log() << kFATAL << "Model is not setup for training" << Endl;

   /*
    * Load training data to numpy array
    */

   UInt_t nAllEvents = Data()->GetNTrainingEvents();
   UInt_t nValEvents = GetNumValidationSamples();
   UInt_t nTrainingEvents = nAllEvents - nValEvents;

   Log() << kINFO << "Split TMVA training data in " << nTrainingEvents << " training events and "
         << nValEvents << " validation events" << Endl;

   float* trainDataX = new float[nTrainingEvents*fNVars];
   float* trainDataY = new float[nTrainingEvents*fNOutputs];
   float* trainDataWeights = new float[nTrainingEvents];
   for (UInt_t i=0; i<nTrainingEvents; i++) {
      const TMVA::Event* e = GetTrainingEvent(i);
      // Fill variables
      for (UInt_t j=0; j<fNVars; j++) {
         trainDataX[j + i*fNVars] = e->GetValue(j);
      }
      // Fill targets
      // NOTE: For classification, convert the class number into a one-hot vector,
      // e.g., 1 -> [0, 1] or 0 -> [1, 0] for binary classification
      if (GetAnalysisType() == Types::kClassification || GetAnalysisType() == Types::kMulticlass) {
         for (UInt_t j=0; j<fNOutputs; j++) {
            trainDataY[j + i*fNOutputs] = 0;
         }
         trainDataY[e->GetClass() + i*fNOutputs] = 1;
      }
      else if (GetAnalysisType() == Types::kRegression) {
         for (UInt_t j=0; j<fNOutputs; j++) {
            trainDataY[j + i*fNOutputs] = e->GetTarget(j);
         }
      }
      else Log() << kFATAL << "Cannot fill target vector because analysis type is not known" << Endl;
      // Fill weights
      // NOTE: If no weight branch is given, this defaults to ones for all events
      trainDataWeights[i] = e->GetWeight();
   }

   npy_intp dimsTrainX[2] = {(npy_intp)nTrainingEvents, (npy_intp)fNVars};
   npy_intp dimsTrainY[2] = {(npy_intp)nTrainingEvents, (npy_intp)fNOutputs};
   npy_intp dimsTrainWeights[1] = {(npy_intp)nTrainingEvents};
   PyArrayObject* pTrainDataX = (PyArrayObject*)PyArray_SimpleNewFromData(2, dimsTrainX, NPY_FLOAT, (void*)trainDataX);
   PyArrayObject* pTrainDataY = (PyArrayObject*)PyArray_SimpleNewFromData(2, dimsTrainY, NPY_FLOAT, (void*)trainDataY);
   PyArrayObject* pTrainDataWeights = (PyArrayObject*)PyArray_SimpleNewFromData(1, dimsTrainWeights, NPY_FLOAT, (void*)trainDataWeights);
   PyDict_SetItemString(fLocalNS, "trainX", (PyObject*)pTrainDataX);
   PyDict_SetItemString(fLocalNS, "trainY", (PyObject*)pTrainDataY);
   PyDict_SetItemString(fLocalNS, "trainWeights", (PyObject*)pTrainDataWeights);
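   // NOTE: the numpy arrays created above wrap these C buffers without copying them,
   // so the trainData* arrays have to stay alive until training has finished; they
   // are deleted at the end of this method.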

   /*
    * Load validation data to numpy array
    */

   // NOTE: TMVA provides the validation data as a subset of the training data.
   // The test data are not used for validation; they are reserved for the real testing.

   float* valDataX = new float[nValEvents*fNVars];
   float* valDataY = new float[nValEvents*fNOutputs];
   float* valDataWeights = new float[nValEvents];
   // Validation events follow the training events in the TMVA training vector
   for (UInt_t i=0; i<nValEvents; i++) {
      UInt_t ievt = nTrainingEvents + i; // TMVA event index
      const TMVA::Event* e = GetTrainingEvent(ievt);
      // Fill variables
      for (UInt_t j=0; j<fNVars; j++) {
         valDataX[j + i*fNVars] = e->GetValue(j);
      }
      // Fill targets
      if (GetAnalysisType() == Types::kClassification || GetAnalysisType() == Types::kMulticlass) {
         for (UInt_t j=0; j<fNOutputs; j++) {
            valDataY[j + i*fNOutputs] = 0;
         }
         valDataY[e->GetClass() + i*fNOutputs] = 1;
      }
      else if (GetAnalysisType() == Types::kRegression) {
         for (UInt_t j=0; j<fNOutputs; j++) {
            valDataY[j + i*fNOutputs] = e->GetTarget(j);
         }
      }
      else Log() << kFATAL << "Cannot fill target vector because analysis type is not known" << Endl;
      // Fill weights
      valDataWeights[i] = e->GetWeight();
   }

   npy_intp dimsValX[2] = {(npy_intp)nValEvents, (npy_intp)fNVars};
   npy_intp dimsValY[2] = {(npy_intp)nValEvents, (npy_intp)fNOutputs};
   npy_intp dimsValWeights[1] = {(npy_intp)nValEvents};
   PyArrayObject* pValDataX = (PyArrayObject*)PyArray_SimpleNewFromData(2, dimsValX, NPY_FLOAT, (void*)valDataX);
   PyArrayObject* pValDataY = (PyArrayObject*)PyArray_SimpleNewFromData(2, dimsValY, NPY_FLOAT, (void*)valDataY);
   PyArrayObject* pValDataWeights = (PyArrayObject*)PyArray_SimpleNewFromData(1, dimsValWeights, NPY_FLOAT, (void*)valDataWeights);
   PyDict_SetItemString(fLocalNS, "valX", (PyObject*)pValDataX);
   PyDict_SetItemString(fLocalNS, "valY", (PyObject*)pValDataY);
   PyDict_SetItemString(fLocalNS, "valWeights", (PyObject*)pValDataWeights);

   /*
    * Train Keras model
    */

   // Setup parameters

   PyObject* pBatchSize = PyLong_FromLong(fBatchSize);
   PyObject* pNumEpochs = PyLong_FromLong(fNumEpochs);
   PyObject* pVerbose = PyLong_FromLong(fVerbose);
   PyDict_SetItemString(fLocalNS, "batchSize", pBatchSize);
   PyDict_SetItemString(fLocalNS, "numEpochs", pNumEpochs);
   PyDict_SetItemString(fLocalNS, "verbose", pVerbose);

   // Setup training callbacks
   PyRunString("callbacks = []");

   // Callback: Save only weights with smallest validation loss
   if (fSaveBestOnly) {
      PyRunString("callbacks.append(keras.callbacks.ModelCheckpoint('"+fFilenameTrainedModel+"', monitor='val_loss', verbose=verbose, save_best_only=True, mode='auto'))", "Failed to setup training callback: SaveBestOnly");
      Log() << kINFO << "Option SaveBestOnly: Only model weights with smallest validation loss will be stored" << Endl;
   }

   // Callback: Stop training early if no improvement in validation loss is observed
   if (fTriesEarlyStopping>=0) {
      TString tries;
      tries.Form("%i", fTriesEarlyStopping);
      PyRunString("callbacks.append(keras.callbacks.EarlyStopping(monitor='val_loss', patience="+tries+", verbose=verbose, mode='auto'))", "Failed to setup training callback: TriesEarlyStopping");
      Log() << kINFO << "Option TriesEarlyStopping: Training will stop after " << tries << " epochs with no improvement of the validation loss" << Endl;
   }

   // Callback: Learning rate scheduler
   if (fLearningRateSchedule!="") {
      // Setup a python dictionary with the desired learning rate steps
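      // e.g., "50,0.01;70,0.005" is parsed into schedulerSteps = {50: 0.01, 70: 0.005},
      // i.e., the learning rate is set to 0.01 at epoch 50 and to 0.005 at epoch 70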
      PyRunString("strScheduleSteps = '"+fLearningRateSchedule+"'\n"
                  "schedulerSteps = {}\n"
                  "for c in strScheduleSteps.split(';'):\n"
                  "    x = c.split(',')\n"
                  "    schedulerSteps[int(x[0])] = float(x[1])\n",
                  "Failed to setup steps for scheduler function from string: "+fLearningRateSchedule,
                  Py_file_input);
      // Set scheduler function as piecewise function with given steps
      PyRunString("def schedule(epoch, model=model, schedulerSteps=schedulerSteps):\n"
                  "    if epoch in schedulerSteps: return float(schedulerSteps[epoch])\n"
                  "    else: return float(model.optimizer.lr.get_value())\n",
                  "Failed to setup scheduler function with string: "+fLearningRateSchedule,
                  Py_file_input);
      // Setup callback
      PyRunString("callbacks.append(keras.callbacks.LearningRateScheduler(schedule))",
                  "Failed to setup training callback: LearningRateSchedule");
      Log() << kINFO << "Option LearningRateSchedule: Set learning rate during training: " << fLearningRateSchedule << Endl;
   }

   // Callback: TensorBoard
   if (fTensorBoard != "") {
      TString logdir = TString("'") + fTensorBoard + TString("'");
      PyRunString(
         "callbacks.append(keras.callbacks.TensorBoard(log_dir=" + logdir +
         ", histogram_freq=0, batch_size=batchSize, write_graph=True, write_grads=False, write_images=False))",
         "Failed to setup training callback: TensorBoard");
      Log() << kINFO << "Option TensorBoard: Log files for training monitoring are stored in: " << logdir << Endl;
   }

   // Train model
   PyRunString("history = model.fit(trainX, trainY, sample_weight=trainWeights, batch_size=batchSize, epochs=numEpochs, verbose=verbose, validation_data=(valX, valY, valWeights), callbacks=callbacks)",
               "Failed to train model");

   /*
    * Store trained model to file (only if option 'SaveBestOnly' is NOT activated,
    * because we do not want to overwrite the best model checkpoint)
    */

   if (!fSaveBestOnly) {
      PyRunString("model.save('"+fFilenameTrainedModel+"', overwrite=True)",
                  "Failed to save trained model: "+fFilenameTrainedModel);
      Log() << kINFO << "Trained model written to file: " << fFilenameTrainedModel << Endl;
   }

   /*
    * Clean-up
    */

   delete[] trainDataX;
   delete[] trainDataY;
   delete[] trainDataWeights;
   delete[] valDataX;
   delete[] valDataY;
   delete[] valDataWeights;
}

void MethodPyKeras::TestClassification() {
   MethodBase::TestClassification();
}

Double_t MethodPyKeras::GetMvaValue(Double_t *errLower, Double_t *errUpper) {
   // Cannot determine error
   NoErrorCalc(errLower, errUpper);

   // Check whether the model is setup
   // NOTE: unfortunately this is needed because during evaluation ProcessOptions is not called again
   if (!fModelIsSetup) {
      // Setup the trained model
      SetupKerasModel(true);
   }

   // Get signal probability (called mvaValue here)
   const TMVA::Event* e = GetEvent();
   for (UInt_t i=0; i<fNVars; i++) fVals[i] = e->GetValue(i);
   PyRunString("for i,p in enumerate(model.predict(vals)): output[i]=p\n",
               "Failed to get predictions");

   return fOutput[TMVA::Types::kSignal];
}

std::vector<Double_t> MethodPyKeras::GetMvaValues(Long64_t firstEvt, Long64_t lastEvt, Bool_t logProgress) {
   // Check whether the model is setup
   // NOTE: Unfortunately this is needed because during evaluation ProcessOptions is not called again
   if (!fModelIsSetup) {
      // Setup the trained model
      SetupKerasModel(true);
   }

   // Load data to numpy array
   Long64_t nEvents = Data()->GetNEvents();
   if (firstEvt > lastEvt || lastEvt > nEvents) lastEvt = nEvents;
   if (firstEvt < 0) firstEvt = 0;
   nEvents = lastEvt-firstEvt;

   // use timer
   Timer timer( nEvents, GetName(), kTRUE );

   if (logProgress)
      Log() << kHEADER << Form("[%s] : ",DataInfo().GetName())
            << "Evaluation of " << GetMethodName() << " on "
            << (Data()->GetCurrentType() == Types::kTraining ? "training" : "testing")
            << " sample (" << nEvents << " events)" << Endl;

   float* data = new float[nEvents*fNVars];
   for (UInt_t i=0; i<nEvents; i++) {
      Data()->SetCurrentEvent(i);
      const TMVA::Event *e = GetEvent();
      for (UInt_t j=0; j<fNVars; j++) {
         data[j + i*fNVars] = e->GetValue(j);
      }
   }

   npy_intp dimsData[2] = {(npy_intp)nEvents, (npy_intp)fNVars};
   PyArrayObject* pDataMvaValues = (PyArrayObject*)PyArray_SimpleNewFromData(2, dimsData, NPY_FLOAT, (void*)data);
   if (pDataMvaValues==0) Log() << "Failed to load data to Python array" << Endl;

   // Get prediction for all events
   PyObject* pModel = PyDict_GetItemString(fLocalNS, "model");
   if (pModel==0) Log() << kFATAL << "Failed to get model Python object" << Endl;
   PyArrayObject* pPredictions = (PyArrayObject*) PyObject_CallMethod(pModel, (char*)"predict", (char*)"O", pDataMvaValues);
   if (pPredictions==0) Log() << kFATAL << "Failed to get predictions" << Endl;
   delete[] data;

   // Load predictions to double vector
   // NOTE: The signal probability is taken from the output node with index Types::kSignal
   std::vector<double> mvaValues(nEvents);
   float* predictionsData = (float*) PyArray_DATA(pPredictions);
   for (UInt_t i=0; i<nEvents; i++) {
      mvaValues[i] = (double) predictionsData[i*fNOutputs + TMVA::Types::kSignal];
   }

   if (logProgress) {
      Log() << kINFO
            << "Elapsed time for evaluation of " << nEvents << " events: "
            << timer.GetElapsedTime() << " " << Endl;
   }

   return mvaValues;
}

std::vector<Float_t>& MethodPyKeras::GetRegressionValues() {
   // Check whether the model is setup
   // NOTE: unfortunately this is needed because during evaluation ProcessOptions is not called again
   if (!fModelIsSetup) {
      // Setup the model and load weights
      SetupKerasModel(true);
   }

   // Get regression values
   const TMVA::Event* e = GetEvent();
   for (UInt_t i=0; i<fNVars; i++) fVals[i] = e->GetValue(i);
   PyRunString("for i,p in enumerate(model.predict(vals)): output[i]=p\n",
               "Failed to get predictions");

   // Use inverse transformation of targets to get final regression values
   Event * eTrans = new Event(*e);
   for (UInt_t i=0; i<fNOutputs; ++i) {
      eTrans->SetTarget(i,fOutput[i]);
   }

   const Event* eTrans2 = GetTransformationHandler().InverseTransform(eTrans);
   for (UInt_t i=0; i<fNOutputs; ++i) {
      fOutput[i] = eTrans2->GetTarget(i);
   }

   return fOutput;
}

std::vector<Float_t>& MethodPyKeras::GetMulticlassValues() {
   // Check whether the model is setup
   // NOTE: unfortunately this is needed because during evaluation ProcessOptions is not called again
   if (!fModelIsSetup) {
      // Setup the model and load weights
      SetupKerasModel(true);
   }

   // Get class probabilities
   const TMVA::Event* e = GetEvent();
   for (UInt_t i=0; i<fNVars; i++) fVals[i] = e->GetValue(i);
   PyRunString("for i,p in enumerate(model.predict(vals)): output[i]=p\n",
               "Failed to get predictions");

   return fOutput;
}

void MethodPyKeras::ReadModelFromFile() {
}

void MethodPyKeras::GetHelpMessage() const {
// typical length of text line:
// "|--------------------------------------------------------------|"
   Log() << Endl;
   Log() << "Keras is a high-level API for the Theano and Tensorflow packages." << Endl;
   Log() << "This method wraps the training and prediction steps of the Keras" << Endl;
   Log() << "Python package for TMVA, so that data loading, preprocessing and" << Endl;
   Log() << "evaluation can be done within the TMVA system. To use this Keras" << Endl;
   Log() << "interface, you have to generate a model with Keras first. Then," << Endl;
   Log() << "this model can be loaded and trained in TMVA." << Endl;
   Log() << Endl;
}
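
As the help message notes, a Keras model has to be generated and saved with Keras before
PyKeras can load and train it. The sketch below illustrates that workflow in Python; it is
not part of this source file, and the file name model.h5, the small network layout, and the
already configured factory and dataloader objects are assumptions made for the example.

   # Illustrative sketch: create and save a Keras model, then book the PyKeras method.
   # Assumes 4 input variables, binary classification, and a TMVA factory/dataloader
   # set up in the usual way (not shown here).
   from keras.models import Sequential
   from keras.layers import Dense
   from ROOT import TMVA

   # Step 1: define the initial Keras model and store it in the file passed via FilenameModel
   model = Sequential()
   model.add(Dense(64, activation='relu', input_dim=4))
   model.add(Dense(2, activation='softmax'))
   model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
   model.save('model.h5')

   # Step 2: load and train the model through TMVA (PyKeras reads FilenameModel and
   # applies the options declared in MethodPyKeras::DeclareOptions)
   TMVA.Tools.Instance()
   TMVA.PyMethodBase.PyInitialize()
   factory.BookMethod(dataloader, TMVA.Types.kPyKeras, 'PyKeras',
                      'H:!V:FilenameModel=model.h5:NumEpochs=20:BatchSize=32:'
                      'SaveBestOnly=true:ValidationSize=20%')
   factory.TrainAllMethods()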