7#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
8#include <numpy/arrayobject.h>
// RAII guard for the Python Global Interpreter Lock (GIL): the constructor
// acquires the GIL and the destructor releases it, so the lock is released on
// every exit path, including exceptions. (Fragment of a scraped file: the
// enclosing struct declaration is not visible; leading numbers are the
// original source line numbers embedded by the scrape.)
24 PyGILState_STATE m_GILState; // token returned by PyGILState_Ensure(), consumed by Release
27 PyGILRAII() : m_GILState(PyGILState_Ensure()) {} // acquire the GIL
28 ~PyGILRAII() { PyGILState_Release(m_GILState); } // release the GIL on scope exit
// --- Option declarations (non-contiguous fragment: the embedded numbers are
// original source line numbers and they jump, so sibling DeclareOptionRef
// calls for the strings below are only partially visible here).
86 DeclareOptionRef(
fTriesEarlyStopping,
"TriesEarlyStopping",
"Number of epochs with no improvement in validation loss after which training will be stopped. The default or a negative number deactivates this option.");
// NOTE(review): the two identical TensorBoard descriptions below belong to two
// separate option declarations whose opening lines are missing from this view.
89 "Write a log during training to visualize and monitor the training performance with TensorBoard");
91 "Write a log during training to visualize and monitor the training performance with TensorBoard");
// ValidationSize option description: accepts a fraction ("0.2"), a percentage
// ("20%"), or an absolute event count ("100"); parsed in GetNumValidationSamples.
94 "Specify as 0.2 or 20% to use a fifth of the data set as validation set. "
95 "Specify as 100 to use exactly 100 events. (Default: 20%)");
// --- GetNumValidationSamples (fragment, interior lines missing) ---
// Parses fNumValidationString into an absolute number of validation events:
//   "20%" / "20.0%" -> fraction of the training set,
//   "0.2"           -> fraction of the training set,
//   "100"           -> absolute event count.
// Any parse failure or out-of-range result aborts via Log() << kFATAL.
110 Int_t nValidationSamples = 0;
// Percentage form: strip-and-parse, then scale the training-set size.
115 if (fNumValidationString.EndsWith(
"%")) {
120 Double_t valSizeAsDouble = fNumValidationString.Atof() / 100.0;
121 nValidationSamples = GetEventCollection(
Types::kTraining).size() * valSizeAsDouble;
// NOTE(review): this kFATAL is the else-branch of a validity check whose
// condition line is missing from this scrape (original lines 116-119, 122).
123 Log() << kFATAL <<
"Cannot parse number \"" << fNumValidationString
124 <<
"\". Expected string like \"20%\" or \"20.0%\"." <<
Endl;
126 }
else if (fNumValidationString.IsFloat()) {
127 Double_t valSizeAsDouble = fNumValidationString.Atof();
// Values < 1.0 are interpreted as a fraction of the training set,
// values >= 1.0 as an absolute number of events (truncated to Int_t).
129 if (valSizeAsDouble < 1.0) {
131 nValidationSamples = GetEventCollection(
Types::kTraining).size() * valSizeAsDouble;
134 nValidationSamples = valSizeAsDouble;
137 Log() << kFATAL <<
"Cannot parse number \"" << fNumValidationString <<
"\". Expected string like \"0.2\" or \"100\"."
// --- Sanity checks on the computed size: must be strictly positive and
// strictly smaller than the training set. Each failure is fatal.
143 if (nValidationSamples < 0) {
144 Log() << kFATAL <<
"Validation size \"" << fNumValidationString <<
"\" is negative." <<
Endl;
147 if (nValidationSamples == 0) {
148 Log() << kFATAL <<
"Validation size \"" << fNumValidationString <<
"\" is zero." <<
Endl;
151 if (nValidationSamples >= (
Int_t)trainingSetSize) {
152 Log() << kFATAL <<
"Validation size \"" << fNumValidationString
153 <<
"\" is larger than or equal in size to training set (size=\"" << trainingSetSize <<
"\")." <<
Endl;
156 return nValidationSamples;
// --- TensorFlow-specific backend configuration (fragment) ---
// When the Keras backend is TensorFlow, builds a tf session config (CPU
// thread count, GPU options) and installs it via K.set_session; otherwise
// warns that fNumThreads / fGpuOptions cannot be applied.
171 Log() << kINFO <<
"Using TensorFlow backend - setting special configuration options " <<
Endl;
173 PyRunString(
"from keras.backend import tensorflow_backend as K");
// Only override the thread count if the user requested a positive value.
176 if (num_threads > 0) {
177 Log() << kINFO <<
"Setting the CPU number of threads = " << num_threads <<
Endl;
// NOTE(review): the PyRunString/Form call that consumes these two thread
// counts starts on a source line missing from this scrape.
179 num_threads,num_threads));
// Apply each user-supplied GPU option (tokenized into optlist elsewhere)
// as a gpu_options.<name> assignment on the session config.
189 for (
int item = 0; item < optlist->
GetEntries(); ++item) {
190 Log() << kINFO <<
"Applying GPU option: gpu_options." << optlist->
At(item)->
GetName() <<
Endl;
194 PyRunString(
"sess = tf.Session(config=session_conf)");
// Non-TensorFlow backends: the options cannot be honored — warn, don't abort.
199 Log() << kWARNING <<
"Cannot set the given " <<
fNumThreads <<
" threads when not using tensorflow as backend" <<
Endl;
201 Log() << kWARNING <<
"Cannot set the given GPU option " <<
fGpuOptions <<
" when not using tensorflow as backend" <<
Endl;
// --- SetupKerasModel (fragment) ---
// Optionally reloads a previously trained model from file, then allocates
// single-event NumPy buffers used at prediction time: pVals wraps the input
// variables (fVals) and pOutput wraps the network output vector (fOutput).
218 if (loadTrainedModel) {
224 PyRunString(
"model = keras.models.load_model('"+filenameLoadModel+
"')",
225 "Failed to load Keras model from file: "+filenameLoadModel);
226 Log() << kINFO <<
"Load model from file: " << filenameLoadModel <<
Endl;
// Fallback of an analysis-type dispatch whose branches are missing here.
236 else Log() << kFATAL <<
"Selected analysis type is not implemented" <<
Endl;
// Shape (1, fNVars): one event's worth of input variables. The arrays wrap
// existing C buffers (SimpleNewFromData), so NumPy does NOT own the memory —
// fVals/fOutput must outlive these PyArrayObjects.
240 npy_intp dimsVals[2] = {(npy_intp)1, (npy_intp)
fNVars};
241 PyArrayObject* pVals = (PyArrayObject*)PyArray_SimpleNewFromData(2, dimsVals, NPY_FLOAT, (
void*)
fVals);
// Shape (1, fNOutputs): the prediction target buffer, backed by fOutput.
245 npy_intp dimsOutput[2] = {(npy_intp)1, (npy_intp)
fNOutputs};
246 PyArrayObject* pOutput = (PyArrayObject*)PyArray_SimpleNewFromData(2, dimsOutput, NPY_FLOAT, (
void*)&
fOutput[0]);
// --- Python-side initialization (fragment) ---
// Holds the GIL for the whole scope, verifies the interpreter is up,
// gives Python an empty argv (some packages inspect sys.argv), and
// imports keras, aborting with kFATAL/error message on failure.
255 TMVA::Internal::PyGILRAII raii; // GIL held until end of enclosing scope
258 Log() << kFATAL <<
"Python is not initialized" <<
Endl;
// sys.argv must exist before importing libraries that read it.
264 PyRunString(
"import sys; sys.argv = ['']",
"Set sys.argv failed");
265 PyRunString(
"import keras",
"Import Keras failed");
// --- Train() (fragment, many interior lines missing) ---
// Splits the TMVA training events into training/validation parts, copies
// them into raw float buffers, wraps those buffers as NumPy arrays, exposes
// them plus fit parameters in the Python namespace (fLocalNS), sets up the
// requested Keras callbacks, and runs model.fit.
280 UInt_t nTrainingEvents = nAllEvents - nValEvents;
282 Log() << kINFO <<
"Split TMVA training data in " << nTrainingEvents <<
" training events and "
283 << nValEvents <<
" validation events" <<
Endl;
// Flat row-major buffers: X is (nTrainingEvents x fNVars),
// Y is (nTrainingEvents x fNOutputs), weights is 1-D.
285 float* trainDataX =
new float[nTrainingEvents*
fNVars];
286 float* trainDataY =
new float[nTrainingEvents*
fNOutputs];
287 float* trainDataWeights =
new float[nTrainingEvents];
288 for (
UInt_t i=0; i<nTrainingEvents; i++) {
// Per-event copy: inputs, targets (analysis-type dependent), and weight.
292 trainDataX[j + i*
fNVars] =
e->GetValue(j);
305 trainDataY[j + i*
fNOutputs] =
e->GetTarget(j);
308 else Log() << kFATAL <<
"Can not fill target vector because analysis type is not known" <<
Endl;
311 trainDataWeights[i] =
e->GetWeight();
// Wrap the raw buffers as NumPy arrays; SimpleNewFromData does not copy or
// take ownership, so the delete[] calls at the end must not run before
// Python is done with them.
314 npy_intp dimsTrainX[2] = {(npy_intp)nTrainingEvents, (npy_intp)
fNVars};
315 npy_intp dimsTrainY[2] = {(npy_intp)nTrainingEvents, (npy_intp)
fNOutputs};
316 npy_intp dimsTrainWeights[1] = {(npy_intp)nTrainingEvents};
317 PyArrayObject* pTrainDataX = (PyArrayObject*)PyArray_SimpleNewFromData(2, dimsTrainX, NPY_FLOAT, (
void*)trainDataX);
318 PyArrayObject* pTrainDataY = (PyArrayObject*)PyArray_SimpleNewFromData(2, dimsTrainY, NPY_FLOAT, (
void*)trainDataY);
319 PyArrayObject* pTrainDataWeights = (PyArrayObject*)PyArray_SimpleNewFromData(1, dimsTrainWeights, NPY_FLOAT, (
void*)trainDataWeights);
322 PyDict_SetItemString(
fLocalNS,
"trainWeights", (
PyObject*)pTrainDataWeights);
// Same buffer/NumPy dance for the validation split (events are taken from
// the tail of the collection: index offset nTrainingEvents).
332 float* valDataX =
new float[nValEvents*
fNVars];
333 float* valDataY =
new float[nValEvents*
fNOutputs];
334 float* valDataWeights =
new float[nValEvents];
336 for (
UInt_t i=0; i< nValEvents ; i++) {
337 UInt_t ievt = nTrainingEvents + i;
341 valDataX[j + i*
fNVars] =
e->GetValue(j);
355 else Log() << kFATAL <<
"Can not fill target vector because analysis type is not known" <<
Endl;
357 valDataWeights[i] =
e->GetWeight();
360 npy_intp dimsValX[2] = {(npy_intp)nValEvents, (npy_intp)
fNVars};
361 npy_intp dimsValY[2] = {(npy_intp)nValEvents, (npy_intp)
fNOutputs};
362 npy_intp dimsValWeights[1] = {(npy_intp)nValEvents};
363 PyArrayObject* pValDataX = (PyArrayObject*)PyArray_SimpleNewFromData(2, dimsValX, NPY_FLOAT, (
void*)valDataX);
364 PyArrayObject* pValDataY = (PyArrayObject*)PyArray_SimpleNewFromData(2, dimsValY, NPY_FLOAT, (
void*)valDataY);
365 PyArrayObject* pValDataWeights = (PyArrayObject*)PyArray_SimpleNewFromData(1, dimsValWeights, NPY_FLOAT, (
void*)valDataWeights);
// Expose fit hyper-parameters to the Python snippets below.
379 PyDict_SetItemString(
fLocalNS,
"batchSize", pBatchSize);
380 PyDict_SetItemString(
fLocalNS,
"numEpochs", pNumEpochs);
381 PyDict_SetItemString(
fLocalNS,
"verbose", pVerbose);
// Callback: keep only the weights with the smallest validation loss.
388 PyRunString(
"callbacks.append(keras.callbacks.ModelCheckpoint('"+
fFilenameTrainedModel+
"', monitor='val_loss', verbose=verbose, save_best_only=True, mode='auto'))",
"Failed to setup training callback: SaveBestOnly");
389 Log() << kINFO <<
"Option SaveBestOnly: Only model weights with smallest validation loss will be stored" <<
Endl;
// Callback: stop training after `tries` epochs without val_loss improvement.
396 PyRunString(
"callbacks.append(keras.callbacks.EarlyStopping(monitor='val_loss', patience="+tries+
", verbose=verbose, mode='auto'))",
"Failed to setup training callback: TriesEarlyStopping");
397 Log() << kINFO <<
"Option TriesEarlyStopping: Training will stop after " << tries <<
" number of epochs with no improvement of validation loss" <<
Endl;
// Learning-rate schedule: "epoch,lr;epoch,lr;..." parsed into a dict, then
// served via a LearningRateScheduler callback (falls back to the current lr).
404 "schedulerSteps = {}\n"
405 "for c in strScheduleSteps.split(';'):\n"
406 " x = c.split(',')\n"
407 " schedulerSteps[int(x[0])] = float(x[1])\n",
411 PyRunString(
"def schedule(epoch, model=model, schedulerSteps=schedulerSteps):\n"
412 " if epoch in schedulerSteps: return float(schedulerSteps[epoch])\n"
413 " else: return float(model.optimizer.lr.get_value())\n",
417 PyRunString(
"callbacks.append(keras.callbacks.LearningRateScheduler(schedule))",
418 "Failed to setup training callback: LearningRateSchedule");
// Callback: TensorBoard logging into the user-configured directory.
426 "callbacks.append(keras.callbacks.TensorBoard(log_dir=" + logdir +
427 ", histogram_freq=0, batch_size=batchSize, write_graph=True, write_grads=False, write_images=False))",
428 "Failed to setup training callback: TensorBoard");
429 Log() << kINFO <<
"Option TensorBoard: Log files for training monitoring are stored in: " << logdir <<
Endl;
// The actual training run, with sample weights and the validation triple.
433 PyRunString(
"history = model.fit(trainX, trainY, sample_weight=trainWeights, batch_size=batchSize, epochs=numEpochs, verbose=verbose, validation_data=(valX, valY, valWeights), callbacks=callbacks)",
434 "Failed to train model");
// NOTE(review): only the weight buffers' delete[] are visible here; the
// delete[] for trainDataX/Y and valDataX/Y presumably sit on the scraped-out
// lines 451-452 / 454-455 — verify against the full source.
453 delete[] trainDataWeights;
456 delete[] valDataWeights;
// Run the model on the single-event input array 'vals' and copy each
// prediction component into the pre-bound 'output' buffer (fOutput).
477 PyRunString(
"for i,p in enumerate(model.predict(vals)): output[i]=p\n",
478 "Failed to get predictions");
// --- GetMvaValues (fragment) ---
// Batch evaluation: clamps the [firstEvt, lastEvt) range, copies the events'
// input variables into one flat float buffer, wraps it as a NumPy array, and
// calls model.predict(...) once for all events via the Python C-API.
493 if (firstEvt > lastEvt || lastEvt > nEvents) lastEvt = nEvents;
494 if (firstEvt < 0) firstEvt = 0;
495 nEvents = lastEvt-firstEvt;
504 <<
" sample (" << nEvents <<
" events)" <<
Endl;
// Row-major (nEvents x fNVars) input buffer.
506 float* data =
new float[nEvents*
fNVars];
507 for (
UInt_t i=0; i<nEvents; i++) {
511 data[j + i*
fNVars] =
e->GetValue(j);
// Wrap without copy: 'data' must stay alive while pDataMvaValues is used.
515 npy_intp dimsData[2] = {(npy_intp)nEvents, (npy_intp)
fNVars};
516 PyArrayObject* pDataMvaValues = (PyArrayObject*)PyArray_SimpleNewFromData(2, dimsData, NPY_FLOAT, (
void*)data);
// NOTE(review): unlike the checks below, this failure path has no kFATAL
// severity on the visible line — confirm against the full source whether
// that is intentional (it would continue with a null array otherwise).
517 if (pDataMvaValues==0)
Log() <<
"Failed to load data to Python array" <<
Endl;
521 if (pModel==0)
Log() << kFATAL <<
"Failed to get model Python object" <<
Endl;
// Equivalent of Python 'model.predict(pDataMvaValues)'.
522 PyArrayObject* pPredictions = (PyArrayObject*) PyObject_CallMethod(pModel, (
char*)
"predict", (
char*)
"O", pDataMvaValues);
523 if (pPredictions==0)
Log() << kFATAL <<
"Failed to get predictions" <<
Endl;
// Read the prediction matrix back out (raw float* view of the NumPy data).
528 std::vector<double> mvaValues(nEvents);
529 float* predictionsData = (
float*) PyArray_DATA(pPredictions);
530 for (
UInt_t i=0; i<nEvents; i++) {
536 <<
"Elapsed time for evaluation of " << nEvents <<
" events: "
// Two further single-event prediction runs (fragments of the regression and
// multiclass value getters): same pattern as above — evaluate 'vals' and
// write each component of the prediction into the bound 'output' buffer.
555 PyRunString(
"for i,p in enumerate(model.predict(vals)): output[i]=p\n",
556 "Failed to get predictions");
583 PyRunString(
"for i,p in enumerate(model.predict(vals)): output[i]=p\n",
584 "Failed to get predictions");
// --- GetHelpMessage (fragment) ---
// Prints a short description of the PyKeras method to the log.
596 Log() <<
"Keras is a high-level API for the Theano and Tensorflow packages." <<
Endl;
597 Log() <<
"This method wraps the training and predictions steps of the Keras" <<
Endl;
598 Log() <<
"Python package for TMVA, so that dataloading, preprocessing and" <<
Endl;
599 Log() <<
"evaluation can be done within the TMVA system. To use this Keras" <<
Endl;
600 Log() <<
"interface, you have to generate a model with Keras first. Then," <<
Endl;
601 Log() <<
"this model can be loaded and trained in TMVA." <<
Endl;
// --- GetKerasBackend (fragment) ---
// Determines the active Keras backend by evaluating keras.backend.backend()
// in Python and reading the boolean result out of the local namespace.
// Tests tensorflow, then theano, then cntk; the return statements for each
// match sit on lines missing from this scrape.
608 PyRunString(
"keras_backend_is_set = keras.backend.backend() == \"tensorflow\"");
// PyDict_GetItemString returns a borrowed reference (may be nullptr).
609 PyObject * keras_backend = PyDict_GetItemString(
fLocalNS,
"keras_backend_is_set");
// Identity comparison against Py_True is valid: True is a singleton.
610 if (keras_backend !=
nullptr && keras_backend == Py_True)
613 PyRunString(
"keras_backend_is_set = keras.backend.backend() == \"theano\"");
614 keras_backend = PyDict_GetItemString(
fLocalNS,
"keras_backend_is_set");
615 if (keras_backend !=
nullptr && keras_backend == Py_True)
618 PyRunString(
"keras_backend_is_set = keras.backend.backend() == \"cntk\"");
619 keras_backend = PyDict_GetItemString(
fLocalNS,
"keras_backend_is_set");
620 if (keras_backend !=
nullptr && keras_backend == Py_True)
#define REGISTER_METHOD(CLASS)
for example
char * Form(const char *fmt,...)
OptionBase * DeclareOptionRef(T &ref, const TString &name, const TString &desc="")
Class that contains all the data information.
UInt_t GetNClasses() const
UInt_t GetNTargets() const
Types::ETreeType GetCurrentType() const
Long64_t GetNEvents(Types::ETreeType type=Types::kMaxTreeType) const
Long64_t GetNTrainingEvents() const
void SetCurrentEvent(Long64_t ievt) const
void SetTarget(UInt_t itgt, Float_t value)
set the target value (dimension itgt) to value
Float_t GetTarget(UInt_t itgt) const
const char * GetName() const
Types::EAnalysisType GetAnalysisType() const
const TString & GetWeightFileDir() const
const TString & GetMethodName() const
const Event * GetEvent() const
DataSetInfo & DataInfo() const
virtual void TestClassification()
initialization
UInt_t GetNVariables() const
TransformationHandler & GetTransformationHandler(Bool_t takeReroutedIfAvailable=true)
void NoErrorCalc(Double_t *const err, Double_t *const errUpper)
const Event * GetTrainingEvent(Long64_t ievt) const
void GetHelpMessage() const
std::vector< float > fOutput
virtual void TestClassification()
initialization
Int_t fTriesEarlyStopping
EBackendType
enumeration defining the used Keras backend
void SetupKerasModel(Bool_t loadTrainedModel)
std::vector< Float_t > & GetMulticlassValues()
UInt_t GetNumValidationSamples()
Validation of the ValidationSize option.
Double_t GetMvaValue(Double_t *errLower, Double_t *errUpper)
std::vector< Float_t > & GetRegressionValues()
TString fNumValidationString
Bool_t HasAnalysisType(Types::EAnalysisType type, UInt_t numberClasses, UInt_t)
TString GetKerasBackendName()
MethodPyKeras(const TString &jobName, const TString &methodTitle, DataSetInfo &dsi, const TString &theOption="")
TString fLearningRateSchedule
EBackendType GetKerasBackend()
Get the Keras backend (can be: TensorFlow, Theano or CNTK)
TString fFilenameTrainedModel
std::vector< Double_t > GetMvaValues(Long64_t firstEvt, Long64_t lastEvt, Bool_t logProgress)
get all the MVA values for the events of the current Data type
static int PyIsInitialized()
Check Python interpreter initialization status.
void PyRunString(TString code, TString errorMessage="Failed to run python code", int start=Py_single_input)
Execute Python code from string.
Timing information for training and evaluation of MVA methods.
TString GetElapsedTime(Bool_t Scientific=kTRUE)
returns pretty string with elapsed time
Singleton class for Global types used by TMVA.
Int_t GetEntries() const
Return the number of objects in array (i.e. the number of occupied slots).
TObject * At(Int_t idx) const
virtual const char * GetName() const
Returns name of object.
Bool_t IsFloat() const
Returns kTRUE if string contains a floating point or integer number.
TObjArray * Tokenize(const TString &delim) const
This function is used to isolate sequential tokens in a TString.
static TString Format(const char *fmt,...)
Static method which formats a string using a printf style format descriptor and return a TString.
void Form(const char *fmt,...)
Formats a string using a printf style format descriptor.
create variable transformations
MsgLogger & Endl(MsgLogger &ml)