TMVA_SOFIE_RDataFrame_JIT.C File Reference

Detailed Description

This macro provides an example of using a model trained with Keras and running inference with SOFIE and RDataFrame. It takes as input the Keras model generated by the TMVA_Higgs_Classification.C tutorial, so you need to run that macro before this one.

In this case the input model file is parsed and the inference is run within the same macro, making use of the ROOT JITing capability.
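Once the header and the weight file have been generated (as done in the macro below), the Session class produced by SOFIE can also be evaluated directly, without RDataFrame. The following minimal sketch is not part of the macro and uses purely illustrative input values; it assumes the generated header shown at the end of this page is available:

#include "Higgs_trained_model.hxx"  // header generated by model.OutputGenerated(...)
#include <iostream>
#include <vector>

void evaluate_single_event()
{
   // the Session constructor reads the weight file written alongside the header
   TMVA_SOFIE_Higgs_trained_model::Session session("Higgs_trained_model.dat");
   // the model expects 7 input features (values here are illustrative only)
   std::vector<float> input = {0.9f, 1.0f, 1.1f, 0.8f, 1.2f, 0.9f, 1.0f};
   // infer() returns the two sigmoid outputs of the network
   std::vector<float> output = session.infer(input.data());
   std::cout << "DNN output: " << output[0] << ", " << output[1] << std::endl;
}

The full macro is listed below.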

using namespace TMVA::Experimental;

/// Function to compile the generated model and to declare the SofieFunctor
/// used by RDF.
/// It assumes that the model name is the same as the base name of the header file.
void CompileModelForRDF(const std::string & headerModelFile, unsigned int ninputs, unsigned int nslots = 0) {
   std::string modelName = headerModelFile.substr(0, headerModelFile.find(".hxx"));
   std::string cmd = std::string("#include \"") + headerModelFile + std::string("\"");
   auto ret = gInterpreter->Declare(cmd.c_str());
   if (!ret)
      throw std::runtime_error("Error compiling : " + cmd);
   std::cout << "compiled : " << cmd << std::endl;

   cmd = "auto sofie_functor = TMVA::Experimental::SofieFunctor<" + std::to_string(ninputs) + ",TMVA_SOFIE_" +
         modelName + "::Session>(" + std::to_string(nslots) + ");";
   ret = gInterpreter->Declare(cmd.c_str());
   if (!ret)
      throw std::runtime_error("Error compiling : " + cmd);
   std::cout << "compiled : " << cmd << std::endl;

   std::cout << "Model is ready to be evaluated" << std::endl;
   return;
}
void TMVA_SOFIE_RDataFrame_JIT(std::string modelFile = "Higgs_trained_model.h5") {
   // check if the input file exists
   if (gSystem->AccessPathName(modelFile.c_str())) {
      Info("TMVA_SOFIE_RDataFrame", "You need to run TMVA_Higgs_Classification.C to generate the Keras trained model");
      return;
   }

   // parse the input Keras model into an RModel object
   SOFIE::RModel model = SOFIE::PyKeras::Parse(modelFile);

   std::string modelName = modelFile.substr(0, modelFile.find(".h5"));
   std::string modelHeaderFile = modelName + std::string(".hxx");

   // generate the inference code
   model.Generate();
   model.OutputGenerated(modelHeaderFile);
   model.PrintGenerated();

   // check that the weight file also exists
   std::string modelWeightFile = modelName + std::string(".dat");
   if (gSystem->AccessPathName(modelWeightFile.c_str())) {
      Error("TMVA_SOFIE_RDataFrame", "Generated weight file is missing");
      return;
   }

   // now compile the model using the ROOT JIT (see the function above)
   CompileModelForRDF(modelHeaderFile, 7);

   std::string inputFileName = "Higgs_data.root";
   std::string inputFile = "http://root.cern.ch/files/" + inputFileName;

   ROOT::RDataFrame df1("sig_tree", inputFile);
   auto h1 = df1.Define("DNN_Value", "sofie_functor(rdfslot_, m_jj, m_jjj, m_lv, m_jlv, m_bb, m_wbb, m_wwbb)")
                .Histo1D({"h_sig", "", 100, 0, 1}, "DNN_Value");

   ROOT::RDataFrame df2("bkg_tree", inputFile);
   auto h2 = df2.Define("DNN_Value", "sofie_functor(rdfslot_, m_jj, m_jjj, m_lv, m_jlv, m_bb, m_wbb, m_wwbb)")
                .Histo1D({"h_bkg", "", 100, 0, 1}, "DNN_Value");
   gStyle->SetOptStat(0);

   h1->SetLineColor(kRed);
   h2->SetLineColor(kBlue);

   auto c1 = new TCanvas();
   h2->DrawClone();
   h1->DrawClone("SAME");
   c1->BuildLegend();
}
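The macro calls CompileModelForRDF with the default nslots = 0, which is sufficient for a single-threaded RDataFrame event loop. As a hedged sketch (assuming the SofieFunctor helper creates one inference Session per RDataFrame processing slot), a multi-threaded run could size nslots from the thread pool, reusing the helper function defined above:

   ROOT::EnableImplicitMT();                         // enable multi-threaded RDataFrame
   unsigned int nslots = ROOT::GetThreadPoolSize();  // assumed: one Session per processing slot
   CompileModelForRDF("Higgs_trained_model.hxx", 7, nslots);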
Running the macro produces the output below: the parser message and the generated inference code printed by model.PrintGenerated(), followed by the JIT compilation messages.

Model has not a defined batch size, assume is 1 - input shape for tensor dense_input : { 1 , 7 }
//Code generated automatically by TMVA for Inference of Model file [Higgs_trained_model.h5] at [Tue Mar 19 18:20:38 2024]
#ifndef ROOT_TMVA_SOFIE_HIGGS_TRAINED_MODEL
#define ROOT_TMVA_SOFIE_HIGGS_TRAINED_MODEL
#include <algorithm>
#include <vector>
#include <cmath>
#include "TMVA/SOFIE_common.hxx"
#include <fstream>
namespace TMVA_SOFIE_Higgs_trained_model{
namespace BLAS{
extern "C" void sgemv_(const char * trans, const int * m, const int * n, const float * alpha, const float * A,
const int * lda, const float * X, const int * incx, const float * beta, const float * Y, const int * incy);
extern "C" void sgemm_(const char * transa, const char * transb, const int * m, const int * n, const int * k,
const float * alpha, const float * A, const int * lda, const float * B, const int * ldb,
const float * beta, float * C, const int * ldc);
}//BLAS
struct Session {
std::vector<float> fTensor_dense4bias0 = std::vector<float>(2);
float * tensor_dense4bias0 = fTensor_dense4bias0.data();
std::vector<float> fTensor_dense3bias0 = std::vector<float>(64);
float * tensor_dense3bias0 = fTensor_dense3bias0.data();
std::vector<float> fTensor_dense3kernel0 = std::vector<float>(4096);
float * tensor_dense3kernel0 = fTensor_dense3kernel0.data();
std::vector<float> fTensor_dense2bias0 = std::vector<float>(64);
float * tensor_dense2bias0 = fTensor_dense2bias0.data();
std::vector<float> fTensor_dense4kernel0 = std::vector<float>(128);
float * tensor_dense4kernel0 = fTensor_dense4kernel0.data();
std::vector<float> fTensor_dense2kernel0 = std::vector<float>(4096);
float * tensor_dense2kernel0 = fTensor_dense2kernel0.data();
std::vector<float> fTensor_dense1bias0 = std::vector<float>(64);
float * tensor_dense1bias0 = fTensor_dense1bias0.data();
std::vector<float> fTensor_dense1kernel0 = std::vector<float>(4096);
float * tensor_dense1kernel0 = fTensor_dense1kernel0.data();
std::vector<float> fTensor_densebias0 = std::vector<float>(64);
float * tensor_densebias0 = fTensor_densebias0.data();
std::vector<float> fTensor_densekernel0 = std::vector<float>(448);
float * tensor_densekernel0 = fTensor_densekernel0.data();
//--- declare and allocate the intermediate tensors
std::vector<float> fTensor_dense4Sigmoid0 = std::vector<float>(2);
float * tensor_dense4Sigmoid0 = fTensor_dense4Sigmoid0.data();
std::vector<float> fTensor_dense2bias0bcast = std::vector<float>(64);
float * tensor_dense2bias0bcast = fTensor_dense2bias0bcast.data();
std::vector<float> fTensor_denseDense = std::vector<float>(64);
float * tensor_denseDense = fTensor_denseDense.data();
std::vector<float> fTensor_dense4Dense = std::vector<float>(2);
float * tensor_dense4Dense = fTensor_dense4Dense.data();
std::vector<float> fTensor_dense1bias0bcast = std::vector<float>(64);
float * tensor_dense1bias0bcast = fTensor_dense1bias0bcast.data();
std::vector<float> fTensor_dense1Dense = std::vector<float>(64);
float * tensor_dense1Dense = fTensor_dense1Dense.data();
std::vector<float> fTensor_densebias0bcast = std::vector<float>(64);
float * tensor_densebias0bcast = fTensor_densebias0bcast.data();
std::vector<float> fTensor_dense2Relu0 = std::vector<float>(64);
float * tensor_dense2Relu0 = fTensor_dense2Relu0.data();
std::vector<float> fTensor_denseRelu0 = std::vector<float>(64);
float * tensor_denseRelu0 = fTensor_denseRelu0.data();
std::vector<float> fTensor_dense1Relu0 = std::vector<float>(64);
float * tensor_dense1Relu0 = fTensor_dense1Relu0.data();
std::vector<float> fTensor_dense3bias0bcast = std::vector<float>(64);
float * tensor_dense3bias0bcast = fTensor_dense3bias0bcast.data();
std::vector<float> fTensor_dense2Dense = std::vector<float>(64);
float * tensor_dense2Dense = fTensor_dense2Dense.data();
std::vector<float> fTensor_dense3Relu0 = std::vector<float>(64);
float * tensor_dense3Relu0 = fTensor_dense3Relu0.data();
std::vector<float> fTensor_dense3Dense = std::vector<float>(64);
float * tensor_dense3Dense = fTensor_dense3Dense.data();
std::vector<float> fTensor_dense4bias0bcast = std::vector<float>(2);
float * tensor_dense4bias0bcast = fTensor_dense4bias0bcast.data();
Session(std::string filename ="Higgs_trained_model.dat") {
//--- reading weights from file
std::ifstream f;
f.open(filename);
if (!f.is_open()) {
throw std::runtime_error("tmva-sofie failed to open file for input weights");
}
std::string tensor_name;
size_t length;
f >> tensor_name >> length;
if (tensor_name != "tensor_dense4bias0" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_dense4bias0 , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_dense4bias0[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_dense3bias0" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_dense3bias0 , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 64) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 64 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_dense3bias0[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_dense3kernel0" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_dense3kernel0 , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 4096) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 4096 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_dense3kernel0[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_dense2bias0" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_dense2bias0 , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 64) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 64 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_dense2bias0[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_dense4kernel0" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_dense4kernel0 , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 128) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 128 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_dense4kernel0[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_dense2kernel0" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_dense2kernel0 , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 4096) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 4096 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_dense2kernel0[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_dense1bias0" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_dense1bias0 , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 64) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 64 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_dense1bias0[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_dense1kernel0" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_dense1kernel0 , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 4096) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 4096 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_dense1kernel0[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_densebias0" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_densebias0 , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 64) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 64 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_densebias0[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_densekernel0" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_densekernel0 , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 448) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 448 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_densekernel0[i];
f.close();
//---- allocate the intermediate dynamic tensors
//--- broadcast bias tensor densebias0for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_densebias0,{ 64 }, { 1 , 64 });
std::copy(data, data + 64, tensor_densebias0bcast);
delete [] data;
}
//--- broadcast bias tensor dense1bias0for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_dense1bias0,{ 64 }, { 1 , 64 });
std::copy(data, data + 64, tensor_dense1bias0bcast);
delete [] data;
}
//--- broadcast bias tensor dense2bias0for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_dense2bias0,{ 64 }, { 1 , 64 });
std::copy(data, data + 64, tensor_dense2bias0bcast);
delete [] data;
}
//--- broadcast bias tensor dense3bias0for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_dense3bias0,{ 64 }, { 1 , 64 });
std::copy(data, data + 64, tensor_dense3bias0bcast);
delete [] data;
}
//--- broadcast bias tensor dense4bias0for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_dense4bias0,{ 2 }, { 1 , 2 });
std::copy(data, data + 2, tensor_dense4bias0bcast);
delete [] data;
}
}
std::vector<float> infer(float* tensor_denseinput){
//--------- Gemm
char op_0_transA = 'n';
char op_0_transB = 'n';
int op_0_m = 1;
int op_0_n = 64;
int op_0_k = 7;
float op_0_alpha = 1;
float op_0_beta = 1;
int op_0_lda = 7;
int op_0_ldb = 64;
std::copy(tensor_densebias0bcast, tensor_densebias0bcast + 64, tensor_denseDense);
BLAS::sgemm_(&op_0_transB, &op_0_transA, &op_0_n, &op_0_m, &op_0_k, &op_0_alpha, tensor_densekernel0, &op_0_ldb, tensor_denseinput, &op_0_lda, &op_0_beta, tensor_denseDense, &op_0_n);
//------ RELU
for (int id = 0; id < 64 ; id++){
tensor_denseRelu0[id] = ((tensor_denseDense[id] > 0 )? tensor_denseDense[id] : 0);
}
//--------- Gemm
char op_2_transA = 'n';
char op_2_transB = 'n';
int op_2_m = 1;
int op_2_n = 64;
int op_2_k = 64;
float op_2_alpha = 1;
float op_2_beta = 1;
int op_2_lda = 64;
int op_2_ldb = 64;
std::copy(tensor_dense1bias0bcast, tensor_dense1bias0bcast + 64, tensor_dense1Dense);
BLAS::sgemm_(&op_2_transB, &op_2_transA, &op_2_n, &op_2_m, &op_2_k, &op_2_alpha, tensor_dense1kernel0, &op_2_ldb, tensor_denseRelu0, &op_2_lda, &op_2_beta, tensor_dense1Dense, &op_2_n);
//------ RELU
for (int id = 0; id < 64 ; id++){
tensor_dense1Relu0[id] = ((tensor_dense1Dense[id] > 0 )? tensor_dense1Dense[id] : 0);
}
//--------- Gemm
char op_4_transA = 'n';
char op_4_transB = 'n';
int op_4_m = 1;
int op_4_n = 64;
int op_4_k = 64;
float op_4_alpha = 1;
float op_4_beta = 1;
int op_4_lda = 64;
int op_4_ldb = 64;
std::copy(tensor_dense2bias0bcast, tensor_dense2bias0bcast + 64, tensor_dense2Dense);
BLAS::sgemm_(&op_4_transB, &op_4_transA, &op_4_n, &op_4_m, &op_4_k, &op_4_alpha, tensor_dense2kernel0, &op_4_ldb, tensor_dense1Relu0, &op_4_lda, &op_4_beta, tensor_dense2Dense, &op_4_n);
//------ RELU
for (int id = 0; id < 64 ; id++){
tensor_dense2Relu0[id] = ((tensor_dense2Dense[id] > 0 )? tensor_dense2Dense[id] : 0);
}
//--------- Gemm
char op_6_transA = 'n';
char op_6_transB = 'n';
int op_6_m = 1;
int op_6_n = 64;
int op_6_k = 64;
float op_6_alpha = 1;
float op_6_beta = 1;
int op_6_lda = 64;
int op_6_ldb = 64;
std::copy(tensor_dense3bias0bcast, tensor_dense3bias0bcast + 64, tensor_dense3Dense);
BLAS::sgemm_(&op_6_transB, &op_6_transA, &op_6_n, &op_6_m, &op_6_k, &op_6_alpha, tensor_dense3kernel0, &op_6_ldb, tensor_dense2Relu0, &op_6_lda, &op_6_beta, tensor_dense3Dense, &op_6_n);
//------ RELU
for (int id = 0; id < 64 ; id++){
tensor_dense3Relu0[id] = ((tensor_dense3Dense[id] > 0 )? tensor_dense3Dense[id] : 0);
}
//--------- Gemm
char op_8_transA = 'n';
char op_8_transB = 'n';
int op_8_m = 1;
int op_8_n = 2;
int op_8_k = 64;
float op_8_alpha = 1;
float op_8_beta = 1;
int op_8_lda = 64;
int op_8_ldb = 2;
std::copy(tensor_dense4bias0bcast, tensor_dense4bias0bcast + 2, tensor_dense4Dense);
BLAS::sgemm_(&op_8_transB, &op_8_transA, &op_8_n, &op_8_m, &op_8_k, &op_8_alpha, tensor_dense4kernel0, &op_8_ldb, tensor_dense3Relu0, &op_8_lda, &op_8_beta, tensor_dense4Dense, &op_8_n);
for (int id = 0; id < 2 ; id++){
tensor_dense4Sigmoid0[id] = 1 / (1 + std::exp( - tensor_dense4Dense[id]));
}
return fTensor_dense4Sigmoid0;
}
};
} //TMVA_SOFIE_Higgs_trained_model
#endif // ROOT_TMVA_SOFIE_HIGGS_TRAINED_MODEL
compiled : #include "Higgs_trained_model.hxx"
compiled : auto sofie_functor = TMVA::Experimental::SofieFunctor<7,TMVA_SOFIE_Higgs_trained_model::Session>(0);
Model is ready to be evaluated
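For comparison, the same evaluation can also be performed without JITting the Define expression (as in the companion TMVA_SOFIE_RDataFrame.C tutorial): if the generated header is compiled together with the analysis code, the functor object can be passed to RDataFrame directly. The sketch below is only a hypothetical variant of this macro; the TMVA/SOFIEHelpers.hxx include and the use of DefineSlot (which supplies the slot number that the JITted expression took from rdfslot_) are assumptions rather than code from this tutorial:

#include "TMVA/SOFIEHelpers.hxx"     // assumption: header declaring TMVA::Experimental::SofieFunctor
#include "Higgs_trained_model.hxx"   // generated inference code shown above
#include "ROOT/RDataFrame.hxx"

void run_compiled_inference()
{
   using namespace TMVA::Experimental;
   auto functor = SofieFunctor<7, TMVA_SOFIE_Higgs_trained_model::Session>(0);
   ROOT::RDataFrame df("sig_tree", "http://root.cern.ch/files/Higgs_data.root");
   // DefineSlot passes the processing-slot number as the first functor argument
   auto h = df.DefineSlot("DNN_Value", functor,
                          {"m_jj", "m_jjj", "m_lv", "m_jlv", "m_bb", "m_wbb", "m_wwbb"})
              .Histo1D({"h_sig", "", 100, 0, 1}, "DNN_Value");
   h->DrawClone();
}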
Author
Lorenzo Moneta

Definition in file TMVA_SOFIE_RDataFrame_JIT.C.