TMVA_SOFIE_Keras.C File Reference

Detailed Description

This macro provides a simple example of parsing a Keras .h5 file into a SOFIE RModel object and then generating the .hxx header file for inference.
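In essence, the workflow reduces to three SOFIE calls, condensed below from the macro that follows (the file name is simply the one written by the training script in this example):

using namespace TMVA::Experimental;
// parse the Keras .h5 file into a SOFIE RModel object
SOFIE::RModel model = SOFIE::PyKeras::Parse("KerasModel.h5");
// generate the C++ inference code for the parsed model
model.Generate();
// write it out, by default to <modelName>.hxx (here KerasModel.hxx)
model.OutputGenerated();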

using namespace TMVA::Experimental;
TString pythonSrc = "\
import os\n\
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\
\n\
import numpy as np\n\
from tensorflow.keras.models import Model\n\
from tensorflow.keras.layers import Input,Dense,Activation,ReLU\n\
from tensorflow.keras.optimizers import SGD\n\
\n\
input=Input(shape=(64,),batch_size=4)\n\
x=Dense(32)(input)\n\
x=Activation('relu')(x)\n\
x=Dense(16,activation='relu')(x)\n\
x=Dense(8,activation='relu')(x)\n\
x=Dense(4)(x)\n\
output=ReLU()(x)\n\
model=Model(inputs=input,outputs=output)\n\
\n\
randomGenerator=np.random.RandomState(0)\n\
x_train=randomGenerator.rand(4,64)\n\
y_train=randomGenerator.rand(4,4)\n\
\n\
model.compile(loss='mean_squared_error', optimizer=SGD(learning_rate=0.01))\n\
model.fit(x_train, y_train, epochs=5, batch_size=4)\n\
model.save('KerasModel.h5')\n";
void TMVA_SOFIE_Keras(const char * modelFile = nullptr, bool printModelInfo = true){
   //Running the Python script to generate the Keras .h5 file
   TMVA::PyMethodBase::PyInitialize();
   if (modelFile == nullptr) {
      TMacro m;
      m.AddLine(pythonSrc);
      m.SaveSource("make_keras_model.py");
      gSystem->Exec(TMVA::Python_Executable() + " make_keras_model.py");
      modelFile = "KerasModel.h5";
   }
   //Parsing the saved Keras .h5 file into an RModel object
   SOFIE::RModel model = SOFIE::PyKeras::Parse(modelFile);
   //Generating inference code
   model.Generate();
   // generate the output header; by default it will be modelName.hxx
   model.OutputGenerated();
   if (!printModelInfo) return;
   //Printing required input tensors
   std::cout<<"\n\n";
   model.PrintRequiredInputTensors();
   //Printing initialized tensors (weights)
   std::cout<<"\n\n";
   model.PrintInitializedTensors();
   //Printing intermediate tensors
   std::cout<<"\n\n";
   model.PrintIntermediateTensors();
   //Printing generated inference code
   std::cout<<"\n\n";
   model.PrintGenerated();
}
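Once the macro has run, the generated header (reproduced in the output below) exposes a Session class that can be used for standalone inference. A minimal sketch, assuming KerasModel.hxx and the weight file KerasModel.dat produced by the macro sit in the working directory; the file name run_inference.cxx is just a placeholder:

#include "KerasModel.hxx"
#include <iostream>
#include <vector>

int main() {
   // the Session constructor reads the weights from KerasModel.dat
   TMVA_SOFIE_KerasModel::Session session("KerasModel.dat");
   // the model expects a batch of 4 events with 64 features each
   std::vector<float> input(4 * 64, 1.f);
   // infer() returns the 4 x 4 output of the final ReLU layer (16 values)
   std::vector<float> output = session.infer(input.data());
   for (float v : output) std::cout << v << " ";
   std::cout << std::endl;
   return 0;
}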
Epoch 1/5
1/1 [==============================] - 0s 407ms/step - loss: 0.3494
Epoch 2/5
1/1 [==============================] - 0s 3ms/step - loss: 0.3483
Epoch 3/5
1/1 [==============================] - 0s 2ms/step - loss: 0.3471
Epoch 4/5
1/1 [==============================] - 0s 3ms/step - loss: 0.3460
Epoch 5/5
1/1 [==============================] - 0s 2ms/step - loss: 0.3450
TF/Keras Version: 2.15.1
Model requires following inputs:
Fully Specified Tensor name: input_1 type: float shape: [4,64]
Model initialized the following tensors:
Tensor name: "dense_3bias0" type: float shape: [4]
Tensor name: "dense_3kernel0" type: float shape: [8,4]
Tensor name: "dense_2kernel0" type: float shape: [16,8]
Tensor name: "dense_1bias0" type: float shape: [16]
Tensor name: "dense_1kernel0" type: float shape: [32,16]
Tensor name: "densebias0" type: float shape: [32]
Tensor name: "dense_2bias0" type: float shape: [8]
Tensor name: "densekernel0" type: float shape: [64,32]
Model specify the following intermediate tensors:
Tensor name: "dense_3BiasAdd0" type: float shape: [4,4]
Tensor name: "re_luRelu0" type: float shape: [4,4]
Tensor name: "dense_3bias0bcast" type: float shape: [4,4]
Tensor name: "dense_1Dense" type: float shape: [4,16]
Tensor name: "dense_1Relu0" type: float shape: [4,16]
Tensor name: "activationRelu0" type: float shape: [4,32]
Tensor name: "dense_2Relu0" type: float shape: [4,8]
Tensor name: "dense_2Dense" type: float shape: [4,8]
Tensor name: "denseBiasAdd0" type: float shape: [4,32]
Tensor name: "dense_2bias0bcast" type: float shape: [4,8]
Tensor name: "dense_1bias0bcast" type: float shape: [4,16]
Tensor name: "densebias0bcast" type: float shape: [4,32]
//Code generated automatically by TMVA for Inference of Model file [KerasModel.h5] at [Fri May 16 13:45:25 2025]
#ifndef ROOT_TMVA_SOFIE_KERASMODEL
#define ROOT_TMVA_SOFIE_KERASMODEL
#include <algorithm>
#include <vector>
#include "TMVA/SOFIE_common.hxx"
#include <fstream>
namespace TMVA_SOFIE_KerasModel{
namespace BLAS{
extern "C" void sgemv_(const char * trans, const int * m, const int * n, const float * alpha, const float * A,
const int * lda, const float * X, const int * incx, const float * beta, const float * Y, const int * incy);
extern "C" void sgemm_(const char * transa, const char * transb, const int * m, const int * n, const int * k,
const float * alpha, const float * A, const int * lda, const float * B, const int * ldb,
const float * beta, float * C, const int * ldc);
}//BLAS
struct Session {
// initialized tensors
std::vector<float> fTensor_dense_3bias0 = std::vector<float>(4);
float * tensor_dense_3bias0 = fTensor_dense_3bias0.data();
std::vector<float> fTensor_dense_3kernel0 = std::vector<float>(32);
float * tensor_dense_3kernel0 = fTensor_dense_3kernel0.data();
std::vector<float> fTensor_dense_2kernel0 = std::vector<float>(128);
float * tensor_dense_2kernel0 = fTensor_dense_2kernel0.data();
std::vector<float> fTensor_dense_1bias0 = std::vector<float>(16);
float * tensor_dense_1bias0 = fTensor_dense_1bias0.data();
std::vector<float> fTensor_dense_1kernel0 = std::vector<float>(512);
float * tensor_dense_1kernel0 = fTensor_dense_1kernel0.data();
std::vector<float> fTensor_densebias0 = std::vector<float>(32);
float * tensor_densebias0 = fTensor_densebias0.data();
std::vector<float> fTensor_dense_2bias0 = std::vector<float>(8);
float * tensor_dense_2bias0 = fTensor_dense_2bias0.data();
std::vector<float> fTensor_densekernel0 = std::vector<float>(2048);
float * tensor_densekernel0 = fTensor_densekernel0.data();
//--- Allocating session memory pool to be used for allocating intermediate tensors
std::vector<char> fIntermediateMemoryPool = std::vector<char>(1920);
// --- Positioning intermediate tensor memory --
// Allocating memory for intermediate tensor denseBiasAdd0 with size 512 bytes
float* tensor_denseBiasAdd0 = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 0);
// Allocating memory for intermediate tensor activationRelu0 with size 512 bytes
float* tensor_activationRelu0 = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 512);
// Allocating memory for intermediate tensor dense_1Dense with size 256 bytes
float* tensor_dense_1Dense = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 1024);
// Allocating memory for intermediate tensor dense_1Relu0 with size 256 bytes
float* tensor_dense_1Relu0 = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 1280);
// Allocating memory for intermediate tensor dense_2Dense with size 128 bytes
float* tensor_dense_2Dense = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 1536);
// Allocating memory for intermediate tensor dense_2Relu0 with size 128 bytes
float* tensor_dense_2Relu0 = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 1664);
// Allocating memory for intermediate tensor dense_3BiasAdd0 with size 64 bytes
float* tensor_dense_3BiasAdd0 = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 1792);
// Allocating memory for intermediate tensor re_luRelu0 with size 64 bytes
float* tensor_re_luRelu0 = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 1856);
//--- declare and allocate the intermediate tensors
std::vector<float> fTensor_dense_3bias0bcast = std::vector<float>(16);
float * tensor_dense_3bias0bcast = fTensor_dense_3bias0bcast.data();
std::vector<float> fTensor_dense_2bias0bcast = std::vector<float>(32);
float * tensor_dense_2bias0bcast = fTensor_dense_2bias0bcast.data();
std::vector<float> fTensor_dense_1bias0bcast = std::vector<float>(64);
float * tensor_dense_1bias0bcast = fTensor_dense_1bias0bcast.data();
std::vector<float> fTensor_densebias0bcast = std::vector<float>(128);
float * tensor_densebias0bcast = fTensor_densebias0bcast.data();
Session(std::string filename ="KerasModel.dat") {
//--- reading weights from file
std::ifstream f;
f.open(filename);
if (!f.is_open()) {
throw std::runtime_error("tmva-sofie failed to open file " + filename + " for input weights");
}
using TMVA::Experimental::SOFIE::ReadTensorFromStream;
ReadTensorFromStream(f, tensor_dense_3bias0, "tensor_dense_3bias0", 4);
ReadTensorFromStream(f, tensor_dense_3kernel0, "tensor_dense_3kernel0", 32);
ReadTensorFromStream(f, tensor_dense_2kernel0, "tensor_dense_2kernel0", 128);
ReadTensorFromStream(f, tensor_dense_1bias0, "tensor_dense_1bias0", 16);
ReadTensorFromStream(f, tensor_dense_1kernel0, "tensor_dense_1kernel0", 512);
ReadTensorFromStream(f, tensor_densebias0, "tensor_densebias0", 32);
ReadTensorFromStream(f, tensor_dense_2bias0, "tensor_dense_2bias0", 8);
ReadTensorFromStream(f, tensor_densekernel0, "tensor_densekernel0", 2048);
f.close();
//--- broadcast bias tensor densebias0for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_densebias0,{ 32 }, { 4 , 32 });
std::copy(data, data + 128, tensor_densebias0bcast);
delete [] data;
}
//--- broadcast bias tensor dense_1bias0for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_dense_1bias0,{ 16 }, { 4 , 16 });
std::copy(data, data + 64, tensor_dense_1bias0bcast);
delete [] data;
}
//--- broadcast bias tensor dense_2bias0for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_dense_2bias0,{ 8 }, { 4 , 8 });
std::copy(data, data + 32, tensor_dense_2bias0bcast);
delete [] data;
}
//--- broadcast bias tensor dense_3bias0for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_dense_3bias0,{ 4 }, { 4 , 4 });
std::copy(data, data + 16, tensor_dense_3bias0bcast);
delete [] data;
}
}
void doInfer(float const* tensor_input_1, std::vector<float> &output_tensor_re_luRelu0 ){
//--------- Gemm
TMVA::Experimental::SOFIE::Gemm_Call(tensor_denseBiasAdd0, false, false, 32, 4, 64, 1,tensor_densekernel0, tensor_input_1, 1,tensor_densebias0bcast);
//------ RELU
for (int id = 0; id < 128 ; id++){
tensor_activationRelu0[id] = ((tensor_denseBiasAdd0[id] > 0 )? tensor_denseBiasAdd0[id] : 0);
}
//--------- Gemm
TMVA::Experimental::SOFIE::Gemm_Call(tensor_dense_1Dense, false, false, 16, 4, 32, 1,tensor_dense_1kernel0, tensor_activationRelu0, 1,tensor_dense_1bias0bcast);
//------ RELU
for (int id = 0; id < 64 ; id++){
tensor_dense_1Relu0[id] = ((tensor_dense_1Dense[id] > 0 )? tensor_dense_1Dense[id] : 0);
}
//--------- Gemm
TMVA::Experimental::SOFIE::Gemm_Call(tensor_dense_2Dense, false, false, 8, 4, 16, 1,tensor_dense_2kernel0, tensor_dense_1Relu0, 1,tensor_dense_2bias0bcast);
//------ RELU
for (int id = 0; id < 32 ; id++){
tensor_dense_2Relu0[id] = ((tensor_dense_2Dense[id] > 0 )? tensor_dense_2Dense[id] : 0);
}
//--------- Gemm
TMVA::Experimental::SOFIE::Gemm_Call(tensor_dense_3BiasAdd0, false, false, 4, 4, 8, 1,tensor_dense_3kernel0, tensor_dense_2Relu0, 1,tensor_dense_3bias0bcast);
//------ RELU
for (int id = 0; id < 16 ; id++){
tensor_re_luRelu0[id] = ((tensor_dense_3BiasAdd0[id] > 0 )? tensor_dense_3BiasAdd0[id] : 0);
}
using TMVA::Experimental::SOFIE::UTILITY::FillOutput;
FillOutput(tensor_re_luRelu0, output_tensor_re_luRelu0, 16);
}
std::vector<float> infer(float const* tensor_input_1){
std::vector<float > output_tensor_re_luRelu0;
doInfer(tensor_input_1, output_tensor_re_luRelu0 );
return {output_tensor_re_luRelu0};
}
}; // end of Session
} //TMVA_SOFIE_KerasModel
#endif // ROOT_TMVA_SOFIE_KERASMODEL
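To build a standalone program such as the sketch above against this header, the compiler needs the ROOT include path (for TMVA/SOFIE_common.hxx) and a BLAS implementation providing sgemm_/sgemv_. A possible command line, given only as an assumption since the exact flags and libraries depend on the local installation:

g++ -O2 run_inference.cxx -o run_inference $(root-config --cflags --libs) -lROOTTMVASofie -lblas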
Author
Sanjiban Sengupta

Definition in file TMVA_SOFIE_Keras.C.