TMVA_SOFIE_ONNX.C File Reference

Detailed Description

This macro provides a simple example of parsing an ONNX file into an RModel object and then generating the .hxx header file for inference.

using namespace TMVA::Experimental;

void TMVA_SOFIE_ONNX(std::string inputFile = ""){
   if (inputFile.empty())
      inputFile = std::string(gROOT->GetTutorialsDir()) + "/tmva/Linear_16.onnx";

   // Creating the parser object to parse ONNX files
   SOFIE::RModelParser_ONNX parser;
   SOFIE::RModel model = parser.Parse(inputFile, true);

   // Generating inference code
   model.Generate();
   // Write the code to files (by default Linear_16.hxx and Linear_16.dat)
   model.OutputGenerated();

   // Printing required input tensors
   model.PrintRequiredInputTensors();

   // Printing initialized tensors (weights)
   std::cout << "\n\n";
   model.PrintInitializedTensors();

   // Printing intermediate tensors
   std::cout << "\n\n";
   model.PrintIntermediateTensors();

   // Checking if a tensor already exists in the model
   std::cout << "\n\nTensor \"16weight\" already exist: " << std::boolalpha << model.CheckIfTensorAlreadyExist("16weight") << "\n\n";
   std::vector<size_t> tensorShape = model.GetTensorShape("16weight");
   std::cout << "Shape of tensor \"16weight\": ";
   for (auto &it : tensorShape) {
      std::cout << it << ",";
   }
   std::cout << "\n\nData type of tensor \"16weight\": ";
   SOFIE::ETensorType tensorType = model.GetTensorType("16weight");
   std::cout << SOFIE::ConvertTypeToString(tensorType);

   // Printing the generated inference code
   std::cout << "\n\n";
   model.PrintGenerated();
}
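
The generated Linear_16.hxx defines a TMVA_SOFIE_Linear_16::Session class (reproduced in full at the end of this page); its constructor reads the weights from Linear_16.dat and its infer() method evaluates the network. Below is a minimal usage sketch, assuming the generated files are in the working directory and the program is linked against a BLAS library providing sgemm_; it is an illustration, not part of the macro:

// Hypothetical standalone driver for the generated header.
#include "Linear_16.hxx" // produced by model.OutputGenerated()
#include <iostream>
#include <vector>

int main()
{
   // The default constructor reads the weights from Linear_16.dat.
   TMVA_SOFIE_Linear_16::Session session;
   // The model expects a float input of shape [16,100] (batch of 16, 100 features).
   std::vector<float> input(16 * 100, 1.f);
   // infer() returns the flattened [16,10] output tensor (160 values).
   std::vector<float> output = session.infer(input.data());
   std::cout << "output size: " << output.size() << std::endl;
   return 0;
}

The verbose output produced by running the macro follows.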
ONNX Version 6
Parsing model inputs....
graph input 0 name input.1 type 1
Parsing graph initializer list and fill model initialized tensors
initializer 0 name 0.bias type 1
add FLOAT initialized tensor 0.bias shape { 50 }
initializer 1 name 0.weight type 1
add FLOAT initialized tensor 0.weight shape { 50 , 100 }
initializer 2 name 10.bias type 1
add FLOAT initialized tensor 10.bias shape { 50 }
initializer 3 name 10.weight type 1
add FLOAT initialized tensor 10.weight shape { 50 , 50 }
initializer 4 name 12.bias type 1
add FLOAT initialized tensor 12.bias shape { 50 }
initializer 5 name 12.weight type 1
add FLOAT initialized tensor 12.weight shape { 50 , 50 }
initializer 6 name 14.bias type 1
add FLOAT initialized tensor 14.bias shape { 50 }
initializer 7 name 14.weight type 1
add FLOAT initialized tensor 14.weight shape { 50 , 50 }
initializer 8 name 16.bias type 1
add FLOAT initialized tensor 16.bias shape { 50 }
initializer 9 name 16.weight type 1
add FLOAT initialized tensor 16.weight shape { 50 , 50 }
initializer 10 name 18.bias type 1
add FLOAT initialized tensor 18.bias shape { 10 }
initializer 11 name 18.weight type 1
add FLOAT initialized tensor 18.weight shape { 10 , 50 }
initializer 12 name 2.bias type 1
add FLOAT initialized tensor 2.bias shape { 50 }
initializer 13 name 2.weight type 1
add FLOAT initialized tensor 2.weight shape { 50 , 50 }
initializer 14 name 4.bias type 1
add FLOAT initialized tensor 4.bias shape { 50 }
initializer 15 name 4.weight type 1
add FLOAT initialized tensor 4.weight shape { 50 , 50 }
initializer 16 name 6.bias type 1
add FLOAT initialized tensor 6.bias shape { 50 }
initializer 17 name 6.weight type 1
add FLOAT initialized tensor 6.weight shape { 50 , 50 }
initializer 18 name 8.bias type 1
add FLOAT initialized tensor 8.bias shape { 50 }
initializer 19 name 8.weight type 1
add FLOAT initialized tensor 8.weight shape { 50 , 50 }
Graph operator list (ONNX order)
Operator 0 : Gemm , 3 inputs : {input.1, 0.weight, 0.bias }
Operator 1 : Relu , 1 inputs : {21 }
Operator 2 : Gemm , 3 inputs : {22, 2.weight, 2.bias }
Operator 3 : Relu , 1 inputs : {23 }
Operator 4 : Gemm , 3 inputs : {24, 4.weight, 4.bias }
Operator 5 : Relu , 1 inputs : {25 }
Operator 6 : Gemm , 3 inputs : {26, 6.weight, 6.bias }
Operator 7 : Relu , 1 inputs : {27 }
Operator 8 : Gemm , 3 inputs : {28, 8.weight, 8.bias }
Operator 9 : Relu , 1 inputs : {29 }
Operator 10 : Gemm , 3 inputs : {30, 10.weight, 10.bias }
Operator 11 : Relu , 1 inputs : {31 }
Operator 12 : Gemm , 3 inputs : {32, 12.weight, 12.bias }
Operator 13 : Relu , 1 inputs : {33 }
Operator 14 : Gemm , 3 inputs : {34, 14.weight, 14.bias }
Operator 15 : Relu , 1 inputs : {35 }
Operator 16 : Gemm , 3 inputs : {36, 16.weight, 16.bias }
Operator 17 : Relu , 1 inputs : {37 }
Operator 18 : Gemm , 3 inputs : {38, 18.weight, 18.bias }
Re-Order graph operator list
Gemm input input.1 1 01
Gemm input 0.weight 0 11
Gemm input 0.bias 0 11
add node Gemm order 0
Relu input 21 1 01
add node Relu order 1
Gemm input 22 1 01
Gemm input 2.weight 0 11
Gemm input 2.bias 0 11
add node Gemm order 2
Relu input 23 1 01
add node Relu order 3
Gemm input 24 1 01
Gemm input 4.weight 0 11
Gemm input 4.bias 0 11
add node Gemm order 4
Relu input 25 1 01
add node Relu order 5
Gemm input 26 1 01
Gemm input 6.weight 0 11
Gemm input 6.bias 0 11
add node Gemm order 6
Relu input 27 1 01
add node Relu order 7
Gemm input 28 1 01
Gemm input 8.weight 0 11
Gemm input 8.bias 0 11
add node Gemm order 8
Relu input 29 1 01
add node Relu order 9
Gemm input 30 1 01
Gemm input 10.weight 0 11
Gemm input 10.bias 0 11
add node Gemm order 10
Relu input 31 1 01
add node Relu order 11
Gemm input 32 1 01
Gemm input 12.weight 0 11
Gemm input 12.bias 0 11
add node Gemm order 12
Relu input 33 1 01
add node Relu order 13
Gemm input 34 1 01
Gemm input 14.weight 0 11
Gemm input 14.bias 0 11
add node Gemm order 14
Relu input 35 1 01
add node Relu order 15
Gemm input 36 1 01
Gemm input 16.weight 0 11
Gemm input 16.bias 0 11
add node Gemm order 16
Relu input 37 1 01
add node Relu order 17
Gemm input 38 1 01
Gemm input 18.weight 0 11
Gemm input 18.bias 0 11
add node Gemm order 18
Graph operator list (re-ordered)
Operator 0 : Gemm , 3 inputs : {input.1, 0.weight, 0.bias }
Operator 1 : Relu , 1 inputs : {21 }
Operator 2 : Gemm , 3 inputs : {22, 2.weight, 2.bias }
Operator 3 : Relu , 1 inputs : {23 }
Operator 4 : Gemm , 3 inputs : {24, 4.weight, 4.bias }
Operator 5 : Relu , 1 inputs : {25 }
Operator 6 : Gemm , 3 inputs : {26, 6.weight, 6.bias }
Operator 7 : Relu , 1 inputs : {27 }
Operator 8 : Gemm , 3 inputs : {28, 8.weight, 8.bias }
Operator 9 : Relu , 1 inputs : {29 }
Operator 10 : Gemm , 3 inputs : {30, 10.weight, 10.bias }
Operator 11 : Relu , 1 inputs : {31 }
Operator 12 : Gemm , 3 inputs : {32, 12.weight, 12.bias }
Operator 13 : Relu , 1 inputs : {33 }
Operator 14 : Gemm , 3 inputs : {34, 14.weight, 14.bias }
Operator 15 : Relu , 1 inputs : {35 }
Operator 16 : Gemm , 3 inputs : {36, 16.weight, 16.bias }
Operator 17 : Relu , 1 inputs : {37 }
Operator 18 : Gemm , 3 inputs : {38, 18.weight, 18.bias }
Fill RModel with operators...
0 0 parsing operator Gemm
Parsing an operator Gemm
Creating operator Gemm
1 1 parsing operator Relu
Parsing an operator Relu
Creating operator Relu
2 2 parsing operator Gemm
Parsing an operator Gemm
Creating operator Gemm
3 3 parsing operator Relu
Parsing an operator Relu
Creating operator Relu
4 4 parsing operator Gemm
Parsing an operator Gemm
Creating operator Gemm
5 5 parsing operator Relu
Parsing an operator Relu
Creating operator Relu
6 6 parsing operator Gemm
Parsing an operator Gemm
Creating operator Gemm
7 7 parsing operator Relu
Parsing an operator Relu
Creating operator Relu
8 8 parsing operator Gemm
Parsing an operator Gemm
Creating operator Gemm
9 9 parsing operator Relu
Parsing an operator Relu
Creating operator Relu
10 10 parsing operator Gemm
Parsing an operator Gemm
Creating operator Gemm
11 11 parsing operator Relu
Parsing an operator Relu
Creating operator Relu
12 12 parsing operator Gemm
Parsing an operator Gemm
Creating operator Gemm
13 13 parsing operator Relu
Parsing an operator Relu
Creating operator Relu
14 14 parsing operator Gemm
Parsing an operator Gemm
Creating operator Gemm
15 15 parsing operator Relu
Parsing an operator Relu
Creating operator Relu
16 16 parsing operator Gemm
Parsing an operator Gemm
Creating operator Gemm
17 17 parsing operator Relu
Parsing an operator Relu
Creating operator Relu
18 18 parsing operator Gemm
Parsing an operator Gemm
Creating operator Gemm
Parsing Graph output list
output 0 name 39
Model requires following inputs:
Fully Specified Tensor name: input1 type: float shape: [16,100]
Model initialized the following tensors:
Tensor name: "8weight" type: float shape: [50,50]
Tensor name: "8bias" type: float shape: [50]
Tensor name: "4bias" type: float shape: [50]
Tensor name: "2weight" type: float shape: [50,50]
Tensor name: "0bias" type: float shape: [50]
Tensor name: "12bias" type: float shape: [50]
Tensor name: "18bias" type: float shape: [10]
Tensor name: "14bias" type: float shape: [50]
Tensor name: "4weight" type: float shape: [50,50]
Tensor name: "10weight" type: float shape: [50,50]
Tensor name: "6bias" type: float shape: [50]
Tensor name: "18weight" type: float shape: [10,50]
Tensor name: "0weight" type: float shape: [50,100]
Tensor name: "10bias" type: float shape: [50]
Tensor name: "2bias" type: float shape: [50]
Tensor name: "6weight" type: float shape: [50,50]
Tensor name: "14weight" type: float shape: [50,50]
Tensor name: "16weight" type: float shape: [50,50]
Tensor name: "12weight" type: float shape: [50,50]
Tensor name: "16bias" type: float shape: [50]
Model specify the following intermediate tensors:
Tensor name: "39" type: float shape: [16,10]
Tensor name: "18biasbcast" type: float shape: [16,10]
Tensor name: "38" type: float shape: [16,50]
Tensor name: "35" type: float shape: [16,50]
Tensor name: "14biasbcast" type: float shape: [16,50]
Tensor name: "34" type: float shape: [16,50]
Tensor name: "33" type: float shape: [16,50]
Tensor name: "36" type: float shape: [16,50]
Tensor name: "12biasbcast" type: float shape: [16,50]
Tensor name: "10biasbcast" type: float shape: [16,50]
Tensor name: "21" type: float shape: [16,50]
Tensor name: "24" type: float shape: [16,50]
Tensor name: "0biasbcast" type: float shape: [16,50]
Tensor name: "6biasbcast" type: float shape: [16,50]
Tensor name: "22" type: float shape: [16,50]
Tensor name: "23" type: float shape: [16,50]
Tensor name: "31" type: float shape: [16,50]
Tensor name: "2biasbcast" type: float shape: [16,50]
Tensor name: "32" type: float shape: [16,50]
Tensor name: "30" type: float shape: [16,50]
Tensor name: "25" type: float shape: [16,50]
Tensor name: "29" type: float shape: [16,50]
Tensor name: "4biasbcast" type: float shape: [16,50]
Tensor name: "37" type: float shape: [16,50]
Tensor name: "26" type: float shape: [16,50]
Tensor name: "16biasbcast" type: float shape: [16,50]
Tensor name: "8biasbcast" type: float shape: [16,50]
Tensor name: "27" type: float shape: [16,50]
Tensor name: "28" type: float shape: [16,50]
Tensor "16weight" already exist: true
Shape of tensor "16weight": 50,50,
Data type of tensor "16weight": float
//Code generated automatically by TMVA for Inference of Model file [Linear_16.onnx] at [Sun Dec 1 05:10:18 2024]
#ifndef ROOT_TMVA_SOFIE_LINEAR_16
#define ROOT_TMVA_SOFIE_LINEAR_16
#include <algorithm>
#include <vector>
#include "TMVA/SOFIE_common.hxx"
#include <fstream>
namespace TMVA_SOFIE_Linear_16{
namespace BLAS{
extern "C" void sgemv_(const char * trans, const int * m, const int * n, const float * alpha, const float * A,
const int * lda, const float * X, const int * incx, const float * beta, const float * Y, const int * incy);
extern "C" void sgemm_(const char * transa, const char * transb, const int * m, const int * n, const int * k,
const float * alpha, const float * A, const int * lda, const float * B, const int * ldb,
const float * beta, float * C, const int * ldc);
}//BLAS
struct Session {
std::vector<float> fTensor_8weight = std::vector<float>(2500);
float * tensor_8weight = fTensor_8weight.data();
std::vector<float> fTensor_8bias = std::vector<float>(50);
float * tensor_8bias = fTensor_8bias.data();
std::vector<float> fTensor_4bias = std::vector<float>(50);
float * tensor_4bias = fTensor_4bias.data();
std::vector<float> fTensor_2weight = std::vector<float>(2500);
float * tensor_2weight = fTensor_2weight.data();
std::vector<float> fTensor_0bias = std::vector<float>(50);
float * tensor_0bias = fTensor_0bias.data();
std::vector<float> fTensor_12bias = std::vector<float>(50);
float * tensor_12bias = fTensor_12bias.data();
std::vector<float> fTensor_18bias = std::vector<float>(10);
float * tensor_18bias = fTensor_18bias.data();
std::vector<float> fTensor_14bias = std::vector<float>(50);
float * tensor_14bias = fTensor_14bias.data();
std::vector<float> fTensor_4weight = std::vector<float>(2500);
float * tensor_4weight = fTensor_4weight.data();
std::vector<float> fTensor_10weight = std::vector<float>(2500);
float * tensor_10weight = fTensor_10weight.data();
std::vector<float> fTensor_6bias = std::vector<float>(50);
float * tensor_6bias = fTensor_6bias.data();
std::vector<float> fTensor_18weight = std::vector<float>(500);
float * tensor_18weight = fTensor_18weight.data();
std::vector<float> fTensor_0weight = std::vector<float>(5000);
float * tensor_0weight = fTensor_0weight.data();
std::vector<float> fTensor_10bias = std::vector<float>(50);
float * tensor_10bias = fTensor_10bias.data();
std::vector<float> fTensor_2bias = std::vector<float>(50);
float * tensor_2bias = fTensor_2bias.data();
std::vector<float> fTensor_6weight = std::vector<float>(2500);
float * tensor_6weight = fTensor_6weight.data();
std::vector<float> fTensor_14weight = std::vector<float>(2500);
float * tensor_14weight = fTensor_14weight.data();
std::vector<float> fTensor_16weight = std::vector<float>(2500);
float * tensor_16weight = fTensor_16weight.data();
std::vector<float> fTensor_12weight = std::vector<float>(2500);
float * tensor_12weight = fTensor_12weight.data();
std::vector<float> fTensor_16bias = std::vector<float>(50);
float * tensor_16bias = fTensor_16bias.data();
std::vector<float> fTensor_39 = std::vector<float>(160);
float * tensor_39 = fTensor_39.data();
std::vector<float> fTensor_18biasbcast = std::vector<float>(160);
float * tensor_18biasbcast = fTensor_18biasbcast.data();
std::vector<float> fTensor_38 = std::vector<float>(800);
float * tensor_38 = fTensor_38.data();
std::vector<float> fTensor_35 = std::vector<float>(800);
float * tensor_35 = fTensor_35.data();
std::vector<float> fTensor_14biasbcast = std::vector<float>(800);
float * tensor_14biasbcast = fTensor_14biasbcast.data();
std::vector<float> fTensor_34 = std::vector<float>(800);
float * tensor_34 = fTensor_34.data();
std::vector<float> fTensor_33 = std::vector<float>(800);
float * tensor_33 = fTensor_33.data();
std::vector<float> fTensor_36 = std::vector<float>(800);
float * tensor_36 = fTensor_36.data();
std::vector<float> fTensor_12biasbcast = std::vector<float>(800);
float * tensor_12biasbcast = fTensor_12biasbcast.data();
std::vector<float> fTensor_10biasbcast = std::vector<float>(800);
float * tensor_10biasbcast = fTensor_10biasbcast.data();
std::vector<float> fTensor_21 = std::vector<float>(800);
float * tensor_21 = fTensor_21.data();
std::vector<float> fTensor_24 = std::vector<float>(800);
float * tensor_24 = fTensor_24.data();
std::vector<float> fTensor_0biasbcast = std::vector<float>(800);
float * tensor_0biasbcast = fTensor_0biasbcast.data();
std::vector<float> fTensor_6biasbcast = std::vector<float>(800);
float * tensor_6biasbcast = fTensor_6biasbcast.data();
std::vector<float> fTensor_22 = std::vector<float>(800);
float * tensor_22 = fTensor_22.data();
std::vector<float> fTensor_23 = std::vector<float>(800);
float * tensor_23 = fTensor_23.data();
std::vector<float> fTensor_31 = std::vector<float>(800);
float * tensor_31 = fTensor_31.data();
std::vector<float> fTensor_2biasbcast = std::vector<float>(800);
float * tensor_2biasbcast = fTensor_2biasbcast.data();
std::vector<float> fTensor_32 = std::vector<float>(800);
float * tensor_32 = fTensor_32.data();
std::vector<float> fTensor_30 = std::vector<float>(800);
float * tensor_30 = fTensor_30.data();
std::vector<float> fTensor_25 = std::vector<float>(800);
float * tensor_25 = fTensor_25.data();
std::vector<float> fTensor_29 = std::vector<float>(800);
float * tensor_29 = fTensor_29.data();
std::vector<float> fTensor_4biasbcast = std::vector<float>(800);
float * tensor_4biasbcast = fTensor_4biasbcast.data();
std::vector<float> fTensor_37 = std::vector<float>(800);
float * tensor_37 = fTensor_37.data();
std::vector<float> fTensor_26 = std::vector<float>(800);
float * tensor_26 = fTensor_26.data();
std::vector<float> fTensor_16biasbcast = std::vector<float>(800);
float * tensor_16biasbcast = fTensor_16biasbcast.data();
std::vector<float> fTensor_8biasbcast = std::vector<float>(800);
float * tensor_8biasbcast = fTensor_8biasbcast.data();
std::vector<float> fTensor_27 = std::vector<float>(800);
float * tensor_27 = fTensor_27.data();
std::vector<float> fTensor_28 = std::vector<float>(800);
float * tensor_28 = fTensor_28.data();
Session(std::string filename ="") {
if (filename.empty()) filename = "Linear_16.dat";
std::ifstream f;
f.open(filename);
if (!f.is_open()) {
throw std::runtime_error("tmva-sofie failed to open file for input weights");
}
std::string tensor_name;
size_t length;
f >> tensor_name >> length;
if (tensor_name != "tensor_8weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_8weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_8weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_8bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_8bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_8bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_4bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_4bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_4bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_2weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_2weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_2weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_0bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_0bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_0bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_12bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_12bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_12bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_18bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_18bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 10) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 10 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_18bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_14bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_14bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_14bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_4weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_4weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_4weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_10weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_10weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_10weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_6bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_6bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_6bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_18weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_18weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_18weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_0weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_0weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 5000) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 5000 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_0weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_10bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_10bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_10bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_2bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_2bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_2bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_6weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_6weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_6weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_14weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_14weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_14weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_16weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_16weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_16weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_12weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_12weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_12weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_16bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_16bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (size_t i = 0; i < length; ++i)
f >> tensor_16bias[i];
f.close();
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_0bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_0biasbcast);
delete [] data;
}
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_2bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_2biasbcast);
delete [] data;
}
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_4bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_4biasbcast);
delete [] data;
}
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_6bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_6biasbcast);
delete [] data;
}
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_8bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_8biasbcast);
delete [] data;
}
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_10bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_10biasbcast);
delete [] data;
}
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_12bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_12biasbcast);
delete [] data;
}
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_14bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_14biasbcast);
delete [] data;
}
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_16bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_16biasbcast);
delete [] data;
}
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_18bias,{ 10 }, { 16 , 10 });
std::copy(data, data + 160, tensor_18biasbcast);
delete [] data;
}
}
std::vector<float> infer(float* tensor_input1){
//--------- Gemm
char op_0_transA = 'n';
char op_0_transB = 't';
int op_0_m = 16;
int op_0_n = 50;
int op_0_k = 100;
float op_0_alpha = 1;
float op_0_beta = 1;
int op_0_lda = 100;
int op_0_ldb = 100;
std::copy(tensor_0biasbcast, tensor_0biasbcast + 800, tensor_21);
BLAS::sgemm_(&op_0_transB, &op_0_transA, &op_0_n, &op_0_m, &op_0_k, &op_0_alpha, tensor_0weight, &op_0_ldb, tensor_input1, &op_0_lda, &op_0_beta, tensor_21, &op_0_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_22[id] = ((tensor_21[id] > 0 )? tensor_21[id] : 0);
}
//--------- Gemm
char op_2_transA = 'n';
char op_2_transB = 't';
int op_2_m = 16;
int op_2_n = 50;
int op_2_k = 50;
float op_2_alpha = 1;
float op_2_beta = 1;
int op_2_lda = 50;
int op_2_ldb = 50;
std::copy(tensor_2biasbcast, tensor_2biasbcast + 800, tensor_23);
BLAS::sgemm_(&op_2_transB, &op_2_transA, &op_2_n, &op_2_m, &op_2_k, &op_2_alpha, tensor_2weight, &op_2_ldb, tensor_22, &op_2_lda, &op_2_beta, tensor_23, &op_2_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_24[id] = ((tensor_23[id] > 0 )? tensor_23[id] : 0);
}
//--------- Gemm
char op_4_transA = 'n';
char op_4_transB = 't';
int op_4_m = 16;
int op_4_n = 50;
int op_4_k = 50;
float op_4_alpha = 1;
float op_4_beta = 1;
int op_4_lda = 50;
int op_4_ldb = 50;
std::copy(tensor_4biasbcast, tensor_4biasbcast + 800, tensor_25);
BLAS::sgemm_(&op_4_transB, &op_4_transA, &op_4_n, &op_4_m, &op_4_k, &op_4_alpha, tensor_4weight, &op_4_ldb, tensor_24, &op_4_lda, &op_4_beta, tensor_25, &op_4_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_26[id] = ((tensor_25[id] > 0 )? tensor_25[id] : 0);
}
//--------- Gemm
char op_6_transA = 'n';
char op_6_transB = 't';
int op_6_m = 16;
int op_6_n = 50;
int op_6_k = 50;
float op_6_alpha = 1;
float op_6_beta = 1;
int op_6_lda = 50;
int op_6_ldb = 50;
std::copy(tensor_6biasbcast, tensor_6biasbcast + 800, tensor_27);
BLAS::sgemm_(&op_6_transB, &op_6_transA, &op_6_n, &op_6_m, &op_6_k, &op_6_alpha, tensor_6weight, &op_6_ldb, tensor_26, &op_6_lda, &op_6_beta, tensor_27, &op_6_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_28[id] = ((tensor_27[id] > 0 )? tensor_27[id] : 0);
}
//--------- Gemm
char op_8_transA = 'n';
char op_8_transB = 't';
int op_8_m = 16;
int op_8_n = 50;
int op_8_k = 50;
float op_8_alpha = 1;
float op_8_beta = 1;
int op_8_lda = 50;
int op_8_ldb = 50;
std::copy(tensor_8biasbcast, tensor_8biasbcast + 800, tensor_29);
BLAS::sgemm_(&op_8_transB, &op_8_transA, &op_8_n, &op_8_m, &op_8_k, &op_8_alpha, tensor_8weight, &op_8_ldb, tensor_28, &op_8_lda, &op_8_beta, tensor_29, &op_8_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_30[id] = ((tensor_29[id] > 0 )? tensor_29[id] : 0);
}
//--------- Gemm
char op_10_transA = 'n';
char op_10_transB = 't';
int op_10_m = 16;
int op_10_n = 50;
int op_10_k = 50;
float op_10_alpha = 1;
float op_10_beta = 1;
int op_10_lda = 50;
int op_10_ldb = 50;
std::copy(tensor_10biasbcast, tensor_10biasbcast + 800, tensor_31);
BLAS::sgemm_(&op_10_transB, &op_10_transA, &op_10_n, &op_10_m, &op_10_k, &op_10_alpha, tensor_10weight, &op_10_ldb, tensor_30, &op_10_lda, &op_10_beta, tensor_31, &op_10_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_32[id] = ((tensor_31[id] > 0 )? tensor_31[id] : 0);
}
//--------- Gemm
char op_12_transA = 'n';
char op_12_transB = 't';
int op_12_m = 16;
int op_12_n = 50;
int op_12_k = 50;
float op_12_alpha = 1;
float op_12_beta = 1;
int op_12_lda = 50;
int op_12_ldb = 50;
std::copy(tensor_12biasbcast, tensor_12biasbcast + 800, tensor_33);
BLAS::sgemm_(&op_12_transB, &op_12_transA, &op_12_n, &op_12_m, &op_12_k, &op_12_alpha, tensor_12weight, &op_12_ldb, tensor_32, &op_12_lda, &op_12_beta, tensor_33, &op_12_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_34[id] = ((tensor_33[id] > 0 )? tensor_33[id] : 0);
}
//--------- Gemm
char op_14_transA = 'n';
char op_14_transB = 't';
int op_14_m = 16;
int op_14_n = 50;
int op_14_k = 50;
float op_14_alpha = 1;
float op_14_beta = 1;
int op_14_lda = 50;
int op_14_ldb = 50;
std::copy(tensor_14biasbcast, tensor_14biasbcast + 800, tensor_35);
BLAS::sgemm_(&op_14_transB, &op_14_transA, &op_14_n, &op_14_m, &op_14_k, &op_14_alpha, tensor_14weight, &op_14_ldb, tensor_34, &op_14_lda, &op_14_beta, tensor_35, &op_14_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_36[id] = ((tensor_35[id] > 0 )? tensor_35[id] : 0);
}
//--------- Gemm
char op_16_transA = 'n';
char op_16_transB = 't';
int op_16_m = 16;
int op_16_n = 50;
int op_16_k = 50;
float op_16_alpha = 1;
float op_16_beta = 1;
int op_16_lda = 50;
int op_16_ldb = 50;
std::copy(tensor_16biasbcast, tensor_16biasbcast + 800, tensor_37);
BLAS::sgemm_(&op_16_transB, &op_16_transA, &op_16_n, &op_16_m, &op_16_k, &op_16_alpha, tensor_16weight, &op_16_ldb, tensor_36, &op_16_lda, &op_16_beta, tensor_37, &op_16_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_38[id] = ((tensor_37[id] > 0 )? tensor_37[id] : 0);
}
//--------- Gemm
char op_18_transA = 'n';
char op_18_transB = 't';
int op_18_m = 16;
int op_18_n = 10;
int op_18_k = 50;
float op_18_alpha = 1;
float op_18_beta = 1;
int op_18_lda = 50;
int op_18_ldb = 50;
std::copy(tensor_18biasbcast, tensor_18biasbcast + 160, tensor_39);
BLAS::sgemm_(&op_18_transB, &op_18_transA, &op_18_n, &op_18_m, &op_18_k, &op_18_alpha, tensor_18weight, &op_18_ldb, tensor_38, &op_18_lda, &op_18_beta, tensor_39, &op_18_n);
std::vector<float> ret (tensor_39, tensor_39 + 160);
return ret;
}
};
} //TMVA_SOFIE_Linear_16
#endif // ROOT_TMVA_SOFIE_LINEAR_16
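
Each hidden layer in the generated infer() method follows the same pattern: the broadcast bias tensor is copied into the output buffer, and sgemm_ is invoked with the operands and transpose flags swapped so that the row-major product can be computed by column-major Fortran BLAS (with beta = 1, the GEMM result accumulates onto the bias). The following is a plain, non-BLAS reference sketch of what one such Gemm + ReLU layer computes; the helper name dense_relu is hypothetical and only for illustration:

// Reference (non-BLAS) version of one generated Gemm + ReLU layer.
// Computes Y = X * W^T + b for a row-major batch, then applies ReLU.
#include <algorithm>
#include <vector>

std::vector<float> dense_relu(const std::vector<float> &X, // [m x k], row-major input
                              const std::vector<float> &W, // [n x k], row-major weights (ONNX Gemm with transB)
                              const std::vector<float> &b, // [n] bias, broadcast over the batch
                              int m, int n, int k)
{
   std::vector<float> Y(m * n);
   for (int i = 0; i < m; ++i)
      for (int j = 0; j < n; ++j) {
         float acc = b[j]; // start from the broadcast bias, as the generated code does via beta = 1
         for (int p = 0; p < k; ++p)
            acc += X[i * k + p] * W[j * k + p]; // W accessed transposed (transB = 't')
         Y[i * n + j] = std::max(acc, 0.f); // ReLU
      }
   return Y;
}

For the first layer of this model, m = 16, n = 50 and k = 100, matching the op_0_* parameters above; the final Gemm (n = 10) omits the ReLU.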
Author
Sanjiban Sengupta

Definition in file TMVA_SOFIE_ONNX.C.