Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
TMVA_SOFIE_ONNX.C File Reference

Detailed Description

View in nbviewer Open in SWAN This macro provides a simple example of parsing an ONNX file into an RModel object and then generating the .hxx header file for inference.

using namespace TMVA::Experimental;
void TMVA_SOFIE_ONNX(std::string inputFile = ""){
// Parse an ONNX file into a SOFIE RModel, generate the C++ inference code
// (.hxx header + .dat weight file), and print the model's tensor information
// and the generated code. Defaults to the Linear_16 tutorial model.
if (inputFile.empty() )
inputFile = std::string(gROOT->GetTutorialsDir()) + "/tmva/Linear_16.onnx";
//Creating parser object to parse ONNX files
SOFIE::RModelParser_ONNX parser;
SOFIE::RModel model = parser.Parse(inputFile);
//Generating inference code
model.Generate();
// write the code in a file (by default Linear_16.hxx and Linear_16.dat)
model.OutputGenerated();
//Printing required input tensors
model.PrintRequiredInputTensors();
//Printing initialized tensors (weights)
std::cout<<"\n\n";
model.PrintInitializedTensors();
//Printing intermediate tensors
std::cout<<"\n\n";
model.PrintIntermediateTensors();
//Checking if tensor already exist in model
std::cout<<"\n\nTensor \"16weight\" already exist: "<<std::boolalpha<<model.CheckIfTensorAlreadyExist("16weight")<<"\n\n";
std::vector<size_t> tensorShape = model.GetTensorShape("16weight");
std::cout<<"Shape of tensor \"16weight\": ";
for(auto& it:tensorShape){
std::cout<<it<<",";
}
std::cout<<"\n\nData type of tensor \"16weight\": ";
SOFIE::ETensorType tensorType = model.GetTensorType("16weight");
std::cout<<SOFIE::ConvertTypeToString(tensorType);
//Printing generated inference code
std::cout<<"\n\n";
model.PrintGenerated();
}
#define gROOT
Definition TROOT.h:404
const ETensorType & GetTensorType(std::string name)
Definition RModel.cxx:70
void Generate(bool useSession=true, bool useWeightFile=true)
Definition RModel.cxx:175
bool CheckIfTensorAlreadyExist(std::string tensor_name)
Definition RModel.cxx:91
void OutputGenerated(std::string filename="")
Definition RModel.cxx:525
const std::vector< size_t > & GetTensorShape(std::string name)
Definition RModel.cxx:49
Model requires following inputs:
Fully Specified Tensor name: input1 type: float shape: [16,100]
Model initialized the following tensors:
Tensor name: "8weight" type: float shape: [50,50]
Tensor name: "8bias" type: float shape: [50]
Tensor name: "4bias" type: float shape: [50]
Tensor name: "2weight" type: float shape: [50,50]
Tensor name: "0bias" type: float shape: [50]
Tensor name: "12bias" type: float shape: [50]
Tensor name: "18bias" type: float shape: [10]
Tensor name: "14bias" type: float shape: [50]
Tensor name: "4weight" type: float shape: [50,50]
Tensor name: "10weight" type: float shape: [50,50]
Tensor name: "6bias" type: float shape: [50]
Tensor name: "18weight" type: float shape: [10,50]
Tensor name: "0weight" type: float shape: [50,100]
Tensor name: "10bias" type: float shape: [50]
Tensor name: "2bias" type: float shape: [50]
Tensor name: "6weight" type: float shape: [50,50]
Tensor name: "14weight" type: float shape: [50,50]
Tensor name: "16weight" type: float shape: [50,50]
Tensor name: "12weight" type: float shape: [50,50]
Tensor name: "16bias" type: float shape: [50]
Model specify the following intermediate tensors:
Tensor name: "39" type: float shape: [16,10]
Tensor name: "18biasbcast" type: float shape: [16,10]
Tensor name: "38" type: float shape: [16,50]
Tensor name: "35" type: float shape: [16,50]
Tensor name: "14biasbcast" type: float shape: [16,50]
Tensor name: "34" type: float shape: [16,50]
Tensor name: "33" type: float shape: [16,50]
Tensor name: "36" type: float shape: [16,50]
Tensor name: "12biasbcast" type: float shape: [16,50]
Tensor name: "10biasbcast" type: float shape: [16,50]
Tensor name: "21" type: float shape: [16,50]
Tensor name: "24" type: float shape: [16,50]
Tensor name: "0biasbcast" type: float shape: [16,50]
Tensor name: "6biasbcast" type: float shape: [16,50]
Tensor name: "22" type: float shape: [16,50]
Tensor name: "23" type: float shape: [16,50]
Tensor name: "31" type: float shape: [16,50]
Tensor name: "2biasbcast" type: float shape: [16,50]
Tensor name: "32" type: float shape: [16,50]
Tensor name: "30" type: float shape: [16,50]
Tensor name: "25" type: float shape: [16,50]
Tensor name: "29" type: float shape: [16,50]
Tensor name: "4biasbcast" type: float shape: [16,50]
Tensor name: "37" type: float shape: [16,50]
Tensor name: "26" type: float shape: [16,50]
Tensor name: "16biasbcast" type: float shape: [16,50]
Tensor name: "8biasbcast" type: float shape: [16,50]
Tensor name: "27" type: float shape: [16,50]
Tensor name: "28" type: float shape: [16,50]
Tensor "16weight" already exist: true
Shape of tensor "16weight": 50,50,
Data type of tensor "16weight": float
//Code generated automatically by TMVA for Inference of Model file [Linear_16.onnx] at [Mon Sep 11 20:40:03 2023]
#ifndef TMVA_SOFIE_LINEAR_16
#define TMVA_SOFIE_LINEAR_16
#include<algorithm>
#include<vector>
#include "TMVA/SOFIE_common.hxx"
#include <fstream>
namespace TMVA_SOFIE_Linear_16{
namespace BLAS{
extern "C" void sgemv_(const char * trans, const int * m, const int * n, const float * alpha, const float * A,
const int * lda, const float * X, const int * incx, const float * beta, const float * Y, const int * incy);
extern "C" void sgemm_(const char * transa, const char * transb, const int * m, const int * n, const int * k,
const float * alpha, const float * A, const int * lda, const float * B, const int * ldb,
const float * beta, float * C, const int * ldc);
}//BLAS
struct Session {
std::vector<float> fTensor_8weight = std::vector<float>(2500);
float * tensor_8weight = fTensor_8weight.data();
std::vector<float> fTensor_8bias = std::vector<float>(50);
float * tensor_8bias = fTensor_8bias.data();
std::vector<float> fTensor_4bias = std::vector<float>(50);
float * tensor_4bias = fTensor_4bias.data();
std::vector<float> fTensor_2weight = std::vector<float>(2500);
float * tensor_2weight = fTensor_2weight.data();
std::vector<float> fTensor_0bias = std::vector<float>(50);
float * tensor_0bias = fTensor_0bias.data();
std::vector<float> fTensor_12bias = std::vector<float>(50);
float * tensor_12bias = fTensor_12bias.data();
std::vector<float> fTensor_18bias = std::vector<float>(10);
float * tensor_18bias = fTensor_18bias.data();
std::vector<float> fTensor_14bias = std::vector<float>(50);
float * tensor_14bias = fTensor_14bias.data();
std::vector<float> fTensor_4weight = std::vector<float>(2500);
float * tensor_4weight = fTensor_4weight.data();
std::vector<float> fTensor_10weight = std::vector<float>(2500);
float * tensor_10weight = fTensor_10weight.data();
std::vector<float> fTensor_6bias = std::vector<float>(50);
float * tensor_6bias = fTensor_6bias.data();
std::vector<float> fTensor_18weight = std::vector<float>(500);
float * tensor_18weight = fTensor_18weight.data();
std::vector<float> fTensor_0weight = std::vector<float>(5000);
float * tensor_0weight = fTensor_0weight.data();
std::vector<float> fTensor_10bias = std::vector<float>(50);
float * tensor_10bias = fTensor_10bias.data();
std::vector<float> fTensor_2bias = std::vector<float>(50);
float * tensor_2bias = fTensor_2bias.data();
std::vector<float> fTensor_6weight = std::vector<float>(2500);
float * tensor_6weight = fTensor_6weight.data();
std::vector<float> fTensor_14weight = std::vector<float>(2500);
float * tensor_14weight = fTensor_14weight.data();
std::vector<float> fTensor_16weight = std::vector<float>(2500);
float * tensor_16weight = fTensor_16weight.data();
std::vector<float> fTensor_12weight = std::vector<float>(2500);
float * tensor_12weight = fTensor_12weight.data();
std::vector<float> fTensor_16bias = std::vector<float>(50);
float * tensor_16bias = fTensor_16bias.data();
std::vector<float> fTensor_39 = std::vector<float>(160);
float * tensor_39 = fTensor_39.data();
std::vector<float> fTensor_18biasbcast = std::vector<float>(160);
float * tensor_18biasbcast = fTensor_18biasbcast.data();
std::vector<float> fTensor_38 = std::vector<float>(800);
float * tensor_38 = fTensor_38.data();
std::vector<float> fTensor_35 = std::vector<float>(800);
float * tensor_35 = fTensor_35.data();
std::vector<float> fTensor_14biasbcast = std::vector<float>(800);
float * tensor_14biasbcast = fTensor_14biasbcast.data();
std::vector<float> fTensor_34 = std::vector<float>(800);
float * tensor_34 = fTensor_34.data();
std::vector<float> fTensor_33 = std::vector<float>(800);
float * tensor_33 = fTensor_33.data();
std::vector<float> fTensor_36 = std::vector<float>(800);
float * tensor_36 = fTensor_36.data();
std::vector<float> fTensor_12biasbcast = std::vector<float>(800);
float * tensor_12biasbcast = fTensor_12biasbcast.data();
std::vector<float> fTensor_10biasbcast = std::vector<float>(800);
float * tensor_10biasbcast = fTensor_10biasbcast.data();
std::vector<float> fTensor_21 = std::vector<float>(800);
float * tensor_21 = fTensor_21.data();
std::vector<float> fTensor_24 = std::vector<float>(800);
float * tensor_24 = fTensor_24.data();
std::vector<float> fTensor_0biasbcast = std::vector<float>(800);
float * tensor_0biasbcast = fTensor_0biasbcast.data();
std::vector<float> fTensor_6biasbcast = std::vector<float>(800);
float * tensor_6biasbcast = fTensor_6biasbcast.data();
std::vector<float> fTensor_22 = std::vector<float>(800);
float * tensor_22 = fTensor_22.data();
std::vector<float> fTensor_23 = std::vector<float>(800);
float * tensor_23 = fTensor_23.data();
std::vector<float> fTensor_31 = std::vector<float>(800);
float * tensor_31 = fTensor_31.data();
std::vector<float> fTensor_2biasbcast = std::vector<float>(800);
float * tensor_2biasbcast = fTensor_2biasbcast.data();
std::vector<float> fTensor_32 = std::vector<float>(800);
float * tensor_32 = fTensor_32.data();
std::vector<float> fTensor_30 = std::vector<float>(800);
float * tensor_30 = fTensor_30.data();
std::vector<float> fTensor_25 = std::vector<float>(800);
float * tensor_25 = fTensor_25.data();
std::vector<float> fTensor_29 = std::vector<float>(800);
float * tensor_29 = fTensor_29.data();
std::vector<float> fTensor_4biasbcast = std::vector<float>(800);
float * tensor_4biasbcast = fTensor_4biasbcast.data();
std::vector<float> fTensor_37 = std::vector<float>(800);
float * tensor_37 = fTensor_37.data();
std::vector<float> fTensor_26 = std::vector<float>(800);
float * tensor_26 = fTensor_26.data();
std::vector<float> fTensor_16biasbcast = std::vector<float>(800);
float * tensor_16biasbcast = fTensor_16biasbcast.data();
std::vector<float> fTensor_8biasbcast = std::vector<float>(800);
float * tensor_8biasbcast = fTensor_8biasbcast.data();
std::vector<float> fTensor_27 = std::vector<float>(800);
float * tensor_27 = fTensor_27.data();
std::vector<float> fTensor_28 = std::vector<float>(800);
float * tensor_28 = fTensor_28.data();
Session(std::string filename ="") {
if (filename.empty()) filename = "Linear_16.dat";
std::ifstream f;
f.open(filename);
if (!f.is_open()){
throw std::runtime_error("tmva-sofie failed to open file for input weights");
}
std::string tensor_name;
int length;
f >> tensor_name >> length;
if (tensor_name != "tensor_8weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_8weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_8weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_8bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_8bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_8bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_4bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_4bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_4bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_2weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_2weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_2weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_0bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_0bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_0bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_12bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_12bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_12bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_18bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_18bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 10) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 10 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_18bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_14bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_14bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_14bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_4weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_4weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_4weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_10weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_10weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_10weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_6bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_6bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_6bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_18weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_18weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_18weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_0weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_0weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 5000) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 5000 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_0weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_10bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_10bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_10bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_2bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_2bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_2bias[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_6weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_6weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_6weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_14weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_14weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_14weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_16weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_16weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_16weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_12weight" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_12weight , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 2500) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 2500 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_12weight[i];
f >> tensor_name >> length;
if (tensor_name != "tensor_16bias" ) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor name; expected name is tensor_16bias , read " + tensor_name;
throw std::runtime_error(err_msg);
}
if (length != 50) {
std::string err_msg = "TMVA-SOFIE failed to read the correct tensor size; expected size is 50 , read " + std::to_string(length) ;
throw std::runtime_error(err_msg);
}
for (int i =0; i < length; ++i)
f >> tensor_16bias[i];
f.close();
{
std::vector<size_t> oldShape = { 50 };
std::vector<size_t> newShape = { 16 , 50 };
float * newData_ptr = TMVA::Experimental::SOFIE::UTILITY::Unidirectional_broadcast<float>(tensor_0bias, oldShape, newShape);
std::copy(newData_ptr, newData_ptr + 800, tensor_0biasbcast);
delete [] newData_ptr;
}
{
std::vector<size_t> oldShape = { 50 };
std::vector<size_t> newShape = { 16 , 50 };
float * newData_ptr = TMVA::Experimental::SOFIE::UTILITY::Unidirectional_broadcast<float>(tensor_2bias, oldShape, newShape);
std::copy(newData_ptr, newData_ptr + 800, tensor_2biasbcast);
delete [] newData_ptr;
}
{
std::vector<size_t> oldShape = { 50 };
std::vector<size_t> newShape = { 16 , 50 };
float * newData_ptr = TMVA::Experimental::SOFIE::UTILITY::Unidirectional_broadcast<float>(tensor_4bias, oldShape, newShape);
std::copy(newData_ptr, newData_ptr + 800, tensor_4biasbcast);
delete [] newData_ptr;
}
{
std::vector<size_t> oldShape = { 50 };
std::vector<size_t> newShape = { 16 , 50 };
float * newData_ptr = TMVA::Experimental::SOFIE::UTILITY::Unidirectional_broadcast<float>(tensor_6bias, oldShape, newShape);
std::copy(newData_ptr, newData_ptr + 800, tensor_6biasbcast);
delete [] newData_ptr;
}
{
std::vector<size_t> oldShape = { 50 };
std::vector<size_t> newShape = { 16 , 50 };
float * newData_ptr = TMVA::Experimental::SOFIE::UTILITY::Unidirectional_broadcast<float>(tensor_8bias, oldShape, newShape);
std::copy(newData_ptr, newData_ptr + 800, tensor_8biasbcast);
delete [] newData_ptr;
}
{
std::vector<size_t> oldShape = { 50 };
std::vector<size_t> newShape = { 16 , 50 };
float * newData_ptr = TMVA::Experimental::SOFIE::UTILITY::Unidirectional_broadcast<float>(tensor_10bias, oldShape, newShape);
std::copy(newData_ptr, newData_ptr + 800, tensor_10biasbcast);
delete [] newData_ptr;
}
{
std::vector<size_t> oldShape = { 50 };
std::vector<size_t> newShape = { 16 , 50 };
float * newData_ptr = TMVA::Experimental::SOFIE::UTILITY::Unidirectional_broadcast<float>(tensor_12bias, oldShape, newShape);
std::copy(newData_ptr, newData_ptr + 800, tensor_12biasbcast);
delete [] newData_ptr;
}
{
std::vector<size_t> oldShape = { 50 };
std::vector<size_t> newShape = { 16 , 50 };
float * newData_ptr = TMVA::Experimental::SOFIE::UTILITY::Unidirectional_broadcast<float>(tensor_14bias, oldShape, newShape);
std::copy(newData_ptr, newData_ptr + 800, tensor_14biasbcast);
delete [] newData_ptr;
}
{
std::vector<size_t> oldShape = { 50 };
std::vector<size_t> newShape = { 16 , 50 };
float * newData_ptr = TMVA::Experimental::SOFIE::UTILITY::Unidirectional_broadcast<float>(tensor_16bias, oldShape, newShape);
std::copy(newData_ptr, newData_ptr + 800, tensor_16biasbcast);
delete [] newData_ptr;
}
{
std::vector<size_t> oldShape = { 10 };
std::vector<size_t> newShape = { 16 , 10 };
float * newData_ptr = TMVA::Experimental::SOFIE::UTILITY::Unidirectional_broadcast<float>(tensor_18bias, oldShape, newShape);
std::copy(newData_ptr, newData_ptr + 160, tensor_18biasbcast);
delete [] newData_ptr;
}
}
std::vector<float> infer(float* tensor_input1){
//--------- Gemm
char op_0_transA = 'n';
char op_0_transB = 't';
int op_0_m = 16;
int op_0_n = 50;
int op_0_k = 100;
float op_0_alpha = 1;
float op_0_beta = 1;
int op_0_lda = 100;
int op_0_ldb = 100;
std::copy(tensor_0biasbcast, tensor_0biasbcast + 800, tensor_21);
BLAS::sgemm_(&op_0_transB, &op_0_transA, &op_0_n, &op_0_m, &op_0_k, &op_0_alpha, tensor_0weight, &op_0_ldb, tensor_input1, &op_0_lda, &op_0_beta, tensor_21, &op_0_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_22[id] = ((tensor_21[id] > 0 )? tensor_21[id] : 0);
}
//--------- Gemm
char op_2_transA = 'n';
char op_2_transB = 't';
int op_2_m = 16;
int op_2_n = 50;
int op_2_k = 50;
float op_2_alpha = 1;
float op_2_beta = 1;
int op_2_lda = 50;
int op_2_ldb = 50;
std::copy(tensor_2biasbcast, tensor_2biasbcast + 800, tensor_23);
BLAS::sgemm_(&op_2_transB, &op_2_transA, &op_2_n, &op_2_m, &op_2_k, &op_2_alpha, tensor_2weight, &op_2_ldb, tensor_22, &op_2_lda, &op_2_beta, tensor_23, &op_2_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_24[id] = ((tensor_23[id] > 0 )? tensor_23[id] : 0);
}
//--------- Gemm
char op_4_transA = 'n';
char op_4_transB = 't';
int op_4_m = 16;
int op_4_n = 50;
int op_4_k = 50;
float op_4_alpha = 1;
float op_4_beta = 1;
int op_4_lda = 50;
int op_4_ldb = 50;
std::copy(tensor_4biasbcast, tensor_4biasbcast + 800, tensor_25);
BLAS::sgemm_(&op_4_transB, &op_4_transA, &op_4_n, &op_4_m, &op_4_k, &op_4_alpha, tensor_4weight, &op_4_ldb, tensor_24, &op_4_lda, &op_4_beta, tensor_25, &op_4_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_26[id] = ((tensor_25[id] > 0 )? tensor_25[id] : 0);
}
//--------- Gemm
char op_6_transA = 'n';
char op_6_transB = 't';
int op_6_m = 16;
int op_6_n = 50;
int op_6_k = 50;
float op_6_alpha = 1;
float op_6_beta = 1;
int op_6_lda = 50;
int op_6_ldb = 50;
std::copy(tensor_6biasbcast, tensor_6biasbcast + 800, tensor_27);
BLAS::sgemm_(&op_6_transB, &op_6_transA, &op_6_n, &op_6_m, &op_6_k, &op_6_alpha, tensor_6weight, &op_6_ldb, tensor_26, &op_6_lda, &op_6_beta, tensor_27, &op_6_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_28[id] = ((tensor_27[id] > 0 )? tensor_27[id] : 0);
}
//--------- Gemm
char op_8_transA = 'n';
char op_8_transB = 't';
int op_8_m = 16;
int op_8_n = 50;
int op_8_k = 50;
float op_8_alpha = 1;
float op_8_beta = 1;
int op_8_lda = 50;
int op_8_ldb = 50;
std::copy(tensor_8biasbcast, tensor_8biasbcast + 800, tensor_29);
BLAS::sgemm_(&op_8_transB, &op_8_transA, &op_8_n, &op_8_m, &op_8_k, &op_8_alpha, tensor_8weight, &op_8_ldb, tensor_28, &op_8_lda, &op_8_beta, tensor_29, &op_8_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_30[id] = ((tensor_29[id] > 0 )? tensor_29[id] : 0);
}
//--------- Gemm
char op_10_transA = 'n';
char op_10_transB = 't';
int op_10_m = 16;
int op_10_n = 50;
int op_10_k = 50;
float op_10_alpha = 1;
float op_10_beta = 1;
int op_10_lda = 50;
int op_10_ldb = 50;
std::copy(tensor_10biasbcast, tensor_10biasbcast + 800, tensor_31);
BLAS::sgemm_(&op_10_transB, &op_10_transA, &op_10_n, &op_10_m, &op_10_k, &op_10_alpha, tensor_10weight, &op_10_ldb, tensor_30, &op_10_lda, &op_10_beta, tensor_31, &op_10_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_32[id] = ((tensor_31[id] > 0 )? tensor_31[id] : 0);
}
//--------- Gemm
char op_12_transA = 'n';
char op_12_transB = 't';
int op_12_m = 16;
int op_12_n = 50;
int op_12_k = 50;
float op_12_alpha = 1;
float op_12_beta = 1;
int op_12_lda = 50;
int op_12_ldb = 50;
std::copy(tensor_12biasbcast, tensor_12biasbcast + 800, tensor_33);
BLAS::sgemm_(&op_12_transB, &op_12_transA, &op_12_n, &op_12_m, &op_12_k, &op_12_alpha, tensor_12weight, &op_12_ldb, tensor_32, &op_12_lda, &op_12_beta, tensor_33, &op_12_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_34[id] = ((tensor_33[id] > 0 )? tensor_33[id] : 0);
}
//--------- Gemm
char op_14_transA = 'n';
char op_14_transB = 't';
int op_14_m = 16;
int op_14_n = 50;
int op_14_k = 50;
float op_14_alpha = 1;
float op_14_beta = 1;
int op_14_lda = 50;
int op_14_ldb = 50;
std::copy(tensor_14biasbcast, tensor_14biasbcast + 800, tensor_35);
BLAS::sgemm_(&op_14_transB, &op_14_transA, &op_14_n, &op_14_m, &op_14_k, &op_14_alpha, tensor_14weight, &op_14_ldb, tensor_34, &op_14_lda, &op_14_beta, tensor_35, &op_14_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_36[id] = ((tensor_35[id] > 0 )? tensor_35[id] : 0);
}
//--------- Gemm
char op_16_transA = 'n';
char op_16_transB = 't';
int op_16_m = 16;
int op_16_n = 50;
int op_16_k = 50;
float op_16_alpha = 1;
float op_16_beta = 1;
int op_16_lda = 50;
int op_16_ldb = 50;
std::copy(tensor_16biasbcast, tensor_16biasbcast + 800, tensor_37);
BLAS::sgemm_(&op_16_transB, &op_16_transA, &op_16_n, &op_16_m, &op_16_k, &op_16_alpha, tensor_16weight, &op_16_ldb, tensor_36, &op_16_lda, &op_16_beta, tensor_37, &op_16_n);
//------ RELU
for (int id = 0; id < 800 ; id++){
tensor_38[id] = ((tensor_37[id] > 0 )? tensor_37[id] : 0);
}
//--------- Gemm
char op_18_transA = 'n';
char op_18_transB = 't';
int op_18_m = 16;
int op_18_n = 10;
int op_18_k = 50;
float op_18_alpha = 1;
float op_18_beta = 1;
int op_18_lda = 50;
int op_18_ldb = 50;
std::copy(tensor_18biasbcast, tensor_18biasbcast + 160, tensor_39);
BLAS::sgemm_(&op_18_transB, &op_18_transA, &op_18_n, &op_18_m, &op_18_k, &op_18_alpha, tensor_18weight, &op_18_ldb, tensor_38, &op_18_lda, &op_18_beta, tensor_39, &op_18_n);
std::vector<float> ret (tensor_39, tensor_39 + 160);
return ret;
}
};
} //TMVA_SOFIE_Linear_16
#endif // TMVA_SOFIE_LINEAR_16
Author
Sanjiban Sengupta

Definition in file TMVA_SOFIE_ONNX.C.