TMVA_SOFIE_ONNX.py File Reference

Detailed Description

This macro provides a simple example for:

  • creating a model with PyTorch and exporting it to ONNX
  • parsing the ONNX file with SOFIE and generating C++ inference code
  • compiling the generated code with ROOT Cling
  • running the code and optionally comparing the output with ONNXRuntime
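SOFIE emits code with a fixed naming convention: for a model named LinearModel it generates a header LinearModel.hxx (plus a LinearModel.dat weight file) defining a namespace TMVA_SOFIE_LinearModel with a Session class whose infer method evaluates the network. As a minimal sketch of how the generated code is consumed from Python, assuming these standard SOFIE conventions (the macro below does the same in full):

import ROOT
import numpy as np

ROOT.gInterpreter.Declare('#include "LinearModel.hxx"')  # JIT-compile with Cling
session = ROOT.TMVA_SOFIE_LinearModel.Session()          # reads LinearModel.dat
x = np.random.randn(1, 32).astype(np.float32)            # input of shape (1,32)
y = session.infer(x)                                     # std::vector<float> of size 2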
import torch
import torch.nn as nn
import ROOT
import numpy as np
import inspect
def CreateAndTrainModel(modelName):
    # simple feed-forward network: 32 -> 16 -> 8 -> 2 with a softmax output
    model = nn.Sequential(
        nn.Linear(32, 16),
        nn.Linear(16, 8),
        nn.Linear(8, 2),
        nn.Softmax(dim=1)
    )
    criterion = nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    # train the model with random data
    for i in range(500):
        x = torch.randn(2, 32)
        y = torch.randn(2, 2)
        y_pred = model(x)
        loss = criterion(y_pred, y)
        # backpropagate and update the weights
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # *******************************************************
    ## EXPORT to ONNX
    #
    # need to evaluate the model before exporting to ONNX
    # and to provide a dummy input tensor to set the model input shape
    modelFile = modelName + ".onnx"
    dummy_x = torch.randn(1, 32)
    model(dummy_x)
    # check which parameters this version of torch.onnx.export supports
    def filtered_kwargs(func, **candidate_kwargs):
        # keep only the keyword arguments that func actually accepts
        sig = inspect.signature(func)
        return {
            k: v for k, v in candidate_kwargs.items()
            if k in sig.parameters
        }
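    # for illustration: with an older PyTorch whose torch.onnx.export has no
    # 'dynamo' parameter, filtered_kwargs(torch.onnx.export, dynamo=True,
    # input_names=["input"]) would return just {'input_names': ['input']}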
    kwargs = filtered_kwargs(
        torch.onnx.export,
        input_names=["input"],
        output_names=["output"],
        external_data=False,  # may not exist in older versions
        dynamo=True           # may not exist in older versions
    )
print("calling torch.onnx.export with parameters",kwargs)
try:
torch.onnx.export(model, dummy_x, modelFile, **kwargs)
print("model exported to ONNX as",modelFile)
return modelFile
except TypeError:
print("Cannot export model from pytorch to ONNX - with version ",torch.__version__)
print("Skip tutorial execution")
exit()
def ParseModel(modelFile, verbose=False):
    parser = ROOT.TMVA.Experimental.SOFIE.RModelParser_ONNX()
    model = parser.Parse(modelFile, verbose)
    #
    # print model weights
    if (verbose):
        data = model.GetTensorData['float']('0weight')
        print("0weight", data)
        data = model.GetTensorData['float']('2weight')
        print("2weight", data)
    # Generating inference code
    model.Generate()
    # generate header file (and .dat weight file) named modelName + .hxx
    model.OutputGenerated()
    if (verbose):
        model.PrintGenerated()
    modelCode = modelFile.replace(".onnx", ".hxx")
    print("Generated model header file ", modelCode)
    return modelCode
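# note: calling ParseModel(modelFile, verbose=True) would in addition print
# the model weight tensors and the generated C++ code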
###################################################################
## Step 1 : Create and Train model
###################################################################
#use an arbitrary modelName
modelName = "LinearModel"
modelFile = CreateAndTrainModel(modelName)
###################################################################
## Step 2 : Parse model and generate inference code with SOFIE
###################################################################
modelCode = ParseModel(modelFile, False)
###################################################################
## Step 3 : Compile the generated C++ model code
###################################################################
ROOT.gInterpreter.Declare('#include "' + modelCode + '"')
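# gInterpreter.Declare JIT-compiles the generated header with Cling, making
# the TMVA_SOFIE_LinearModel namespace available to PyROOT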
###################################################################
## Step 4: Evaluate the model
###################################################################
# first get the SOFIE session namespace
sofie = getattr(ROOT, 'TMVA_SOFIE_' + modelName)
session = sofie.Session()
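# the generated Session constructor reads the model weights from the
# LinearModel.dat file produced by SOFIE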
print("\n************************************************************")
print("Running inference with SOFIE ")
print("\ninput to model is ",x)
# output shape is (1,2)
y_sofie = np.asarray(y.data())
print("-> output using SOFIE = ", y_sofie)
# check the inference result with ONNXRuntime, if it is available
try:
    import onnxruntime as ort
    # Load model
    print("Running inference with ONNXRuntime ")
    ort_session = ort.InferenceSession(modelFile)
    # Run inference
    outputs = ort_session.run(None, {"input": x})
    y_ort = outputs[0]
    print("-> output using ORT =", y_ort)
    testFailed = abs(y_sofie - y_ort) > 0.01
    if (np.any(testFailed)):
        raise RuntimeError('Result is different between SOFIE and ONNXRT')
    else:
        print("OK")
except ImportError:
    print("Missing ONNXRuntime: skipping comparison test")
calling torch.onnx.export with parameters {'input_names': ['input'], 'output_names': ['output'], 'external_data': False, 'dynamo': True}
[torch.onnx] Obtain model graph for `Sequential([...]` with `torch.export.export(..., strict=False)`...
[torch.onnx] Obtain model graph for `Sequential([...]` with `torch.export.export(..., strict=False)`... ✅
[torch.onnx] Run decomposition...
[torch.onnx] Run decomposition... ✅
[torch.onnx] Translate the graph into ONNX...
[torch.onnx] Translate the graph into ONNX... ✅
model exported to ONNX as LinearModel.onnx
Generated model header file LinearModel.hxx
************************************************************
Running inference with SOFIE
input to model is [[-0.43541127 -0.6999505 -0.02768864 0.23811446 0.25726634 -1.085633
0.41872853 0.8219983 -0.13312088 0.8314049 1.7008935 0.51832324
0.39762613 0.06118078 0.46557882 1.3935466 -1.0586125 -0.79214805
-0.7751602 -0.7748113 0.15902355 1.4220787 -0.7163378 -2.077027
1.173115 0.49559772 0.01957363 0.87363017 -1.1822985 0.1161613
-0.35567507 0.16426486]]
-> output using SOFIE = [0.52619046 0.47380957]
Missing ONNXRuntime: skipping comparison test
Author
Lorenzo Moneta

Definition in file TMVA_SOFIE_ONNX.py.