RModelParser_PyTorch.cxx
// @(#)root/tmva/pymva $Id$
// Author: Sanjiban Sengupta 2021

/**********************************************************************************
 * Project : TMVA - a Root-integrated toolkit for multivariate data analysis *
 * Package : TMVA *
 * Function: TMVA::Experimental::SOFIE::PyTorch::Parse *
 * *
 * Description: *
 * Parser function for translating PyTorch .pt model to RModel object *
 * *
 * Example Usage: *
 * ~~~ {.cpp} *
 * using TMVA::Experimental::SOFIE; *
 * // Building the vector of input tensor shapes *
 * std::vector<size_t> s1{120,1}; *
 * std::vector<std::vector<size_t>> inputShape{s1}; *
 * RModel model = PyTorch::Parse("trained_model_dense.pt",inputShape); *
 * ~~~ *
 * *
 **********************************************************************************/


#include "TMVA/RModelParser_PyTorch.h"

#include <Python.h>

#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <numpy/arrayobject.h>

namespace TMVA{
namespace Experimental{
namespace SOFIE{
namespace PyTorch{

// Referencing Python utility functions present in PyMethodBase
static void(& PyRunString)(TString, PyObject*, PyObject*) = PyMethodBase::PyRunString;
static const char*(& PyStringAsString)(PyObject*) = PyMethodBase::PyStringAsString;
static std::vector<size_t>(& GetDataFromList)(PyObject*) = PyMethodBase::GetDataFromList;


namespace INTERNAL{

// For searching and calling the specific preparatory function for a PyTorch ONNX Graph node
std::unique_ptr<ROperator> MakePyTorchNode(PyObject* fNode);

std::unique_ptr<ROperator> MakePyTorchGemm(PyObject* fNode); // For instantiating ROperator for PyTorch ONNX's Gemm operator
std::unique_ptr<ROperator> MakePyTorchConv(PyObject* fNode); // For instantiating ROperator for PyTorch ONNX's Conv operator
std::unique_ptr<ROperator> MakePyTorchRelu(PyObject* fNode); // For instantiating ROperator for PyTorch ONNX's Relu operator
std::unique_ptr<ROperator> MakePyTorchSelu(PyObject* fNode); // For instantiating ROperator for PyTorch ONNX's Selu operator
std::unique_ptr<ROperator> MakePyTorchSigmoid(PyObject* fNode); // For instantiating ROperator for PyTorch ONNX's Sigmoid operator
std::unique_ptr<ROperator> MakePyTorchTranspose(PyObject* fNode); // For instantiating ROperator for PyTorch ONNX's Transpose operator

// For mapping a PyTorch ONNX Graph node to the preparatory function for its ROperator
using PyTorchMethodMap = std::unordered_map<std::string, std::unique_ptr<ROperator> (*)(PyObject* fNode)>;

const PyTorchMethodMap mapPyTorchNode =
{
   {"onnx::Gemm", &MakePyTorchGemm},
   {"onnx::Conv", &MakePyTorchConv},
   {"onnx::Relu", &MakePyTorchRelu},
   {"onnx::Selu", &MakePyTorchSelu},
   {"onnx::Sigmoid", &MakePyTorchSigmoid},
   {"onnx::Transpose", &MakePyTorchTranspose}
};


//////////////////////////////////////////////////////////////////////////////////
/// \brief Prepares the equivalent ROperator for a PyTorch ONNX Graph node.
///
/// \param[in] fNode Python PyTorch ONNX Graph node
/// \return Unique pointer to ROperator object
///
/// The function looks up the type of the passed PyTorch ONNX Graph node in the map
/// and calls the corresponding preparatory function, returning the resulting
/// ROperator object.
///
/// To support a new PyTorch ONNX Graph node in the future, one only needs to write a
/// preparatory function that extracts the required properties and attributes from the
/// fNode dictionary, which contains all the information about any PyTorch ONNX
/// Graph node, and, after any required transformations, passes them on when
/// instantiating the ROperator object; a brief sketch is given after this function.
///
/// The fNode dictionary, which holds all the information about a PyTorch ONNX Graph
/// node, has the following structure:
///
/// dict fNode {  'nodeType'       : Type of node (operator)
///               'nodeAttributes' : Attributes of the node
///               'nodeInputs'     : List of names of input tensors
///               'nodeOutputs'    : List of names of output tensors
///               'nodeDType'      : Data-type of the operator node
///            }
///
std::unique_ptr<ROperator> MakePyTorchNode(PyObject* fNode){
   std::string fNodeType = PyStringAsString(PyDict_GetItemString(fNode,"nodeType"));
   auto findNode = mapPyTorchNode.find(fNodeType);
   if(findNode == mapPyTorchNode.end()){
      throw std::runtime_error("TMVA::SOFIE - Parsing PyTorch node " + fNodeType + " is not yet supported ");
   }
   return (findNode->second)(fNode);
}
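// For reference, a minimal sketch of what such a preparatory function could look like
// for a hypothetical additional node type. The operator class ROperator_Tanh is used
// here purely as an illustration and is not registered in mapPyTorchNode above:
//
//    std::unique_ptr<ROperator> MakePyTorchTanh(PyObject* fNode){
//       PyObject* fInputs  = PyDict_GetItemString(fNode,"nodeInputs");
//       PyObject* fOutputs = PyDict_GetItemString(fNode,"nodeOutputs");
//       std::string fNodeDType = PyStringAsString(PyList_GetItem(PyDict_GetItemString(fNode,"nodeDType"),0));
//       std::unique_ptr<ROperator> op;
//       switch(ConvertStringToType(fNodeDType)){
//          case ETensorType::FLOAT:
//             op.reset(new ROperator_Tanh<float>(PyStringAsString(PyList_GetItem(fInputs,0)),
//                                                PyStringAsString(PyList_GetItem(fOutputs,0))));
//             break;
//          default:
//             throw std::runtime_error("TMVA::SOFIE - Unsupported input type " + fNodeDType);
//       }
//       return op;
//    }
//
// A matching entry, e.g. {"onnx::Tanh", &MakePyTorchTanh}, would also be added to
// mapPyTorchNode.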


//////////////////////////////////////////////////////////////////////////////////
/// \brief Prepares a ROperator_Gemm object
///
/// \param[in] fNode Python PyTorch ONNX Graph node
/// \return Unique pointer to ROperator object
///
/// For a PyTorch Linear layer, which appears as a Gemm operation in its ONNX graph,
/// the names of the input and output tensors are extracted and, together with the
/// required attributes, passed to instantiate a ROperator_Gemm object.
/// fInputs is a list of tensor names, which includes the name of the input tensor
/// and the names of the weight tensors.
std::unique_ptr<ROperator> MakePyTorchGemm(PyObject* fNode){
   PyObject* fAttributes = PyDict_GetItemString(fNode,"nodeAttributes");
   PyObject* fInputs = PyDict_GetItemString(fNode,"nodeInputs");
   PyObject* fOutputs = PyDict_GetItemString(fNode,"nodeOutputs");
   std::string fNodeDType = PyStringAsString(PyList_GetItem(PyDict_GetItemString(fNode,"nodeDType"),0));

   // Extracting the parameters for Gemm Operator
   std::string fNameA = PyStringAsString(PyList_GetItem(fInputs,0));
   std::string fNameB = PyStringAsString(PyList_GetItem(fInputs,1));
   std::string fNameC = PyStringAsString(PyList_GetItem(fInputs,2));
   std::string fNameY = PyStringAsString(PyList_GetItem(fOutputs,0));
   float fAttrAlpha = (float)(PyFloat_AsDouble(PyDict_GetItemString(fAttributes,"alpha")));
   float fAttrBeta = (float)(PyFloat_AsDouble(PyDict_GetItemString(fAttributes,"beta")));
   int_t fAttrTransA;
   int_t fAttrTransB;

   if(PyDict_Contains(fAttributes,PyUnicode_FromString("transB"))){
      fAttrTransB = (int_t)(PyLong_AsLong(PyDict_GetItemString(fAttributes,"transB")));
      fAttrTransA = !fAttrTransB;
   }
   else{
      fAttrTransA = (int_t)(PyLong_AsLong(PyDict_GetItemString(fAttributes,"transA")));
      fAttrTransB = !fAttrTransA;
   }

   std::unique_ptr<ROperator> op;
   switch(ConvertStringToType(fNodeDType)){
      case ETensorType::FLOAT: {
         op.reset(new ROperator_Gemm<float>(fAttrAlpha, fAttrBeta, fAttrTransA, fAttrTransB, fNameA, fNameB, fNameC, fNameY));
         break;
      }
      default:
         throw std::runtime_error("TMVA::SOFIE - Unsupported - Operator Gemm does not yet support input type " + fNodeDType);
   }
   return op;
}
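// Note on the attributes handled above: the ONNX Gemm operator computes
//    Y = alpha * op(A) * op(B) + beta * C,
// where op(.) transposes its argument when the corresponding transA/transB attribute
// is set. A PyTorch nn.Linear layer is typically exported with alpha = beta = 1 and
// transB = 1, with B holding the weight matrix and C the bias; the exact attribute
// set can vary between PyTorch versions, hence the check for transB above.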

//////////////////////////////////////////////////////////////////////////////////
/// \brief Prepares a ROperator_Relu object
///
/// \param[in] fNode Python PyTorch ONNX Graph node
/// \return Unique pointer to ROperator object
///
/// For instantiating a ROperator_Relu object, the names of
/// input & output tensors and the data-type of the Graph node
/// are extracted.
std::unique_ptr<ROperator> MakePyTorchRelu(PyObject* fNode){
   PyObject* fInputs = PyDict_GetItemString(fNode,"nodeInputs");
   PyObject* fOutputs = PyDict_GetItemString(fNode,"nodeOutputs");
   std::string fNodeDType = PyStringAsString(PyList_GetItem(PyDict_GetItemString(fNode,"nodeDType"),0));
   std::string fNameX = PyStringAsString(PyList_GetItem(fInputs,0));
   std::string fNameY = PyStringAsString(PyList_GetItem(fOutputs,0));
   std::unique_ptr<ROperator> op;
   switch(ConvertStringToType(fNodeDType)){
      case ETensorType::FLOAT: {
         op.reset(new ROperator_Relu<float>(fNameX,fNameY));
         break;
      }
      default:
         throw std::runtime_error("TMVA::SOFIE - Unsupported - Operator Relu does not yet support input type " + fNodeDType);
   }
   return op;
}

//////////////////////////////////////////////////////////////////////////////////
/// \brief Prepares a ROperator_Selu object
///
/// \param[in] fNode Python PyTorch ONNX Graph node
/// \return Unique pointer to ROperator object
///
/// For instantiating a ROperator_Selu object, the names of
/// input & output tensors and the data-type of the Graph node
/// are extracted.
std::unique_ptr<ROperator> MakePyTorchSelu(PyObject* fNode){
   PyObject* fInputs = PyDict_GetItemString(fNode,"nodeInputs");
   PyObject* fOutputs = PyDict_GetItemString(fNode,"nodeOutputs");
   std::string fNodeDType = PyStringAsString(PyList_GetItem(PyDict_GetItemString(fNode,"nodeDType"),0));

   std::unique_ptr<ROperator> op;
   switch(ConvertStringToType(fNodeDType)){
      case ETensorType::FLOAT: {
         op.reset(new ROperator_Selu<float>(PyStringAsString(PyList_GetItem(fInputs,0)), PyStringAsString(PyList_GetItem(fOutputs,0))));
         break;
      }
      default:
         throw std::runtime_error("TMVA::SOFIE - Unsupported - Operator Selu does not yet support input type " + fNodeDType);
   }
   return op;
}

//////////////////////////////////////////////////////////////////////////////////
/// \brief Prepares a ROperator_Sigmoid object
///
/// \param[in] fNode Python PyTorch ONNX Graph node
/// \return Unique pointer to ROperator object
///
/// For instantiating a ROperator_Sigmoid object, the names of
/// input & output tensors and the data-type of the Graph node
/// are extracted.
std::unique_ptr<ROperator> MakePyTorchSigmoid(PyObject* fNode){
   PyObject* fInputs = PyDict_GetItemString(fNode,"nodeInputs");
   PyObject* fOutputs = PyDict_GetItemString(fNode,"nodeOutputs");
   std::string fNodeDType = PyStringAsString(PyList_GetItem(PyDict_GetItemString(fNode,"nodeDType"),0));

   std::unique_ptr<ROperator> op;
   switch(ConvertStringToType(fNodeDType)){
      case ETensorType::FLOAT: {
         op.reset(new ROperator_Sigmoid<float>(PyStringAsString(PyList_GetItem(fInputs,0)), PyStringAsString(PyList_GetItem(fOutputs,0))));
         break;
      }
      default:
         throw std::runtime_error("TMVA::SOFIE - Unsupported - Operator Sigmoid does not yet support input type " + fNodeDType);
   }
   return op;
}


//////////////////////////////////////////////////////////////////////////////////
/// \brief Prepares a ROperator_Transpose object
///
/// \param[in] fNode Python PyTorch ONNX Graph node
/// \return Unique pointer to ROperator object
///
/// For the Transpose operator of the PyTorch ONNX Graph, the permute dimensions are
/// extracted and passed when instantiating the ROperator object.
std::unique_ptr<ROperator> MakePyTorchTranspose(PyObject* fNode){
   PyObject* fAttributes = PyDict_GetItemString(fNode,"nodeAttributes");
   PyObject* fInputs = PyDict_GetItemString(fNode,"nodeInputs");
   PyObject* fOutputs = PyDict_GetItemString(fNode,"nodeOutputs");
   std::string fNodeDType = PyStringAsString(PyList_GetItem(PyDict_GetItemString(fNode,"nodeDType"),0));

   // Extracting the Permute dimensions for transpose
   std::vector<int_t> fAttrPermute;
   PyObject* fPermute = PyDict_GetItemString(fAttributes,"perm");
   for(Py_ssize_t permIter=0; permIter<PyList_Size(fPermute);++permIter){
      fAttrPermute.push_back((int_t)PyLong_AsLong(PyList_GetItem(fPermute,permIter)));
   }
   std::string fNameData = PyStringAsString(PyList_GetItem(fInputs,0));
   std::string fNameOutput = PyStringAsString(PyList_GetItem(fOutputs,0));

   std::unique_ptr<ROperator> op;
   switch(ConvertStringToType(fNodeDType)){
      case ETensorType::FLOAT: {
         op.reset(new ROperator_Transpose<float>(fAttrPermute, fNameData, fNameOutput));
         break;
      }
      default:
         throw std::runtime_error("TMVA::SOFIE - Unsupported - Operator Transpose does not yet support input type " + fNodeDType);
   }
   return op;
}


//////////////////////////////////////////////////////////////////////////////////
/// \brief Prepares a ROperator_Conv object
///
/// \param[in] fNode Python PyTorch ONNX Graph node
/// \return Unique pointer to ROperator object
///
/// For the Conv operator of the PyTorch ONNX Graph, attributes like dilations, group,
/// kernel shape, pads and strides are extracted and passed when instantiating the
/// ROperator object, with autopad defaulting to `NOTSET`.
std::unique_ptr<ROperator> MakePyTorchConv(PyObject* fNode){
   PyObject* fAttributes = PyDict_GetItemString(fNode,"nodeAttributes");
   PyObject* fInputs = PyDict_GetItemString(fNode,"nodeInputs");
   PyObject* fOutputs = PyDict_GetItemString(fNode,"nodeOutputs");
   std::string fNodeDType = PyStringAsString(PyList_GetItem(PyDict_GetItemString(fNode,"nodeDType"),0));

   // Extracting the Conv Node Attributes
   PyObject* fDilations = PyDict_GetItemString(fAttributes,"dilations");
   PyObject* fGroup = PyDict_GetItemString(fAttributes,"group");
   PyObject* fKernelShape = PyDict_GetItemString(fAttributes,"kernel_shape");
   PyObject* fPads = PyDict_GetItemString(fAttributes,"pads");
   PyObject* fStrides = PyDict_GetItemString(fAttributes,"strides");

   std::string fAttrAutopad = "NOTSET";
   std::vector<size_t> fAttrDilations = GetDataFromList(fDilations);
   size_t fAttrGroup = PyLong_AsLong(fGroup);
   std::vector<size_t> fAttrKernelShape = GetDataFromList(fKernelShape);
   std::vector<size_t> fAttrPads = GetDataFromList(fPads);
   std::vector<size_t> fAttrStrides = GetDataFromList(fStrides);
   std::string nameX = PyStringAsString(PyList_GetItem(fInputs,0));
   std::string nameW = PyStringAsString(PyList_GetItem(fInputs,1));
   std::string nameB = PyStringAsString(PyList_GetItem(fInputs,2));
   std::string nameY = PyStringAsString(PyList_GetItem(fOutputs,0));

   std::unique_ptr<ROperator> op;
   switch(ConvertStringToType(fNodeDType)){
      case ETensorType::FLOAT: {
         op.reset(new ROperator_Conv<float>(fAttrAutopad, fAttrDilations, fAttrGroup, fAttrKernelShape, fAttrPads, fAttrStrides, nameX, nameW, nameB, nameY));
         break;
      }
      default:
         throw std::runtime_error("TMVA::SOFIE - Unsupported - Operator Conv does not yet support input type " + fNodeDType);
   }
   return op;
}
}//INTERNAL


//////////////////////////////////////////////////////////////////////////////////
/// \param[in] filename file location of PyTorch .pt model
/// \param[in] inputShapes vector of input shape vectors
/// \param[in] inputDTypes vector of ETensorType for data-types of input tensors
/// \return Parsed RModel object
///
/// The `Parse()` function defined in `TMVA::Experimental::SOFIE::PyTorch` will
/// parse a trained PyTorch .pt model into an RModel object. The parser uses
/// internal functions of PyTorch to convert any PyTorch model into its
/// equivalent ONNX graph. For this conversion, dummy inputs are built, passed
/// through the model, and the applied operators are recorded for populating
/// the ONNX graph. The `Parse()` function therefore requires the shapes and
/// data-types of the input tensors, which are used for building the dummy inputs.
/// After this conversion, the nodes of the ONNX graph are traversed to
/// extract properties like node type, attributes, and input & output tensor names.
/// The `AddOperator()` function is then called on the extracted nodes to add the
/// operator into the RModel object. The nodes are also checked to register any
/// routines required by the generated inference code.
///
/// The internal function used to convert the model to a graph object returns a list
/// which contains a Graph object and a dictionary of weights. This dictionary is
/// used to extract the initialized tensors of the model. The names and data-types
/// of the initialized tensors are extracted along with their values as NumPy arrays,
/// and after appropriate type conversions they are added into the RModel object.
///
/// For adding the input tensor infos, the names of the input tensors are extracted
/// from the PyTorch ONNX graph object. The vectors of shapes & data-types passed
/// into the `Parse()` function are used to obtain the data-type and the shape
/// of each input tensor. The extracted input tensor infos are then added into the
/// RModel object by calling the `AddInputTensorInfo()` function.
///
/// For the output tensor infos, the names of the output tensors are likewise
/// extracted from the Graph object and added into the RModel object by calling the
/// `AddOutputTensorNameList()` function.
///
/// Example Usage:
/// ~~~ {.cpp}
/// using TMVA::Experimental::SOFIE;
/// //Building the vector of input tensor shapes
/// std::vector<size_t> s1{120,1};
/// std::vector<std::vector<size_t>> inputShape{s1};
/// RModel model = PyTorch::Parse("trained_model_dense.pt",inputShape);
/// ~~~
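///
/// Once parsed, SOFIE inference code can be generated from the returned RModel; a
/// brief sketch, assuming the usual RModel code-generation interface (`Generate()`
/// and `OutputGenerated()`):
/// ~~~ {.cpp}
/// model.Generate();
/// model.OutputGenerated("trained_model_dense.hxx");
/// ~~~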
RModel Parse(std::string filename, std::vector<std::vector<size_t>> inputShapes, std::vector<ETensorType> inputDTypes){

   char sep = '/';
   #ifdef _WIN32
   sep = '\\';
   #endif

   size_t isep = filename.rfind(sep, filename.length());
   std::string filename_nodir = filename;
   if (isep != std::string::npos){
      filename_nodir = (filename.substr(isep+1, filename.length() - isep));
   }

   //Check on whether the PyTorch .pt file exists
   if(!std::ifstream(filename).good()){
      throw std::runtime_error("Model file "+filename_nodir+" not found!");
   }


   std::time_t ttime = std::time(0);
   std::tm* gmt_time = std::gmtime(&ttime);
   std::string parsetime (std::asctime(gmt_time));

   RModel rmodel(filename_nodir, parsetime);

   //Initializing Python interpreter and scope dictionaries
   Py_Initialize();
   PyObject* main = PyImport_AddModule("__main__");
   PyObject* fGlobalNS = PyModule_GetDict(main);
   PyObject* fLocalNS = PyDict_New();
   if (!fGlobalNS) {
      throw std::runtime_error("Can't init global namespace for Python");
   }
   if (!fLocalNS) {
      throw std::runtime_error("Can't init local namespace for Python");
   }


   //Extracting model information
   //Model is converted to ONNX graph format
   //using PyTorch's internal function with the input shape provided
   PyRunString("import torch",fGlobalNS,fLocalNS);
   PyRunString("print('Torch Version: '+torch.__version__)",fGlobalNS,fLocalNS);
   PyRunString("from torch.onnx.utils import _model_to_graph",fGlobalNS,fLocalNS);
   PyRunString("from torch.onnx.symbolic_helper import _set_onnx_shape_inference",fGlobalNS,fLocalNS);
   PyRunString(TString::Format("model= torch.jit.load('%s')",filename.c_str()),fGlobalNS,fLocalNS);
   PyRunString("globals().update(locals())",fGlobalNS,fLocalNS);
   PyRunString("model.cpu()",fGlobalNS,fLocalNS);
   PyRunString("model.eval()",fGlobalNS,fLocalNS);

   //Building dummy inputs for the model
   PyRunString("dummyInputs=[]",fGlobalNS,fLocalNS);
   for(long unsigned int it=0;it<inputShapes.size();++it){
      PyRunString("inputShape=[]",fGlobalNS,fLocalNS);
      for(long unsigned int itr=0;itr<inputShapes[it].size();++itr){
         PyRunString(TString::Format("inputShape.append(%d)",(int)inputShapes[it][itr]),fGlobalNS,fLocalNS);
      }
      PyRunString("dummyInputs.append(torch.rand(*inputShape))",fGlobalNS,fLocalNS);
   }

   //Getting the ONNX graph from model using the dummy inputs and example outputs
   PyRunString("_set_onnx_shape_inference(True)",fGlobalNS,fLocalNS);
   PyRunString("graph=_model_to_graph(model,dummyInputs)",fGlobalNS,fLocalNS);


   //Extracting the model information in list modelData
   PyRunString("modelData=[]",fGlobalNS,fLocalNS);
   PyRunString("for i in graph[0].nodes():\n"
               "    globals().update(locals())\n"
               "    nodeData={}\n"
               "    nodeData['nodeType']=i.kind()\n"
               "    nodeAttributeNames=[x for x in i.attributeNames()]\n"
               "    nodeAttributes={j:i[j] for j in nodeAttributeNames}\n"
               "    nodeData['nodeAttributes']=nodeAttributes\n"
               "    nodeInputs=[x for x in i.inputs()]\n"
               "    nodeInputNames=[x.debugName() for x in nodeInputs]\n"
               "    nodeData['nodeInputs']=nodeInputNames\n"
               "    nodeOutputs=[x for x in i.outputs()]\n"
               "    nodeOutputNames=[x.debugName() for x in nodeOutputs]\n"
               "    nodeData['nodeOutputs']=nodeOutputNames\n"
               "    nodeDType=[x.type().scalarType() for x in nodeOutputs]\n"
               "    nodeData['nodeDType']=nodeDType\n"
               "    modelData.append(nodeData)",fGlobalNS,fLocalNS);
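   // For illustration, a single modelData entry for a Linear layer could look roughly
   // like the following (names and values are only indicative, not taken from a real
   // model):
   //   {'nodeType'      : 'onnx::Gemm',
   //    'nodeAttributes': {'alpha': 1.0, 'beta': 1.0, 'transB': 1},
   //    'nodeInputs'    : ['input', 'weight', 'bias'],
   //    'nodeOutputs'   : ['6'],
   //    'nodeDType'     : ['Float']}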

   PyObject* fPModel = PyDict_GetItemString(fLocalNS,"modelData");
   Py_ssize_t fPModelSize = PyList_Size(fPModel);
   PyObject *fNode;
   std::string fNodeType;

   //Adding operators into the RModel object
   for(Py_ssize_t fModelIterator=0;fModelIterator<fPModelSize;++fModelIterator){
      fNode = PyList_GetItem(fPModel,fModelIterator);
      fNodeType = PyStringAsString(PyDict_GetItemString(fNode,"nodeType"));

      // Adding required routines for inference code generation
      if(fNodeType == "onnx::Gemm"){
         rmodel.AddBlasRoutines({"Gemm", "Gemv"});
      }
      else if(fNodeType == "onnx::Selu" || fNodeType == "onnx::Sigmoid"){
         rmodel.AddNeededStdLib("cmath");
      }
      else if (fNodeType == "onnx::Conv") {
         rmodel.AddBlasRoutines({"Gemm", "Axpy"});
      }

      // Adding the extracted operator into the RModel object
      rmodel.AddOperator(INTERNAL::MakePyTorchNode(fNode));
   }


   //Extracting model weights to add the initialized tensors to the RModel
   PyRunString("weightNames=[k for k in graph[1].keys()]",fGlobalNS,fLocalNS);
   PyRunString("weights=[v.numpy() for v in graph[1].values()]",fGlobalNS,fLocalNS);
   PyRunString("weightDTypes=[v.type()[6:-6] for v in graph[1].values()]",fGlobalNS,fLocalNS);
   PyObject* fPWeightNames = PyDict_GetItemString(fLocalNS,"weightNames");
   PyObject* fPWeightTensors = PyDict_GetItemString(fLocalNS,"weights");
   PyObject* fPWeightDTypes = PyDict_GetItemString(fLocalNS,"weightDTypes");
   PyArrayObject* fWeightTensor;
   std::string fWeightName;
   ETensorType fWeightDType;
   std::vector<std::size_t> fWeightShape;
   std::size_t fWeightSize;

   for(Py_ssize_t weightIter=0; weightIter<PyList_Size(fPWeightTensors);++weightIter){
      fWeightTensor = (PyArrayObject*)PyList_GetItem(fPWeightTensors,weightIter);
      fWeightName = PyStringAsString(PyList_GetItem(fPWeightNames,weightIter));
      fWeightDType = ConvertStringToType(PyStringAsString(PyList_GetItem(fPWeightDTypes,weightIter)));
      fWeightSize = 1;
      fWeightShape.clear();
      for(int j=0; j<PyArray_NDIM(fWeightTensor); ++j){
         fWeightShape.push_back((std::size_t)(PyArray_DIM(fWeightTensor,j)));
         fWeightSize *= (std::size_t)(PyArray_DIM(fWeightTensor,j));
      }
      switch(fWeightDType){
         case ETensorType::FLOAT:{
            float* fWeightValue = (float*)PyArray_DATA(fWeightTensor);
            std::shared_ptr<void> fData(malloc(fWeightSize * sizeof(float)), free);
            std::memcpy(fData.get(), fWeightValue, fWeightSize * sizeof(float));
            rmodel.AddInitializedTensor(fWeightName, ETensorType::FLOAT, fWeightShape, fData);
            break;
         }
         default:
            throw std::runtime_error("Type error: TMVA SOFIE does not yet support weights of data type " + ConvertTypeToString(fWeightDType));
      }
   }


   //Extracting Input tensor info
   PyRunString("inputs=[x for x in model.graph.inputs()]",fGlobalNS,fLocalNS);
   PyRunString("inputs=inputs[1:]",fGlobalNS,fLocalNS);
   PyRunString("inputNames=[x.debugName() for x in inputs]",fGlobalNS,fLocalNS);
   PyObject* fPInputs = PyDict_GetItemString(fLocalNS,"inputNames");
   std::string fInputName;
   std::vector<size_t> fInputShape;
   ETensorType fInputDType;
   for(Py_ssize_t inputIter=0; inputIter<PyList_Size(fPInputs);++inputIter){
      fInputName = PyStringAsString(PyList_GetItem(fPInputs,inputIter));
      fInputShape = inputShapes[inputIter];
      fInputDType = inputDTypes[inputIter];
      switch(fInputDType){
         case(ETensorType::FLOAT): {
            rmodel.AddInputTensorInfo(fInputName, ETensorType::FLOAT, fInputShape);
            break;
         }
         default:
            throw std::runtime_error("Type Error: TMVA SOFIE does not yet support the input tensor data type " + ConvertTypeToString(fInputDType));
      }
   }


   //Extracting output tensor names
   PyRunString("outputs=[x for x in graph[0].outputs()]",fGlobalNS,fLocalNS);
   PyRunString("outputNames=[x.debugName() for x in outputs]",fGlobalNS,fLocalNS);
   PyObject* fPOutputs = PyDict_GetItemString(fLocalNS,"outputNames");
   std::vector<std::string> fOutputNames;
   for(Py_ssize_t outputIter = 0; outputIter < PyList_Size(fPOutputs); ++outputIter){
      fOutputNames.push_back(PyStringAsString(PyList_GetItem(fPOutputs,outputIter)));
   }
   rmodel.AddOutputTensorNameList(fOutputNames);

   return rmodel;
}

//////////////////////////////////////////////////////////////////////////////////
/// \param[in] filepath file location of PyTorch .pt model
/// \param[in] inputShapes vector of input shape vectors
/// \return Parsed RModel object
///
/// Overloaded parser function for translating a PyTorch .pt model into an RModel
/// object. This overload requires only the inputShapes vector as a parameter. It
/// builds the vector of data-types for the input tensors using Float as the default,
/// then calls the `Parse()` function with the vector of data-types included,
/// returning the parsed RModel object.
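///
/// To specify the input tensor data-types explicitly, the three-argument overload of
/// `Parse()` above can be called directly, for example:
/// ~~~ {.cpp}
/// std::vector<ETensorType> dtypes{ETensorType::FLOAT};
/// RModel model = PyTorch::Parse("trained_model_dense.pt", inputShape, dtypes);
/// ~~~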
RModel Parse(std::string filepath, std::vector<std::vector<size_t>> inputShapes){
   std::vector<ETensorType> dtype(inputShapes.size(), ETensorType::FLOAT);
   return Parse(filepath, inputShapes, dtype);
}
}//PyTorch
}//SOFIE
}//Experimental
}//TMVA