RModelParser_ONNX.cxx
#include "Byteswap.h"
#include "TMVA/RModelParser_ONNX.hxx"
#include "onnx_proto3.pb.h"

#include <stdexcept>
#include <string>
#include <memory>
#include <cassert>
#include <iostream>
#include <unordered_map>
#include <functional>
#include <cstring>       // std::memcpy
#include <ctime>         // std::time, std::gmtime, std::asctime
#include <fstream>       // std::fstream
#include <map>           // std::map
#include <unordered_set> // std::unordered_set
#include <vector>        // std::vector
#include "TMVA/SOFIE_common.hxx"
namespace TMVA {
namespace Experimental {
namespace SOFIE {

// Declaration of operators
// Unary operators
extern ParserFuncSignature ParseSqrt;
extern ParserFuncSignature ParseReciprocal;
extern ParserFuncSignature ParseNeg;
extern ParserFuncSignature ParseExp;
extern ParserFuncSignature ParseLog;
// Binary operators
extern ParserFuncSignature ParseAdd;
extern ParserFuncSignature ParseSub;
extern ParserFuncSignature ParseMul;
extern ParserFuncSignature ParseDiv;
extern ParserFuncSignature ParsePow;
// Nary operators
extern ParserFuncSignature ParseMax;
extern ParserFuncSignature ParseMin;
extern ParserFuncSignature ParseMean;
extern ParserFuncSignature ParseSum;
// Comparison operators
extern ParserFuncSignature ParseEq;
extern ParserFuncSignature ParseLess;
extern ParserFuncSignature ParseLessEq;
extern ParserFuncSignature ParseGreater;
extern ParserFuncSignature ParseGreaterEq;
// Reduce operators
extern ParserFuncSignature ParseReduceMean;
extern ParserFuncSignature ParseReduceSum;
extern ParserFuncSignature ParseReduceSumsquare;
extern ParserFuncSignature ParseReduceProd;
// Others
extern ParserFuncSignature ParseBatchNormalization;
extern ParserFuncSignature ParseCast;
extern ParserFuncSignature ParseConcat;
extern ParserFuncSignature ParseConstant;
extern ParserFuncSignature ParseConv;
extern ParserFuncSignature ParseConvTranspose;
extern ParserFuncSignature ParseElu;
extern ParserFuncSignature ParseErf;
extern ParserFuncSignature ParseExpand;
extern ParserFuncSignature ParseEyeLike;
extern ParserFuncSignature ParseGather;
extern ParserFuncSignature ParseGemm;
extern ParserFuncSignature ParseGRU;
extern ParserFuncSignature ParseIdentity;
extern ParserFuncSignature ParseLayerNormalization;
extern ParserFuncSignature ParseLeakyRelu;
extern ParserFuncSignature ParseLSTM;
extern ParserFuncSignature ParseMatMul;
extern ParserFuncSignature ParsePool;
extern ParserFuncSignature ParseRange;
extern ParserFuncSignature ParseRelu;
extern ParserFuncSignature ParseReshape;
extern ParserFuncSignature ParseRNN;
extern ParserFuncSignature ParseSelu;
extern ParserFuncSignature ParseShape;
extern ParserFuncSignature ParseSigmoid;
extern ParserFuncSignature ParseSlice;
extern ParserFuncSignature ParseSoftmax;
extern ParserFuncSignature ParseTanh;
extern ParserFuncSignature ParseTranspose;
// Declaration of fused operators
extern ParserFuseFuncSignature ParseFuseConvAdd;
extern ParserFuseFuncSignature ParseFuseConvTransposeAdd;
extern ParserFuseFuncSignature ParseFuseMatMulAdd;

// Definition of RModelParser_ONNX::OperatorsMapImpl
struct RModelParser_ONNX::OperatorsMapImpl {
   // Registered operators
   std::unordered_map<std::string, ParserFuncSignature> fOperatorsMap;
};
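
// A ParserFuncSignature (see its definition in RModelParser_ONNX.hxx) is an
// std::function<std::unique_ptr<ROperator>(RModelParser_ONNX &, const onnx::NodeProto &)>,
// i.e. a callable that turns one ONNX node into a SOFIE operator. A minimal sketch of a
// custom parser function (ParseMyOp and ROperator_MyOp are hypothetical names):
//
//    ParserFuncSignature ParseMyOp = [](RModelParser_ONNX &parser,
//                                       const onnx::NodeProto &node) -> std::unique_ptr<ROperator> {
//       // propagate the element type of the first input to the output tensor
//       ETensorType type = parser.GetTensorType(node.input(0));
//       parser.RegisterTensorType(node.output(0), type);
//       return std::make_unique<ROperator_MyOp>(node.input(0), node.output(0));
//    };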

// Constructor of the parser
RModelParser_ONNX::RModelParser_ONNX() noexcept : fOperatorsMapImpl(std::make_unique<OperatorsMapImpl>())
{
   // Register operators
   // Unary operators
   RegisterOperator("Sqrt", ParseSqrt);
   RegisterOperator("Reciprocal", ParseReciprocal);
   RegisterOperator("Neg", ParseNeg);
   RegisterOperator("Exp", ParseExp);
   RegisterOperator("Log", ParseLog);
   // Binary operators
   RegisterOperator("Add", ParseAdd);
   RegisterOperator("Sub", ParseSub);
   RegisterOperator("Mul", ParseMul);
   RegisterOperator("Div", ParseDiv);
   RegisterOperator("Pow", ParsePow);
   // Nary operators
   RegisterOperator("Max", ParseMax);
   RegisterOperator("Min", ParseMin);
   RegisterOperator("Mean", ParseMean);
   RegisterOperator("Sum", ParseSum);
   // Comparison operators
   RegisterOperator("Equal", ParseEq);
   RegisterOperator("Less", ParseLess);
   RegisterOperator("LessOrEqual", ParseLessEq);
   RegisterOperator("Greater", ParseGreater);
   RegisterOperator("GreaterOrEqual", ParseGreaterEq);
   // Reduce operators
   RegisterOperator("ReduceMean", ParseReduceMean);
   RegisterOperator("ReduceSum", ParseReduceSum);
   RegisterOperator("ReduceSumsquare", ParseReduceSumsquare);
   RegisterOperator("ReduceProd", ParseReduceProd);
   // Others
   RegisterOperator("BatchNormalization", ParseBatchNormalization);
   RegisterOperator("Constant", ParseConstant);
   RegisterOperator("ConstantOfShape", ParseConstant);
   RegisterOperator("Cast", ParseCast);
   RegisterOperator("Concat", ParseConcat);
   RegisterOperator("Conv", ParseConv);
   RegisterOperator("ConvTranspose", ParseConvTranspose);
   RegisterOperator("Elu", ParseElu);
   RegisterOperator("Gemm", ParseGemm);
   RegisterOperator("Identity", ParseIdentity);
   RegisterOperator("LeakyRelu", ParseLeakyRelu);
   RegisterOperator("LSTM", ParseLSTM);
   RegisterOperator("AveragePool", ParsePool);
   RegisterOperator("GlobalAveragePool", ParsePool);
   RegisterOperator("MaxPool", ParsePool);
   RegisterOperator("GRU", ParseGRU);
   RegisterOperator("Reshape", ParseReshape);
   RegisterOperator("Flatten", ParseReshape);
   RegisterOperator("Squeeze", ParseReshape);
   RegisterOperator("Unsqueeze", ParseReshape);
   RegisterOperator("RNN", ParseRNN);
   RegisterOperator("Relu", ParseRelu);
   RegisterOperator("Selu", ParseSelu);
   RegisterOperator("Shape", ParseShape);
   RegisterOperator("Sigmoid", ParseSigmoid);
   RegisterOperator("Slice", ParseSlice);
   RegisterOperator("Softmax", ParseSoftmax);
   RegisterOperator("Tanh", ParseTanh);
   RegisterOperator("Transpose", ParseTranspose);
   RegisterOperator("MatMul", ParseMatMul);
   RegisterOperator("LayerNormalization", ParseLayerNormalization);
   RegisterOperator("Expand", ParseExpand);
   RegisterOperator("Gather", ParseGather);
   RegisterOperator("Erf", ParseErf);
   RegisterOperator("EyeLike", ParseEyeLike);
   RegisterOperator("Range", ParseRange);
}
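
// The table filled above is keyed by the ONNX op_type string, so client code can extend
// or override it before calling Parse(). A usage sketch ("MyOp" and ParseMyOp are
// hypothetical, as above):
//
//    RModelParser_ONNX parser;
//    if (!parser.IsRegisteredOperator("MyOp"))
//       parser.RegisterOperator("MyOp", ParseMyOp);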

// Destructor of the parser
RModelParser_ONNX::~RModelParser_ONNX() = default;

void RModelParser_ONNX::RegisterOperator(const std::string &name, ParserFuncSignature func)
{
   fOperatorsMapImpl->fOperatorsMap[name] = func;
}

bool RModelParser_ONNX::IsRegisteredOperator(const std::string &name)
{
   return fOperatorsMapImpl->fOperatorsMap.find(name) != fOperatorsMapImpl->fOperatorsMap.end();
}

std::vector<std::string> RModelParser_ONNX::GetRegisteredOperators()
{
   std::vector<std::string> ops;
   ops.reserve(fOperatorsMapImpl->fOperatorsMap.size());
   for (auto &it : fOperatorsMapImpl->fOperatorsMap) {
      ops.emplace_back(it.first);
   }
   return ops;
}
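
// A short sketch of using the accessor above to inspect the parser coverage at run time
// (the iteration order of the underlying unordered_map is unspecified):
//
//    for (const std::string &op : parser.GetRegisteredOperators())
//       std::cout << op << "\n";   // prints the supported ONNX op_type strings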

void RModelParser_ONNX::RegisterTensorType(const std::string &name, ETensorType type)
{
   fTensorTypeMap[UTILITY::Clean_name(name)] = type;
}

bool RModelParser_ONNX::IsRegisteredTensorType(const std::string &name)
{
   return fTensorTypeMap.find(UTILITY::Clean_name(name)) != fTensorTypeMap.end();
}

ETensorType RModelParser_ONNX::GetTensorType(const std::string &name)
{
   return fTensorTypeMap[UTILITY::Clean_name(name)];
}
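
// The registry above keys tensors by their cleaned name, so lookups are insensitive to
// the characters stripped by UTILITY::Clean_name. A short sketch (hypothetical tensor
// name taken from an ONNX graph):
//
//    parser.RegisterTensorType("input:0", ETensorType::FLOAT);
//    ETensorType t = parser.GetTensorType("input:0");   // ETensorType::FLOAT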

// Parse an operator
std::unique_ptr<ROperator>
RModelParser_ONNX::ParseOperator(const size_t i, const onnx::GraphProto &graphproto, const std::vector<size_t> &nodes)
{
   if (i >= nodes.size())
      throw std::runtime_error("TMVA::SOFIE - Error in parsing ordered operators " + std::to_string(i) +
                               " is >= " + std::to_string(nodes.size()));
   int idx = nodes[i];
   const auto &nodeproto = graphproto.node(idx);
   const std::string op_type = nodeproto.op_type();
   if (fVerbose)
      std::cout << "Parsing an operator " << op_type << std::endl;

   // try to fuse with the following operator if this is not the last one
   if (i < nodes.size() - 1) {
      int idx2 = nodes[i + 1];
      if (op_type == "MatMul") {
         // Fuse MatMul and Add
         if (idx2 < graphproto.node_size() && graphproto.node(idx2).op_type() == "Add") {
            return ParseFuseMatMulAdd(*this, graphproto.node(idx), graphproto.node(idx2));
         } else {
            return ParseMatMul(*this, graphproto.node(idx));
         }
      } else if (nodeproto.op_type() == "Conv" || nodeproto.op_type() == "ConvTranspose") {
         // Fuse Conv or ConvTranspose without bias and Add
         if (idx2 < graphproto.node_size() && graphproto.node(idx2).op_type() == "Add") {
            if (nodeproto.op_type() == "Conv") {
               return ParseFuseConvAdd(*this, graphproto.node(idx), graphproto.node(idx2));
            } else {
               return ParseFuseConvTransposeAdd(*this, graphproto.node(idx), graphproto.node(idx2));
            }
         }
      }
   }

   // skip the following Add if it was fused with the previous node
   // (look back in the ordered list; nodes[i - 1] requires i > 0)
   if (i > 0 && op_type == "Add") {
      int idx0 = nodes[i - 1];
      if (graphproto.node(idx0).op_type() == "MatMul")
         return nullptr;
      else if (graphproto.node(idx0).op_type() == "Conv") // fused above by ParseFuseConvAdd
         return nullptr;
      else if (graphproto.node(idx0).op_type() == "ConvTranspose")
         return nullptr;
   }

   auto it = fOperatorsMapImpl->fOperatorsMap.find(op_type);
   if (it == fOperatorsMapImpl->fOperatorsMap.end()) {
      throw std::runtime_error("TMVA::SOFIE Operator type " + op_type + " is not yet supported");
   }
   if (fVerbose) {
      std::cout << "\tCreating operator " << op_type << std::endl;
   }
   return it->second(*this, nodeproto);
}
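
// Fusion walk-through (illustrative): for an ordered node list where node 0 is a MatMul
// and node 1 is the Add consuming its output,
//
//    std::vector<size_t> nodes = {0, 1};
//    auto fused = parser.ParseOperator(0, graph, nodes); // operator from ParseFuseMatMulAdd
//    auto none  = parser.ParseOperator(1, graph, nodes); // nullptr: the Add was consumed
//
// so the caller adds exactly one fused operator to the model.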

// Parse a model
RModel RModelParser_ONNX::Parse(std::string filename, bool verbose)
{
   fVerbose = verbose;
   char sep = '/';
#ifdef _WIN32
   sep = '\\';
#endif
   size_t isep = filename.rfind(sep, filename.length());
   std::string filename_nodir = filename;
   if (isep != std::string::npos) {
      filename_nodir = filename.substr(isep + 1, filename.length() - isep);
   }

   std::time_t ttime = std::time(0);
   std::tm *gmt_time = std::gmtime(&ttime);
   std::string parsetime(std::asctime(gmt_time));

   GOOGLE_PROTOBUF_VERIFY_VERSION;
   // model I/O
   onnx::ModelProto model;
   RModel rmodel(filename_nodir, parsetime);

   fTensorTypeMap.clear();

   std::fstream input(filename, std::ios::in | std::ios::binary);
   if (!model.ParseFromIstream(&input)) {
      throw std::runtime_error("TMVA::SOFIE - Failed to parse onnx file " + filename);
   }

   const onnx::GraphProto &graph = model.graph(); // not a memory leak: model is freed automatically at the end
   google::protobuf::ShutdownProtobufLibrary();

   // the ONNX file version is ir_version(); model_version() returns 0
   if (fVerbose) {
      std::cout << "ONNX Version " << model.ir_version() << std::endl;
   }

   std::unordered_set<std::string> initializer_names;
   for (int i = 0; i < graph.initializer_size(); i++) {
      initializer_names.insert(graph.initializer(i).name());
   }

   if (verbose)
      std::cout << "Parsing model inputs...." << std::endl;
   // Loop on model inputs
   for (int i = 0; i < graph.input_size(); i++) {
      RegisterTensorType(graph.input(i).name(),
                         static_cast<ETensorType>(graph.input(i).type().tensor_type().elem_type()));

      if (verbose)
         std::cout << "\tgraph input " << i << " name " << graph.input(i).name() << " type "
                   << graph.input(i).type().tensor_type().elem_type() << std::endl;

      // skip inputs which are also initializers (i.e. weights)
      if (initializer_names.find(graph.input(i).name()) != initializer_names.end())
         continue;

      // input data node is not a weight node (has no initializer)
      const onnx::ValueInfoProto &valueinfoproto = graph.input(i);
      std::string input_name = valueinfoproto.name();

      ETensorType type = static_cast<ETensorType>(valueinfoproto.type().tensor_type().elem_type());
      if (type != ETensorType::FLOAT && type != ETensorType::INT32 && type != ETensorType::INT64) {
         throw std::runtime_error("TMVA::SOFIE Data type in input tensor " + input_name + " not supported!\n");
      }

      std::vector<Dim> fShape;
      bool existParam = false;
      if (!valueinfoproto.type().tensor_type().has_shape())
         throw std::runtime_error("TMVA::SOFIE datanode with no shape restrictions is not supported yet");
      for (int j = 0; j < valueinfoproto.type().tensor_type().shape().dim_size(); j++) {
         Dim dim;
         if (valueinfoproto.type().tensor_type().shape().dim(j).value_case() ==
             onnx::TensorShapeProto_Dimension::ValueCase::kDimValue) {
            dim.dim = valueinfoproto.type().tensor_type().shape().dim(j).dim_value();
         } else if (valueinfoproto.type().tensor_type().shape().dim(j).value_case() ==
                    onnx::TensorShapeProto_Dimension::ValueCase::kDimParam) {
            dim.isParam = true;
            existParam = true;
            dim.param = valueinfoproto.type().tensor_type().shape().dim(j).dim_param();
         } else {
            throw std::runtime_error("TMVA::SOFIE ONNX file error: Valueinfoproto " + input_name +
                                     " has neither dim_value nor dim_param!\n");
         }
         fShape.push_back(dim);
      }
      if (valueinfoproto.type().tensor_type().shape().dim_size() == 0) {
         // a TensorShapeProto with no dimension message is defined by the ONNX IR to be a scalar
         Dim dim;
         dim.dim = 1;
         fShape.push_back(dim);
      }

      if (!existParam) {
         // all dimensions are known: use the size_t-based shape
         std::vector<size_t> fShape_sizet;
         for (auto &j : fShape) {
            fShape_sizet.push_back(j.dim);
         }

         rmodel.AddInputTensorInfo(input_name, type, fShape_sizet);
      } else {
         rmodel.AddInputTensorInfo(input_name, type, fShape);
      }
      rmodel.AddInputTensorName(input_name); // store also the names in the given order
   }
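
// Shape-parsing example (illustrative): an input declared in the ONNX file as
// float32["batch_size", 3, 224, 224] yields
//
//    fShape = { Dim{isParam=true, param="batch_size"}, Dim{dim=3}, Dim{dim=224}, Dim{dim=224} }
//
// and, since existParam is true, the std::vector<Dim> overload of AddInputTensorInfo is
// used, which lets the generated code keep the batch size symbolic.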

   std::map<std::string, int> allInitializedTensors;

   if (verbose)
      std::cout << "\nParsing the graph initializer list and filling the model initialized tensors" << std::endl;

   for (int i = 0; i < graph.initializer_size(); i++) {
      // a non-const pointer is needed because ExtractSubrange() below mutates the proto
      onnx::TensorProto *tensorproto = const_cast<onnx::TensorProto *>(&graph.initializer(i));
      std::vector<std::size_t> shape;
      std::size_t fLength = 1;
      for (int j = 0; j < tensorproto->dims_size(); j++) {
         shape.push_back(tensorproto->dims(j));
         fLength *= tensorproto->dims(j);
      }
      // in case of scalars, keep an empty shape but with length = 1

      std::string input_name = graph.initializer(i).name();

      if (verbose)
         std::cout << "\t initializer " << i << " name " << input_name << " type " << graph.initializer(i).data_type()
                   << std::endl;

      switch (static_cast<ETensorType>(graph.initializer(i).data_type())) {
      case ETensorType::FLOAT: {
         std::shared_ptr<void> data(malloc(fLength * sizeof(float)), free);

         if (!tensorproto->raw_data().empty()) {
#ifdef R__BYTESWAP
            // ONNX raw_data is little-endian; on little-endian hosts copy it directly
            std::memcpy(data.get(), tensorproto->raw_data().c_str(), fLength * sizeof(float));
#else
            // big-endian host: byte-swap each 32-bit word
            for (std::size_t k = 0; k < fLength; ++k)
               (reinterpret_cast<uint32_t *>(data.get()))[k] =
                  Rbswap_32((reinterpret_cast<const uint32_t *>(tensorproto->raw_data().c_str()))[k]);
#endif
         } else {
            tensorproto->mutable_float_data()->ExtractSubrange(0, tensorproto->float_data_size(),
                                                               static_cast<float *>(data.get()));
         }

         if (verbose)
            std::cout << "add FLOAT initialized tensor " << input_name << " shape " << ConvertShapeToString(shape)
                      << std::endl;
         rmodel.AddInitializedTensor(input_name, ETensorType::FLOAT, shape, data);
         allInitializedTensors[input_name] = i;
         break;
      }
      case ETensorType::INT64: {
         std::shared_ptr<void> data(malloc(fLength * sizeof(int64_t)), free);

         if (!tensorproto->raw_data().empty()) {
#ifdef R__BYTESWAP
            std::memcpy(data.get(), tensorproto->raw_data().c_str(), fLength * sizeof(int64_t));
#else
            for (std::size_t k = 0; k < fLength; ++k)
               (reinterpret_cast<uint64_t *>(data.get()))[k] =
                  Rbswap_64((reinterpret_cast<const uint64_t *>(tensorproto->raw_data().c_str()))[k]);
#endif
         } else {
            tensorproto->mutable_int64_data()->ExtractSubrange(0, tensorproto->int64_data_size(),
                                                               static_cast<int64_t *>(data.get()));
         }

         if (verbose)
            std::cout << "add INT64 initialized tensor " << input_name << " shape " << ConvertShapeToString(shape)
                      << std::endl;
         rmodel.AddInitializedTensor(input_name, ETensorType::INT64, shape, data);
         allInitializedTensors[input_name] = i;
         break;
      }
      default:
         throw std::runtime_error("Data type in weight tensor " + graph.initializer(i).name() + " not supported!\n");
      }
   }
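
// Initializer storage (illustrative): per the ONNX specification, a FLOAT initializer of
// shape {2} holding {1.f, 2.f} arrives either as raw_data() (8 little-endian bytes,
// copied above with memcpy, or byte-swapped on big-endian hosts) or as the typed
// repeated field float_data() == {1.f, 2.f}, extracted with ExtractSubrange.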

   // Initial operator order
   if (verbose) {
      std::cout << "\nGraph operator list (ONNX order)\n";
      for (int i = 0; i < graph.node_size(); i++) {
         std::cout << "\tOperator " << i << " : " << graph.node(i).op_type() << " , " << graph.node(i).input_size()
                   << " inputs : {";
         for (int j = 0; j < graph.node(i).input_size(); j++) {
            std::cout << graph.node(i).input(j);
            if (j < graph.node(i).input_size() - 1)
               std::cout << ", ";
         }
         std::cout << " }" << std::endl;
      }
   }

   // establish the order of the nodes: a node is emitted only once all of its inputs are
   // available (graph inputs, initializers, or outputs of already ordered nodes)
   if (verbose)
      std::cout << "\nRe-Order graph operator list\n";
   std::vector<size_t> nodesOrder;
   nodesOrder.reserve(graph.node_size());
   std::vector<bool> foundNodes(graph.node_size());
   // loop on the graph inputs
   std::map<std::string, int> allInputs;
   for (int i = 0; i < graph.input_size(); i++) {
      allInputs[graph.input(i).name()] = -1;
   }
   do {
      auto psize = nodesOrder.size();
      for (int i = 0; i < graph.node_size(); i++) {
         if (foundNodes[i])
            continue;
         // check if all inputs of the node exist; if so, add the node to the list
         bool existInputs = true;
         int input_size = graph.node(i).input_size();
         // special case for Reshape, where the shape is an input and not a weight tensor
         for (int j = 0; j < input_size; j++) {
            std::string name = graph.node(i).input(j);
            // skip empty names
            if (!name.empty()) {
               existInputs &= (allInputs.find(name) != allInputs.end() ||
                               allInitializedTensors.find(name) != allInitializedTensors.end());
               if (fVerbose) {
                  std::cout << graph.node(i).op_type() << " input " << name << " "
                            << bool(allInputs.find(name) != allInputs.end()) << " "
                            << bool(allInitializedTensors.find(name) != allInitializedTensors.end()) << " "
                            << existInputs << std::endl;
               }
            }
         }
         if (!existInputs) {
            if (fVerbose) {
               std::cout << "skip op " << graph.node(i).op_type() << " inputs are ";
               for (int j = 0; j < input_size; j++) {
                  std::cout << graph.node(i).input(j) << " ";
               }
               std::cout << std::endl;
            }
            continue;
         }
         if (verbose)
            std::cout << "\tadd node " << graph.node(i).op_type() << " order " << i << std::endl;

         nodesOrder.push_back(i);
         foundNodes[i] = true;
         // register the outputs
         for (int j = 0; j < graph.node(i).output_size(); j++) {
            allInputs[graph.node(i).output(j)] = i;
         }
      }
      // no node was added in this pass - something is wrong
      if (nodesOrder.size() == psize) {
         throw std::runtime_error("TMVA::SOFIE - cannot find a new node");
      }
   } while ((int)nodesOrder.size() < graph.node_size());
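
// Ordering example (illustrative): for a file that lists node 0 as Add(c = m + b) and
// node 1 as MatMul(m = x * w), the first pass skips Add (its input m is not produced yet)
// and accepts MatMul; the second pass accepts Add, giving nodesOrder = {1, 0}. A pass
// that adds no node indicates a cycle or a missing input, and the error above is thrown.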

   // print the re-ordered operator list
   if (verbose) {
      std::cout << "\nGraph operator list (re-ordered)\n";
      for (int k = 0; k < graph.node_size(); k++) {
         int i = nodesOrder[k];
         std::cout << "\tOperator " << i << " : " << graph.node(i).op_type() << " , " << graph.node(i).input_size()
                   << " inputs : {";
         for (int j = 0; j < graph.node(i).input_size(); j++) {
            std::cout << graph.node(i).input(j);
            if (j < graph.node(i).input_size() - 1)
               std::cout << ", ";
         }
         std::cout << " }" << std::endl;
      }
   }

   // fill the model with operators
   if (verbose) {
      std::cout << "Fill RModel with operators...\n";
   }
   for (int i = 0; i < graph.node_size(); i++) {
      std::string op_type = graph.node(nodesOrder[i]).op_type();

      if (verbose) {
         std::cout << "\t" << i << " " << nodesOrder[i] << " parsing operator " << op_type << std::endl;
      }

      std::unique_ptr<ROperator> op = ParseOperator(i, graph, nodesOrder);
      if (!op) {
         // skip fused nodes such as the Add following a MatMul
         if (verbose) {
            std::cout << "\t\tskipping operator since it is fused with the previous one" << std::endl;
         }
         continue;
      }
      rmodel.AddOperator(std::move(op));
   }

   std::vector<std::string> outputnames;
   if (verbose)
      std::cout << "\nParsing Graph output list\n";
   for (int i = 0; i < graph.output_size(); i++) {
      if (verbose)
         std::cout << "\toutput " << i << " name " << graph.output(i).name() << std::endl;
      outputnames.push_back(graph.output(i).name());
   }
   rmodel.AddOutputTensorNameList(outputnames);

   return rmodel;
}

} // namespace SOFIE
} // namespace Experimental
} // namespace TMVA
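
// End-to-end usage sketch (assuming the standard SOFIE workflow; RModel::Generate() and
// RModel::OutputGenerated() come from the RModel class, not from this file):
//
//    #include "TMVA/RModelParser_ONNX.hxx"
//
//    using namespace TMVA::Experimental::SOFIE;
//
//    int main() {
//       RModelParser_ONNX parser;
//       RModel model = parser.Parse("model.onnx", true); // verbose parsing
//       model.Generate();                                // generate the inference code
//       model.OutputGenerated("model.hxx");              // write it to a header file
//       return 0;
//    }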