Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
RModelParser_ONNX.cxx
Go to the documentation of this file.
#include "onnx_proto3.pb.h"

#include <cassert>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>
8namespace TMVA{
9namespace Experimental{
10namespace SOFIE{
11
12namespace INTERNAL{
13
14std::unique_ptr<ROperator> make_ROperator_Transpose(const onnx::NodeProto& nodeproto, const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
15std::unique_ptr<ROperator> make_ROperator_Relu(const onnx::NodeProto& nodeproto, const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
16std::unique_ptr<ROperator> make_ROperator_Selu(const onnx::NodeProto& nodeproto, const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
17std::unique_ptr<ROperator> make_ROperator_Sigmoid(const onnx::NodeProto& nodeproto, const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
18std::unique_ptr<ROperator> make_ROperator_Gemm(const onnx::NodeProto& nodeproto, const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
19std::unique_ptr<ROperator> make_ROperator_Conv(const onnx::NodeProto& nodeproto, const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
20std::unique_ptr<ROperator> make_ROperator_RNN(const onnx::NodeProto& nodeproto, const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
21std::unique_ptr<ROperator> make_ROperator_LSTM(const onnx::NodeProto& nodeproto, const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
22std::unique_ptr<ROperator> make_ROperator_BatchNormalization(const onnx::NodeProto& nodeproto, const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
23std::unique_ptr<ROperator> make_ROperator_Pool(const onnx::NodeProto& nodeproto, const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
24std::unique_ptr<ROperator> make_ROperator_Add(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
25std::unique_ptr<ROperator> make_ROperator_Reshape(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
26std::unique_ptr<ROperator> make_ROperator_Slice(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
27std::unique_ptr<ROperator> make_ROperator_GRU(const onnx::NodeProto& nodeproto, const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
28
29
30using factoryMethodMap = std::unordered_map<std::string, std::unique_ptr<ROperator> (*)(const onnx::NodeProto&, const onnx::GraphProto&, std::unordered_map<std::string, ETensorType>&)>;
32 {"Gemm", &make_ROperator_Gemm},
33 {"Transpose", &make_ROperator_Transpose},
34 {"Relu", &make_ROperator_Relu},
35 {"Conv", &make_ROperator_Conv},
36 {"RNN", &make_ROperator_RNN},
37 {"Selu", &make_ROperator_Selu},
38 {"Sigmoid", &make_ROperator_Sigmoid},
39 {"LSTM", &make_ROperator_LSTM},
40 {"GRU", &make_ROperator_GRU},
41 {"BatchNormalization", &make_ROperator_BatchNormalization},
42 {"AveragePool", &make_ROperator_Pool},
43 {"GlobalAveragePool", &make_ROperator_Pool},
44 {"MaxPool", &make_ROperator_Pool},
45 {"Add", &make_ROperator_Add},
46 {"Reshape", &make_ROperator_Reshape},
47 {"Flatten", &make_ROperator_Reshape},
48 {"Slice", &make_ROperator_Slice},
49 {"Squeeze", &make_ROperator_Reshape},
50 {"Unsqueeze", &make_ROperator_Reshape},
51 {"Flatten", &make_ROperator_Reshape}
52};
53
54
55
56std::unique_ptr<ROperator> make_ROperator(size_t idx, const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type){
57 const auto& nodeproto = graphproto.node(idx);
58 auto find = mapOptypeOperator.find(nodeproto.op_type());
59 if (find == mapOptypeOperator.end()){
60 throw std::runtime_error("TMVA::SOFIE - Operator type " + nodeproto.op_type() + " is not yet supported");
61 // std::unique_ptr<ROperator> op;
62 // return op;
63 } else {
64 //std::cout << "create operator " << nodeproto.op_type() << std::endl;
65 return (find->second)(nodeproto, graphproto, tensor_type);
66 }
67}
68
69std::unique_ptr<ROperator> make_ROperator_Add(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /*graphproto */, std::unordered_map<std::string, ETensorType>& tensor_type){
70
72
73 for (int i = 0; i < 2; ++i) {
74 auto input_name = nodeproto.input(i);
75 auto it = tensor_type.find(input_name);
76 if (it != tensor_type.end()){
77 // according to ONNX both inputs have same time
78 if (i == 0) input_type = it->second;
79 else
80 assert(it->second == input_type);
81 } else {
82 throw std::runtime_error("TMVA::SOFIE ONNX Parser Add op has input tensor" + input_name + " but its type is not yet registered");
83 }
84 }
85
86 std::unique_ptr<ROperator> op;
87
88 switch(input_type){
90 op.reset(new ROperator_Add<float>(nodeproto.input(0), nodeproto.input(1), nodeproto.output(0)));
91 break;
92 default:
93 throw std::runtime_error("TMVA::SOFIE - Unsupported - Operator Add does not yet support input type " + std::to_string(static_cast<int>(input_type)));
94 }
95
96 ETensorType output_type = (op->TypeInference({input_type}))[0];
97 auto it2 = tensor_type.find(nodeproto.output(0));
98 if (it2 == tensor_type.end()){
99 tensor_type[nodeproto.output(0)] = output_type;
100 }
101
102 return op;
103}
104std::unique_ptr<ROperator> make_ROperator_Transpose(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /*graphproto*/, std::unordered_map<std::string, ETensorType>& tensor_type){
105
106 ETensorType input_type;
107
108 auto input_name = nodeproto.input(0);
109 auto it = tensor_type.find(input_name);
110 if (it != tensor_type.end()){
111 input_type = it->second;
112 }else{
113 throw std::runtime_error("TMVA::SOFIE ONNX Parser tranpose op has input tensor" + input_name + " but its type is not yet registered");
114 }
115
116 std::unique_ptr<ROperator> op;
117 std::vector<int_t> attr_perm;
118
119 if (nodeproto.attribute_size() == 1){
120 attr_perm.assign(nodeproto.attribute(0).ints().begin(), nodeproto.attribute(0).ints().end());
121 }
122
123 switch(input_type){
125 if (!attr_perm.empty()){
126 op.reset(new ROperator_Transpose<float>(attr_perm, nodeproto.input(0), nodeproto.output(0)));
127 }else{
128 op.reset(new ROperator_Transpose<float> (nodeproto.input(0), nodeproto.output(0)));
129 }
130 break;
131 default:
132 throw std::runtime_error("TMVA::SOFIE - Unsupported - Operator Transpose does not yet support input type " + std::to_string(static_cast<int>(input_type)));
133 }
134
135 ETensorType output_type = (op->TypeInference({input_type}))[0];
136 auto it2 = tensor_type.find(nodeproto.output(0));
137 if (it2 == tensor_type.end()){
138 tensor_type[nodeproto.output(0)] = output_type;
139 }
140
141 return op;
142}
143
144std::unique_ptr<ROperator> make_ROperator_Relu(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /*graphproto */, std::unordered_map<std::string, ETensorType>& tensor_type){
145
146 ETensorType input_type;
147
148 auto input_name = nodeproto.input(0);
149 auto it = tensor_type.find(input_name);
150 if (it != tensor_type.end()){
151 input_type = it->second;
152 }else{
153 throw std::runtime_error("TMVA::SOFIE ONNX Parser relu op has input tensor" + input_name + " but its type is not yet registered");
154 }
155
156 std::unique_ptr<ROperator> op;
157
158
159 switch(input_type){
161 op.reset(new ROperator_Relu<float>(nodeproto.input(0), nodeproto.output(0)));
162 break;
163 default:
164 throw std::runtime_error("TMVA::SOFIE - Unsupported - Operator Relu does not yet support input type " + std::to_string(static_cast<int>(input_type)));
165 }
166
167 ETensorType output_type = (op->TypeInference({input_type}))[0];
168 auto it2 = tensor_type.find(nodeproto.output(0));
169 if (it2 == tensor_type.end()){
170 tensor_type[nodeproto.output(0)] = output_type;
171 }
172
173 return op;
174}
175
176std::unique_ptr<ROperator> make_ROperator_Selu(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /*graphproto */, std::unordered_map<std::string, ETensorType>& tensor_type){
177
178 ETensorType input_type;
179
180 auto input_name = nodeproto.input(0);
181 auto it = tensor_type.find(input_name);
182 if (it != tensor_type.end()){
183 input_type = it->second;
184 }else{
185 throw std::runtime_error("TMVA::SOFIE ONNX Parser selu op has input tensor" + input_name + " but its type is not yet registered");
186 }
187
188 std::unique_ptr<ROperator> op;
189
190
191 switch(input_type){
193 op.reset(new ROperator_Selu<float>(nodeproto.input(0), nodeproto.output(0)));
194 break;
195 default:
196 throw std::runtime_error("TMVA::SOFIE - Unsupported - Operator Selu does not yet support input type " + std::to_string(static_cast<int>(input_type)));
197 }
198
199 ETensorType output_type = (op->TypeInference({input_type}))[0];
200 auto it2 = tensor_type.find(nodeproto.output(0));
201 if (it2 == tensor_type.end()){
202 tensor_type[nodeproto.output(0)] = output_type;
203 }
204
205 return op;
206}
207
208std::unique_ptr<ROperator> make_ROperator_Sigmoid(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /*graphproto */, std::unordered_map<std::string, ETensorType>& tensor_type){
209
210 ETensorType input_type;
211
212 auto input_name = nodeproto.input(0);
213 auto it = tensor_type.find(input_name);
214 if (it != tensor_type.end()){
215 input_type = it->second;
216 }else{
217 throw std::runtime_error("TMVA::SOFIE ONNX Parser Sigmoid op has input tensor" + input_name + " but its type is not yet registered");
218 }
219
220 std::unique_ptr<ROperator> op;
221
222
223 switch(input_type){
225 op.reset(new ROperator_Sigmoid<float>(nodeproto.input(0), nodeproto.output(0)));
226 break;
227 default:
228 throw std::runtime_error("TMVA::SOFIE - Unsupported - Operator Sigmoid does not yet support input type " + std::to_string(static_cast<int>(input_type)));
229 }
230
231 ETensorType output_type = (op->TypeInference({input_type}))[0];
232 auto it2 = tensor_type.find(nodeproto.output(0));
233 if (it2 == tensor_type.end()){
234 tensor_type[nodeproto.output(0)] = output_type;
235 }
236
237 return op;
238}
239
240std::unique_ptr<ROperator> make_ROperator_Gemm(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /* graphproto */, std::unordered_map<std::string, ETensorType>& tensor_type){
241
242 ETensorType input_type;
243
244 auto input_name = nodeproto.input(0);
245 auto it = tensor_type.find(input_name);
246 if (it != tensor_type.end()){
247 input_type = it->second;
248 }else{
249 throw std::runtime_error("TMVA::SOFIE ONNX Parser gemm op has input tensor" + input_name + " but its type is not yet registered");
250 }
251
252 std::unique_ptr<ROperator> op;
253
254 float attr_alpha =1.0;
255 float attr_beta =1.0;
256 int_t attr_transA =0;
257 int_t attr_transB =0;
258
259 for (int i = 0; i < nodeproto.attribute_size(); i++){
260 std::string attribute_name = nodeproto.attribute(i).name();
261 if (attribute_name == "alpha"){
262 attr_alpha = nodeproto.attribute(i).f();
263 }else if(attribute_name == "beta"){
264 attr_beta = nodeproto.attribute(i).f();
265 }else if(attribute_name == "transA"){
266 attr_transA = nodeproto.attribute(i).i();
267 if (attr_transA != 0 && attr_transA != 1) throw std::runtime_error("TMVA::SOFIE Error - Model Loading - attribute transA in Operator Gemm not 0/1");
268 }else if(attribute_name == "transB"){
269 attr_transB = nodeproto.attribute(i).i();
270 if (attr_transB != 0 && attr_transB != 1) throw std::runtime_error("TMVA::SOFIE Error - Model Loading - attribute transB in Operator Gemm not 0/1");
271 }else{
272 std::cout << "TMVA::SOFIE Warning - Model Loading - Attribute " << attribute_name << " in OperatorNode " << nodeproto.name() << " is not defined in ONNX IR and not applied!\n";
273 }
274 }
275
276
277 switch(input_type){
279 if (nodeproto.input_size() == 2){
280 op.reset(new ROperator_Gemm<float>(attr_alpha, attr_beta, attr_transA, attr_transB, nodeproto.input(0), nodeproto.input(1), nodeproto.output(0)));
281 }else{
282 op.reset(new ROperator_Gemm<float>(attr_alpha, attr_beta, attr_transA, attr_transB, nodeproto.input(0), nodeproto.input(1), nodeproto.input(2), nodeproto.output(0)));
283 }
284 break;
285 default:
286 throw std::runtime_error("TMVA::SOFIE - Unsupported - Operator Relu does not yet support input type " + std::to_string(static_cast<int>(input_type)));
287 }
288
289 ETensorType output_type = (op->TypeInference({input_type, input_type}))[0];
290 auto it2 = tensor_type.find(nodeproto.output(0));
291 if (it2 == tensor_type.end()){
292 tensor_type[nodeproto.output(0)] = output_type;
293 }
294
295 return op;
296}
297std::unique_ptr<ROperator> make_ROperator_GRU(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /* graphproto */, std::unordered_map<std::string, ETensorType>& tensor_type) {
298
299 ETensorType input_type;
300
301 auto input_name = nodeproto.input(0);
302 auto it = tensor_type.find(input_name);
303 if (it != tensor_type.end()) {
304 input_type = it->second;
305 } else {
306 throw
307 std::runtime_error("TMVA::SOFIE ONNX Parser GRU op has input tensor " + input_name + " but its type is not yet registered");
308 }
309
310 std::unique_ptr<ROperator> op;
311
312 std::vector<float> attr_activation_alpha;
313 std::vector<float> attr_activation_beta;
314 std::vector<std::string> attr_activations;
315 float attr_clip = 0.;
316 std::string attr_direction = "forward";
317 size_t attr_hidden_size = 0;
318 size_t attr_layout = 0;
319 size_t attr_linear_before_reset = 0;
320
321 for (int_t i = 0; i < nodeproto.attribute_size(); i++) {
322 std::string attribute_name = nodeproto.attribute(i).name();
323 if (attribute_name == "activation_alpha") {
324 attr_activation_alpha = {nodeproto.attribute(i).floats().begin(), nodeproto.attribute(i).floats().end()};
325 } else if (attribute_name == "activation_beta") {
326 attr_activation_beta = {nodeproto.attribute(i).floats().begin(), nodeproto.attribute(i).floats().end()};
327 } else if (attribute_name == "activations") {
328 attr_activations = {nodeproto.attribute(i).strings().begin(), nodeproto.attribute(i).strings().end()};
329 } else if (attribute_name == "clip") {
330 attr_clip = nodeproto.attribute(i).f();
331 } else if (attribute_name == "direction") {
332 attr_direction = nodeproto.attribute(i).s();
333 } else if (attribute_name == "hidden_size") {
334 attr_hidden_size = nodeproto.attribute(i).i();
335 } else if (attribute_name == "layout") {
336 attr_layout = nodeproto.attribute(i).i();
337 } else if (attribute_name == "linear_before_reset") {
338 attr_linear_before_reset = nodeproto.attribute(i).i();
339 } else {
340 std::cout << "TMVA SOFIE Warning - Model Loading - Attribute " << attribute_name << " in OperatorNode " << nodeproto.name() << " is not defined in ONNX IR and not applied!\n";
341 }
342 }
343
344 // Optional inputs and outputs
345 std::string name_b;
346 std::string name_sequence_lens;
347 std::string name_initial_h;
348 std::string name_y;
349 std::string name_y_h;
350 if (nodeproto.input_size() > 3) {
351 name_b = nodeproto.input(3);
352 }
353 if (nodeproto.input_size() > 4) {
354 name_sequence_lens = nodeproto.input(4);
355 }
356 if (nodeproto.input_size() > 5) {
357 name_initial_h = nodeproto.input(5);
358 }
359 if (nodeproto.output_size() > 0) {
360 name_y = nodeproto.output(0);
361 }
362 if (nodeproto.output_size() > 1) {
363 name_y_h = nodeproto.output(1);
364 }
365
366 switch(input_type) {
368 op.reset(new ROperator_GRU<float>(attr_activation_alpha, attr_activation_beta, attr_activations,
369 attr_clip, attr_direction, attr_hidden_size, attr_layout, attr_linear_before_reset,
370 nodeproto.input(0), nodeproto.input(1), nodeproto.input(2),
371 name_b, name_sequence_lens, name_initial_h,
372 name_y, name_y_h));
373 break;
374 default:
375 throw
376 std::runtime_error("TMVA::SOFIE - Unsupported - Operator GRU does not yet support input type " + std::to_string(static_cast<int>(input_type)));
377 }
378
379 auto output_type = op->TypeInference({input_type, input_type});
380 for (size_t i = 0; i < 2; i++) {
381 if (tensor_type.find(nodeproto.output(i)) == tensor_type.end()) {
382 tensor_type[nodeproto.output(i)] = output_type[i];
383 }
384 }
385
386 return op;
387}
388
389std::unique_ptr<ROperator> make_ROperator_Conv(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /* graphproto */, std::unordered_map<std::string, ETensorType>& tensor_type) {
390
391 ETensorType input_type;
392
393 auto input_name = nodeproto.input(0);
394 auto it = tensor_type.find(input_name);
395 if (it != tensor_type.end()) {
396 input_type = it->second;
397 } else {
398 throw
399 std::runtime_error("TMVA::SOFIE ONNX Parser Conv op has input tensor " + input_name + " but its type is not yet registered");
400 }
401
402 std::unique_ptr<ROperator> op;
403
404 std::string attr_auto_pad = "NOTSET";
405 std::vector<size_t> attr_dilations;
406 size_t attr_group = 0;
407 std::vector<size_t> attr_kernel_shape;
408 std::vector<size_t> attr_pads;
409 std::vector<size_t> attr_strides;
410
411 for (int_t i = 0; i < nodeproto.attribute_size(); i++) {
412 std::string attribute_name = nodeproto.attribute(i).name();
413 if (attribute_name == "auto_pad") {
414 attr_auto_pad = nodeproto.attribute(i).s();
415 } else if (attribute_name == "dilations") {
416 attr_dilations = std::vector<size_t>({nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()});
417 } else if (attribute_name == "group") {
418 attr_group= nodeproto.attribute(i).i();
419 } else if (attribute_name == "kernel_shape") {
420 attr_kernel_shape = std::vector<size_t>({nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()});
421 } else if (attribute_name == "pads") {
422 attr_pads = std::vector<size_t>({nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()});
423 } else if (attribute_name == "strides") {
424 attr_strides = std::vector<size_t>({nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()});
425 } else {
426 std::cout << "TMVA::SOFIE Warning - Model Loading - Attribute " << attribute_name << " in OperatorNode " << nodeproto.name() << " is not defined in ONNX IR and not applied!\n";
427 }
428 }
429
430 std::string name_b = "";
431 if (nodeproto.input_size() > 2) {
432 name_b = nodeproto.input(2);
433 }
434
435 switch(input_type) {
437 op.reset(new ROperator_Conv<float>(attr_auto_pad, attr_dilations, attr_group, attr_kernel_shape, attr_pads, attr_strides, nodeproto.input(0), nodeproto.input(1), name_b, nodeproto.output(0)));
438 break;
439 default:
440 throw
441 std::runtime_error("TMVA::SOFIE - Unsupported - Operator Conv does not yet support input type " + std::to_string(static_cast<int>(input_type)));
442 }
443
444 ETensorType output_type = (op->TypeInference({input_type, input_type}))[0];
445 auto it2 = tensor_type.find(nodeproto.output(0));
446 if (it2 == tensor_type.end()) {
447 tensor_type[nodeproto.output(0)] = output_type;
448 }
449
450 return op;
451}
452
453std::unique_ptr<ROperator> make_ROperator_Pool(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /* graphproto */, std::unordered_map<std::string, ETensorType>& tensor_type) {
454
455 ETensorType input_type;
456
457 PoolOpMode op_mode = InvalidPool;
458 if (nodeproto.op_type() == "MaxPool")
459 op_mode = MaxPool;
460 else if (nodeproto.op_type() == "AveragePool")
461 op_mode = AveragePool;
462 else if (nodeproto.op_type() == "GlobalAveragePool")
463 op_mode = GlobalAveragePool;
464
465 assert(op_mode != InvalidPool);
466
467 auto input_name = nodeproto.input(0);
468 auto it = tensor_type.find(input_name);
469 if (it != tensor_type.end()) {
470 input_type = it->second;
471 } else {
472 throw
473 std::runtime_error("TMVA::SOFIE ONNX Parser Pool op has input tensor " + input_name + " but its type is not yet registered");
474 }
475
476 std::unique_ptr<ROperator> op;
477
478 RAttributes_Pool attr;
479 // std::string attr_auto_pad = "NOTSET";
480 // int attr_ceil_mode = 0;
481 // int attr_count_include_pad = 0;
482 // int attr_storage_order = 0; // not for AveragePool
483 // std::vector<size_t> attr_dilations; // not for AveragePool
484 // std::vector<size_t> attr_kernel_shape;
485 // std::vector<size_t> attr_pads;
486 // std::vector<size_t> attr_strides;
487
488 for (int_t i = 0; i < nodeproto.attribute_size(); i++) {
489 std::string attribute_name = nodeproto.attribute(i).name();
490 if (attribute_name == "auto_pad") {
491 attr.auto_pad = nodeproto.attribute(i).s();
492 } else if (attribute_name == "ceil_mode") {
493 attr.ceil_mode = nodeproto.attribute(i).i();
494 } else if (attribute_name == "count_include_pad" && op_mode == AveragePool) {
495 attr.count_include_pad = nodeproto.attribute(i).i();
496 } else if (attribute_name == "storage_order" && op_mode == MaxPool) {
497 attr.storage_order = nodeproto.attribute(i).i();
498 } else if (attribute_name == "dilations" && op_mode == MaxPool) {
499 attr.dilations = std::vector<size_t>({nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()});
500 } else if (attribute_name == "kernel_shape") {
501 attr.kernel_shape = std::vector<size_t>({nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()});
502 } else if (attribute_name == "pads") {
503 attr.pads = std::vector<size_t>({nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()});
504 } else if (attribute_name == "strides") {
505 attr.strides = std::vector<size_t>({nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()});
506 } else {
507 std::cout << "TMVA::SOFIE Warning - Model Loading - Attribute " << attribute_name << " in OperatorNode "
508 << nodeproto.name() << " is not defined in ONNX IR and not applied!\n";
509 }
510 }
511
512 switch(input_type) {
514 op.reset(new ROperator_Pool<float>(op_mode, attr, nodeproto.input(0), nodeproto.output(0)));
515 break;
516 default:
517 throw
518 std::runtime_error("TMVA::SOFIE - Unsupported - Operator Pool does not yet support input type " + std::to_string(static_cast<int>(input_type)));
519 }
520
521 ETensorType output_type = (op->TypeInference({input_type}))[0];
522 auto it2 = tensor_type.find(nodeproto.output(0));
523 if (it2 == tensor_type.end()) {
524 tensor_type[nodeproto.output(0)] = output_type;
525 }
526
527 return op;
528}
529
530std::unique_ptr<ROperator> make_ROperator_Reshape(const onnx::NodeProto &nodeproto,
531 const onnx::GraphProto & /*graphproto */,
532 std::unordered_map<std::string, ETensorType> &tensor_type)
533{
534 // make Reshape operator
536
537
538 ReshapeOpMode opMode = Reshape;
539 if (nodeproto.op_type() == "Flatten")
540 opMode = Flatten;
541 else if (nodeproto.op_type() == "Squeeze")
542 opMode = Squeeze;
543 else if (nodeproto.op_type() == "Unsqueeze")
544 opMode = Unsqueeze;
545
546
547 //bool hasShapeInput = (opMode == Reshape) ? true : false;
548
549 // reshape has as extra input shape tensor (int64) but
550 // it is not present for Flatten, Squeeze and Unsquueze
551 auto input_name = nodeproto.input(0);
552 // for squeeze is optional ?
553 auto shape_name = (opMode == Reshape || opMode == Unsqueeze) ? nodeproto.input(1) : "";
554 auto it = tensor_type.find(input_name);
555 if (it != tensor_type.end()) {
556 input_type = it->second;
557 } else {
558 throw std::runtime_error("TMVA::SOFIE ONNX Parser Reshape op has input tensor" + input_name +
559 " but its type is not yet registered");
560 }
561
562 // Reshape is having one attribute: allowzero (int) (default = 0)
563 // Flatten is having one attribute: axis (int) (default=1)
564 // old version of reshape and squeeze have axes as attributes
565 std::unique_ptr<ROperator> op;
566 int attr_value = (opMode == Reshape) ? 0 : 1;
567 if (opMode == Reshape && nodeproto.attribute_size() > 0 )
568 attr_value = nodeproto.attribute(0).i();
569
570 std::vector<int64_t> attr_axes = {};
571 if (nodeproto.input_size() == 1 && (opMode == Squeeze || opMode == Unsqueeze)) {
572 std::string attribute_name = nodeproto.attribute(0).name();
573 if (attribute_name == "axes")
574 attr_axes = {nodeproto.attribute(0).ints().begin(), nodeproto.attribute(0).ints().end()};
575 }
576
577 switch (input_type) {
579 if (attr_axes.empty())
580 op.reset(new ROperator_Reshape<float>(opMode, attr_value, input_name, shape_name, nodeproto.output(0)));
581 else // for old Squeeze and Unsqueeze
582 op.reset(new ROperator_Reshape<float>(opMode, attr_axes, input_name, nodeproto.output(0)));
583 break;
584 default:
585 throw std::runtime_error("TMVA::SOFIE - Unsupported - Operator Reshape does not yet support input type " +
586 std::to_string(static_cast<int>(input_type)));
587 }
588
589 ETensorType output_type = (op->TypeInference({input_type}))[0];
590 auto it2 = tensor_type.find(nodeproto.output(0));
591 if (it2 == tensor_type.end()) {
592 tensor_type[nodeproto.output(0)] = output_type;
593 }
594
595 return op;
596}
597
598std::unique_ptr<ROperator> make_ROperator_Slice(const onnx::NodeProto &nodeproto,
599 const onnx::GraphProto & /*graphproto */,
600 std::unordered_map<std::string, ETensorType> &tensor_type)
601{
602 // make Slice operator
604
605 auto input_name = nodeproto.input(0);
606 auto it = tensor_type.find(input_name);
607 if (it != tensor_type.end()) {
608 input_type = it->second;
609 } else {
610 throw std::runtime_error("TMVA::SOFIE ONNX Parser Slice op has input tensor" + input_name +
611 " but its type is not yet registered");
612 }
613
614 std::vector<std::string> axisTensorNames;
615 if (nodeproto.input_size() > 1)
616 axisTensorNames.push_back(nodeproto.input(1));
617 if (nodeproto.input_size() > 2)
618 axisTensorNames.push_back(nodeproto.input(1));
619 if (nodeproto.input_size() > 3)
620 axisTensorNames.push_back(nodeproto.input(3));
621 if (nodeproto.input_size() > 4)
622 axisTensorNames.push_back(nodeproto.input(4));
623
624 // not sure how to find here type of the integer inputs
625 //std::cout << "Slice input(1) " << nodeproto.input(1) << " " << nodeproto.input(2) << std::endl;
627 //(tensor_type.find(starts_name) != tensor_type.end()) ? tensor_type.find(starts_name)->second
628 // : ETensorType::UNDEFINED;
629 // for version < 10
630 std::vector<int64_t> attr_starts = {};
631 std::vector<int64_t> attr_ends = {};
632 std::vector<int64_t> attr_axes = {};
633 if (nodeproto.input_size() == 1) {
634 for (int_t i = 0; i < nodeproto.attribute_size(); i++) {
635 std::string attribute_name = nodeproto.attribute(i).name();
636 if (attribute_name == "starts")
637 attr_starts = {nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()};
638 if (attribute_name == "ends")
639 attr_ends = {nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()};
640 if (attribute_name == "axes")
641 attr_axes = {nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()};
642 }
643 }
644
645 std::unique_ptr<ROperator> op;
646 switch (input_type) {
648 if (axisTensorNames.size() > 0) {
649 // for version >= 10
650 if (axis_type == ETensorType::INT32)
651 op.reset(new ROperator_Slice<float, int32_t>(input_name, axisTensorNames, nodeproto.output(0)));
652 else if (axis_type == ETensorType::INT64)
653 op.reset(new ROperator_Slice<float, int64_t>(input_name, axisTensorNames, nodeproto.output(0)));
654 else
655 throw std::runtime_error(
656 "TMVA::SOFIE - Unsupported - Operator Slice has invalid input type for input axis descriptors " +
657 std::to_string(static_cast<int>(axis_type)));
658 } else if (attr_starts.size() > 0 && attr_ends.size() > 0) {
659 op.reset(
660 new ROperator_Slice<float, int64_t>(input_name, attr_starts, attr_ends, attr_axes, nodeproto.output(0)));
661 } else {
662 throw std::runtime_error("TMVA::SOFIE - Unsupported - Operator Slice has invalid attribues");
663 }
664 break;
665 default:
666 throw std::runtime_error("TMVA::SOFIE - Unsupported - Operator Slice does not yet support input type " +
667 std::to_string(static_cast<int>(input_type)));
668 }
669
670 ETensorType output_type = (op->TypeInference({input_type}))[0];
671 auto it2 = tensor_type.find(nodeproto.output(0));
672 if (it2 == tensor_type.end()) {
673 tensor_type[nodeproto.output(0)] = output_type;
674 }
675
676 return op;
677}
678
679std::unique_ptr<ROperator> make_ROperator_RNN(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /* graphproto */, std::unordered_map<std::string, ETensorType>& tensor_type) {
680
681 ETensorType input_type;
682
683 auto input_name = nodeproto.input(0);
684 auto it = tensor_type.find(input_name);
685 if (it != tensor_type.end()) {
686 input_type = it->second;
687 } else {
688 throw
689 std::runtime_error("TMVA::SOFIE ONNX Parser RNN op has input tensor " + input_name + " but its type is not yet registered");
690 }
691
692 std::unique_ptr<ROperator> op;
693
694 std::vector<float> attr_activation_alpha = {};
695 std::vector<float> attr_activation_beta = {};
696 std::vector<std::string> attr_activations = {};
697 float attr_clip = 0.;
698 std::string attr_direction = "forward";
699 size_t attr_hidden_size = 0;
700 size_t attr_layout = 0;
701
702 for (int_t i = 0; i < nodeproto.attribute_size(); i++) {
703 std::string attribute_name = nodeproto.attribute(i).name();
704 if (attribute_name == "activation_alpha") {
705 attr_activation_alpha = {nodeproto.attribute(i).floats().begin(), nodeproto.attribute(i).floats().end()};
706 } else if (attribute_name == "activation_beta") {
707 attr_activation_beta = {nodeproto.attribute(i).floats().begin(), nodeproto.attribute(i).floats().end()};
708 } else if (attribute_name == "activations") {
709 attr_activations = {nodeproto.attribute(i).strings().begin(), nodeproto.attribute(i).strings().end()};
710 } else if (attribute_name == "clip") {
711 attr_clip = nodeproto.attribute(i).i();
712 } else if (attribute_name == "direction") {
713 attr_direction = nodeproto.attribute(i).s();
714 } else if (attribute_name == "hidden_size") {
715 attr_hidden_size = nodeproto.attribute(i).i();
716 } else if (attribute_name == "layout") {
717 attr_layout = nodeproto.attribute(i).i();
718 } else {
719 std::cout << "TMVA SOFIE Warning - Model Loading - Attribute " << attribute_name << " in OperatorNode " << nodeproto.name() << " is not defined in ONNX IR and not applied!\n";
720 }
721 }
722
723 // Optional inputs and outputs
724 std::string name_b = "";
725 std::string name_sequence_lens = "";
726 std::string name_initial_h = "";
727 std::string name_y = "";
728 std::string name_y_h = "";
729 if (nodeproto.input_size() > 3) {
730 name_b = nodeproto.input(3);
731 }
732 if (nodeproto.input_size() > 4) {
733 name_sequence_lens = nodeproto.input(4);
734 }
735 if (nodeproto.input_size() > 5) {
736 name_initial_h = nodeproto.input(5);
737 }
738 if (nodeproto.output_size() > 0) {
739 name_y = nodeproto.output(0);
740 }
741 if (nodeproto.output_size() > 1) {
742 name_y_h = nodeproto.output(1);
743 }
744
745 switch(input_type) {
747 op.reset(new ROperator_RNN<float>(attr_activation_alpha, attr_activation_beta, attr_activations,
748 attr_clip, attr_direction, attr_hidden_size, attr_layout,
749 nodeproto.input(0), nodeproto.input(1), nodeproto.input(2),
750 name_b, name_sequence_lens, name_initial_h, name_y, name_y_h));
751 break;
752 default:
753 throw
754 std::runtime_error("TMVA::SOFIE - Unsupported - Operator RNN does not yet support input type " + std::to_string(static_cast<int>(input_type)));
755 }
756
757 auto output_type = op->TypeInference({input_type, input_type});
758 for (size_t i = 0; i < 2; i++) {
759 if (tensor_type.find(nodeproto.output(i)) == tensor_type.end()) {
760 tensor_type[nodeproto.output(i)] = output_type[i];
761 }
762 }
763
764 return op;
765}
766
767std::unique_ptr<ROperator> make_ROperator_LSTM(const onnx::NodeProto& nodeproto, const onnx::GraphProto& /* graphproto */, std::unordered_map<std::string, ETensorType>& tensor_type) {
768
769 ETensorType input_type;
770
771 auto input_name = nodeproto.input(0);
772 auto it = tensor_type.find(input_name);
773 if (it != tensor_type.end()) {
774 input_type = it->second;
775 } else {
776 throw
777 std::runtime_error("TMVA::SOFIE ONNX Parser LSTM op has input tensor " + input_name + " but its type is not yet registered");
778 }
779
780 std::unique_ptr<ROperator> op;
781
782 std::vector<float> attr_activation_alpha;
783 std::vector<float> attr_activation_beta;
784 std::vector<std::string> attr_activations;
785 float attr_clip = 0.;
786 std::string attr_direction = "forward";
787 size_t attr_hidden_size = 0;
788 size_t attr_input_forget = 0;
789 size_t attr_layout = 0;
790
791 for (int_t i = 0; i < nodeproto.attribute_size(); i++) {
792 std::string attribute_name = nodeproto.attribute(i).name();
793 if (attribute_name == "activation_alpha") {
794 attr_activation_alpha = {nodeproto.attribute(i).floats().begin(), nodeproto.attribute(i).floats().end()};
795 } else if (attribute_name == "activation_beta") {
796 attr_activation_beta = {nodeproto.attribute(i).floats().begin(), nodeproto.attribute(i).floats().end()};
797 } else if (attribute_name == "activations") {
798 attr_activations = {nodeproto.attribute(i).strings().begin(), nodeproto.attribute(i).strings().end()};
799 } else if (attribute_name == "clip") {
800 attr_clip = nodeproto.attribute(i).f();
801 } else if (attribute_name == "direction") {
802 attr_direction = nodeproto.attribute(i).s();
803 } else if (attribute_name == "hidden_size") {
804 attr_hidden_size = nodeproto.attribute(i).i();
805 } else if (attribute_name == "input_forget") {
806 attr_input_forget = nodeproto.attribute(i).i();
807 } else if (attribute_name == "layout") {
808 attr_layout = nodeproto.attribute(i).i();
809 } else {
810 std::cout << "TMVA SOFIE Warning - Model Loading - Attribute " << attribute_name << " in OperatorNode " << nodeproto.name() << " is not defined in ONNX IR and not applied!\n";
811 }
812 }
813
814 // Optional inputs and outputs
815 std::string name_b;
816 std::string name_sequence_lens;
817 std::string name_initial_h;
818 std::string name_initial_c;
819 std::string name_p;
820 std::string name_y;
821 std::string name_y_h;
822 std::string name_y_c;
823 if (nodeproto.input_size() > 3) {
824 name_b = nodeproto.input(3);
825 }
826 if (nodeproto.input_size() > 4) {
827 name_sequence_lens = nodeproto.input(4);
828 }
829 if (nodeproto.input_size() > 5) {
830 name_initial_h = nodeproto.input(5);
831 }
832 if (nodeproto.input_size() > 6) {
833 name_initial_c = nodeproto.input(6);
834 }
835 if (nodeproto.input_size() > 7) {
836 name_p = nodeproto.input(7);
837 }
838 if (nodeproto.output_size() > 0) {
839 name_y = nodeproto.output(0);
840 }
841 if (nodeproto.output_size() > 1) {
842 name_y_h = nodeproto.output(1);
843 }
844 if (nodeproto.output_size() > 2) {
845 name_y_c = nodeproto.output(2);
846 }
847
848 switch(input_type) {
850 op.reset(new ROperator_LSTM<float>(attr_activation_alpha, attr_activation_beta, attr_activations,
851 attr_clip, attr_direction, attr_hidden_size, attr_input_forget, attr_layout,
852 nodeproto.input(0), nodeproto.input(1), nodeproto.input(2),
853 name_b, name_sequence_lens, name_initial_h, name_initial_c, name_p,
854 name_y, name_y_h, name_y_c));
855 break;
856 default:
857 throw
858 std::runtime_error("TMVA::SOFIE - Unsupported - Operator LSTM does not yet support input type " + std::to_string(static_cast<int>(input_type)));
859 }
860
861 auto output_type = op->TypeInference({input_type, input_type});
862 for (size_t i = 0; i < 2; i++) {
863 if (tensor_type.find(nodeproto.output(i)) == tensor_type.end()) {
864 tensor_type[nodeproto.output(i)] = output_type[i];
865 }
866 }
867
868 return op;
869}
870std::unique_ptr<ROperator> make_ROperator_BatchNormalization(const onnx::NodeProto &nodeproto,
871 const onnx::GraphProto &/*graphproto*/,
872 std::unordered_map<std::string, ETensorType> &tensor_type)
873{
874
875 ETensorType input_type;
876
877 auto input_name = nodeproto.input(0);
878 auto it = tensor_type.find(input_name);
879 if (it != tensor_type.end()) {
880 input_type = it->second;
881 } else {
882 throw std::runtime_error("TMVA::SOFIE ONNX Parser BatchNorm op has input tensor " + input_name +
883 " but its type is not yet registered");
884 }
885
886 std::unique_ptr<ROperator> op;
887 float fepsilon = 1e-05;
888 float fmomentum = 0.9;
889 std::size_t ftraining_mode = 0;
890
891 switch(input_type) {
893 if (nodeproto.input_size() == 5) {
894 op.reset(new ROperator_BatchNormalization<float>(fepsilon, fmomentum, ftraining_mode, nodeproto.input(0), nodeproto.input(1), nodeproto.input(2), nodeproto.input(3), nodeproto.input(4), nodeproto.output(0)));
895 }
896 break;
897 default:
898 throw
899 std::runtime_error("TMVA::SOFIE - Unsupported - Operator BatchNorm does not yet support input type " + std::to_string(static_cast<int>(input_type)));
900 }
901
902 ETensorType output_type = (op->TypeInference({input_type, input_type, input_type, input_type, input_type}))[0];
903 auto it2 = tensor_type.find(nodeproto.output(0));
904 if (it2 == tensor_type.end()) {
905 tensor_type[nodeproto.output(0)] = output_type;
906 }
907
908 return op;
909}
910
911} //INTERNAL
912
913
914
915RModel RModelParser_ONNX::Parse(std::string filename){
916 char sep = '/';
917 #ifdef _WIN32
918 sep = '\\';
919 #endif
920 size_t isep = filename.rfind(sep, filename.length());
921 std::string filename_nodir = filename;
922 if (isep != std::string::npos){
923 filename_nodir = (filename.substr(isep+1, filename.length() - isep));
924 }
925
926
927
928 std::time_t ttime = std::time(0);
929 std::tm* gmt_time = std::gmtime(&ttime);
930 std::string parsetime (std::asctime(gmt_time));
931
932
933
934
935 GOOGLE_PROTOBUF_VERIFY_VERSION;
936 //model I/O
937 onnx::ModelProto model;
938 RModel rmodel(filename_nodir, parsetime);
939
940 std::unordered_map<std::string, ETensorType> tensor_type;
941
942 std::fstream input(filename, std::ios::in | std::ios::binary);
943 if (!model.ParseFromIstream(&input)){
944 throw std::runtime_error("TMVA::SOFIE - Failed to parse onnx file");
945 }
946
947 const onnx::GraphProto& graph = model.graph(); //not a memory leak. model freed automatically at the end.
948 google::protobuf::ShutdownProtobufLibrary();
949
950 // ONNX version is ir_version() - model_version() returns 0
951 // std::cout << "ONNX Version " << model.ir_version() << std::endl;
952
953 std::unordered_set<std::string> initializer_names;
954 for (int i=0; i < graph.initializer_size(); i++){
955 initializer_names.insert(graph.initializer(i).name());
956 }
957
958
959 for (int i=0; i < graph.input_size(); i++){
960
961 tensor_type[graph.input(i).name()] = static_cast<ETensorType>(graph.input(i).type().tensor_type().elem_type());
962
963 if (initializer_names.find(graph.input(i).name()) != initializer_names.end()) continue;
964
965 //input datanode is not a weight node (has no initializer)
966 const onnx::ValueInfoProto& valueinfoproto = graph.input(i);
967 std::string input_name = valueinfoproto.name();
968
969 ETensorType type = static_cast<ETensorType>(valueinfoproto.type().tensor_type().elem_type());
971 throw std::runtime_error("TMVA::SOFIE Data type in input tensor " + input_name + " not supported!\n");
972 }
973
974 std::vector<Dim> fShape;
975 bool existParam = false;
976 if (!valueinfoproto.type().tensor_type().has_shape()) throw std::runtime_error("TMVA::SOFIE datanode with no shape restrictions is not supported yet");
977 for (int j = 0; j < valueinfoproto.type().tensor_type().shape().dim_size(); j++){
978 Dim dim;
979 if (valueinfoproto.type().tensor_type().shape().dim(j).value_case() == onnx::TensorShapeProto_Dimension::ValueCase::kDimValue){
980 dim.dim = valueinfoproto.type().tensor_type().shape().dim(j).dim_value();
981 }else if (valueinfoproto.type().tensor_type().shape().dim(j).value_case() == onnx::TensorShapeProto_Dimension::ValueCase::kDimParam){
982 dim.isParam = true;
983 existParam = true;
984 dim.param = valueinfoproto.type().tensor_type().shape().dim(j).dim_param();
985 }else{
986 throw std::runtime_error("TMVA::SOFIE ONNX file error: Valueinfoproto " + input_name + " has neither dim_value nor dim_param! \n");
987 }
988 fShape.push_back(dim);
989 }
990 if (valueinfoproto.type().tensor_type().shape().dim_size() == 0){
991 Dim dim;
992 dim.dim = 1;
993 fShape.push_back(dim);
994 } //in case this TensorShapeProto has no dimension message: ONNX IR defines this to be a scalar
995
996 if (!existParam){
997 std::vector<size_t> fShape_sizet;
998 for (auto& j: fShape){
999 fShape_sizet.push_back(j.dim);
1000 }
1001
1002 rmodel.AddInputTensorInfo(input_name, type, fShape_sizet);
1003 }else{
1004 rmodel.AddInputTensorInfo(input_name, type, fShape);
1005 }
1006
1007 }
1008
1009 for (int i=0; i < graph.initializer_size(); i++){
1010 onnx::TensorProto* tensorproto = const_cast<onnx::TensorProto*>(&graph.initializer(i));
1011 std::vector<std::size_t> fShape;
1012 std::size_t fLength = 1;
1013 for (int j = 0; j < tensorproto->dims_size(); j++){
1014 fShape.push_back(tensorproto->dims(j));
1015 fLength *= tensorproto->dims(j);
1016 }
1017
1018 std::string input_name = graph.initializer(i).name();
1019
1020 switch(static_cast<ETensorType>(graph.initializer(i).data_type())){
1021 case ETensorType::FLOAT : {
1022 //void* data = malloc (fLength * sizeof(float));
1023 std::shared_ptr<void> data(malloc(fLength * sizeof(float)), free);
1024
1025 if (tensorproto->raw_data().empty() == false){
1026 auto raw_data_ptr = reinterpret_cast<float*>(const_cast<char*>(tensorproto->raw_data().c_str()));
1027 std::memcpy(data.get(), raw_data_ptr, fLength * sizeof(float));
1028 }else{
1029 tensorproto->mutable_float_data()->ExtractSubrange(0, tensorproto->float_data_size(), static_cast<float*>(data.get()));
1030 }
1031
1032 rmodel.AddInitializedTensor(input_name, ETensorType::FLOAT, fShape, data);
1033 break;
1034 }
1035 default: throw std::runtime_error("Data type in weight tensor " + graph.initializer(i).name() + " not supported!\n");
1036 }
1037 }
1038
1039
1040
1041 for (int i=0; i < graph.node_size(); i++){
1042 auto op = INTERNAL::make_ROperator(i, graph, tensor_type);
1043 if (!op) {
1044 break;
1045 }
1046 rmodel.AddOperator(std::move(op));
1047 std::string op_type = graph.node(i).op_type();
1048 if (op_type == "Gemm") {
1049 rmodel.AddBlasRoutines({"Gemm", "Gemv"});
1050 } else if (op_type == "Conv") {
1051 rmodel.AddBlasRoutines({"Gemm", "Axpy"});
1052 } else if (op_type == "RNN") {
1053 rmodel.AddBlasRoutines({"Gemm", "Axpy"});
1054 } else if (op_type == "Selu" || op_type == "Sigmoid") {
1055 rmodel.AddNeededStdLib("cmath");
1056 } else if (op_type == "LSTM") {
1057 rmodel.AddBlasRoutines({"Gemm", "Axpy"});
1058 } else if (op_type == "BatchNormalization") {
1059 rmodel.AddBlasRoutines({"Copy", "Axpy"});
1060 } else if (op_type == "GRU") {
1061 rmodel.AddBlasRoutines({"Gemm", "Axpy"});
1062 }
1063 }
1064
1065 std::vector<std::string> outputnames;
1066 for (int i=0; i < graph.output_size(); i++){
1067 outputnames.push_back(graph.output(i).name());
1068 }
1069 rmodel.AddOutputTensorNameList(outputnames);
1070
1071 return rmodel;
1072
1073}
1074
1075
1076
1077}//SOFIE
1078}//Experimental
1079}//TMVA
#define e(i)
Definition RSha256.hxx:103
int type
Definition TGX11.cxx:121
#define free
Definition civetweb.c:1539
#define malloc
Definition civetweb.c:1536
void AddOutputTensorNameList(std::vector< std::string > outputtensornames)
Definition RModel.cxx:145
void AddInputTensorInfo(std::string input_name, ETensorType type, std::vector< Dim > shape)
Definition RModel.cxx:98
void AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
Definition RModel.cxx:125
void AddBlasRoutines(std::vector< std::string > routines)
Definition RModel.hxx:72
void AddNeededStdLib(std::string libname)
Definition RModel.hxx:77
void AddOperator(std::unique_ptr< ROperator > op, int order_execution=-1)
Definition RModel.cxx:117
Gated Recurrent Unit operator.
Long Short-Term Memory operator.
Recurrent Neural Network operator.
std::unique_ptr< ROperator > make_ROperator_Conv(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Gemm(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_LSTM(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Selu(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Pool(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_RNN(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unordered_map< std::string, std::unique_ptr< ROperator >(*)(const onnx::NodeProto &, const onnx::GraphProto &, std::unordered_map< std::string, ETensorType > &)> factoryMethodMap
std::unique_ptr< ROperator > make_ROperator(size_t idx, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Relu(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_GRU(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Reshape(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_BatchNormalization(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Sigmoid(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Add(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Slice(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Transpose(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
create variable transformations
Definition graph.py:1