2#include "onnx_proto3.pb.h"
// Forward declarations of the per-operator parser factories.
// Each factory consumes one onnx::NodeProto, registers the type of the node's
// output tensor(s) in `tensor_type`, and returns the matching SOFIE ROperator.
// NOTE(review): this view is a garbled extraction — the stray leading numbers
// below are original source line numbers embedded in the text, not code.
14std::unique_ptr<ROperator>
make_ROperator_Transpose(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
15std::unique_ptr<ROperator>
make_ROperator_Relu(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
16std::unique_ptr<ROperator>
make_ROperator_Selu(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
17std::unique_ptr<ROperator>
make_ROperator_Sigmoid(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
18std::unique_ptr<ROperator>
make_ROperator_Gemm(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
19std::unique_ptr<ROperator>
make_ROperator_Conv(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
20std::unique_ptr<ROperator>
make_ROperator_RNN(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
21std::unique_ptr<ROperator>
make_ROperator_LSTM(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
22std::unique_ptr<ROperator>
make_ROperator_BatchNormalization(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
23std::unique_ptr<ROperator>
make_ROperator_Pool(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
24std::unique_ptr<ROperator>
make_ROperator_Add(
const onnx::NodeProto &nodeproto,
const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
25std::unique_ptr<ROperator>
make_ROperator_Reshape(
const onnx::NodeProto &nodeproto,
const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
26std::unique_ptr<ROperator>
make_ROperator_Slice(
const onnx::NodeProto &nodeproto,
const onnx::GraphProto &graphproto, std::unordered_map<std::string, ETensorType> &tensor_type);
27std::unique_ptr<ROperator>
make_ROperator_GRU(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type);
// Maps an ONNX op_type string onto its parser factory.
30using factoryMethodMap = std::unordered_map<std::string, std::unique_ptr<ROperator> (*)(
const onnx::NodeProto&,
const onnx::GraphProto&, std::unordered_map<std::string, ETensorType>&)>;
// Dispatcher: parses graph node `idx` by looking up its op_type in the
// factory map and invoking the matching make_ROperator_* factory.
// Throws std::runtime_error for unsupported operator types.
// NOTE(review): the factory-map lookup that declares `find` (and the `if`
// guarding the throw) is elided from this view.
56std::unique_ptr<ROperator>
make_ROperator(
size_t idx,
const onnx::GraphProto& graphproto, std::unordered_map<std::string, ETensorType>& tensor_type){
57 const auto& nodeproto = graphproto.node(idx);
60 throw std::runtime_error(
"TMVA::SOFIE - Operator type " + nodeproto.op_type() +
" is not yet supported");
65 return (find->second)(nodeproto, graphproto, tensor_type);
// Parser for the ONNX Add operator (element-wise addition of two inputs).
// Requires both input tensor types to be registered already; registers the
// inferred output type in `tensor_type`.
// NOTE(review): the declaration of `input_type`, the switch constructing the
// operator, and the return statement are elided from this view.
69std::unique_ptr<ROperator>
make_ROperator_Add(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& , std::unordered_map<std::string, ETensorType>& tensor_type){
73 for (
int i = 0; i < 2; ++i) {
74 auto input_name = nodeproto.input(i);
75 auto it = tensor_type.find(input_name);
76 if (it != tensor_type.end()){
78 if (i == 0) input_type = it->second;
// NOTE(review): mismatched input types are caught only by assert(), i.e.
// in debug builds; release builds silently use the first input's type.
80 assert(it->second == input_type);
82 throw std::runtime_error(
"TMVA::SOFIE ONNX Parser Add op has input tensor" + input_name +
" but its type is not yet registered");
86 std::unique_ptr<ROperator> op;
93 throw std::runtime_error(
"TMVA::SOFIE - Unsupported - Operator Add does not yet support input type " + std::to_string(
static_cast<int>(input_type)));
// Register the inferred output tensor type for downstream nodes.
96 ETensorType output_type = (op->TypeInference({input_type}))[0];
97 auto it2 = tensor_type.find(nodeproto.output(0));
98 if (it2 == tensor_type.end()){
99 tensor_type[nodeproto.output(0)] = output_type;
// Parser for the ONNX Transpose operator. Reads the optional `perm`
// attribute (a permutation of the input axes) and registers the output type.
// FIX: error message spelled "tranpose" and was missing the space before the
// tensor name (it printed "tensor<name>").
// NOTE(review): the declaration of `input_type`, the else-branch braces, the
// operator-constructing switch and the return are elided from this view.
104std::unique_ptr<ROperator>
make_ROperator_Transpose(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& , std::unordered_map<std::string, ETensorType>& tensor_type){
108 auto input_name = nodeproto.input(0);
109 auto it = tensor_type.find(input_name);
110 if (it != tensor_type.end()){
111 input_type = it->second;
113 throw std::runtime_error(
"TMVA::SOFIE ONNX Parser transpose op has input tensor " + input_name +
" but its type is not yet registered");
116 std::unique_ptr<ROperator> op;
117 std::vector<int_t> attr_perm;
// A Transpose node carries at most one attribute: `perm`.
119 if (nodeproto.attribute_size() == 1){
120 attr_perm.assign(nodeproto.attribute(0).ints().begin(), nodeproto.attribute(0).ints().end());
125 if (!attr_perm.empty()){
132 throw std::runtime_error(
"TMVA::SOFIE - Unsupported - Operator Transpose does not yet support input type " + std::to_string(
static_cast<int>(input_type)));
135 ETensorType output_type = (op->TypeInference({input_type}))[0];
136 auto it2 = tensor_type.find(nodeproto.output(0));
137 if (it2 == tensor_type.end()){
138 tensor_type[nodeproto.output(0)] = output_type;
// Parser for the ONNX Relu activation operator.
// NOTE(review): `input_type` declaration, the operator-constructing switch
// and the return statement are elided from this view.
144std::unique_ptr<ROperator>
make_ROperator_Relu(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& , std::unordered_map<std::string, ETensorType>& tensor_type){
148 auto input_name = nodeproto.input(0);
149 auto it = tensor_type.find(input_name);
150 if (it != tensor_type.end()){
151 input_type = it->second;
153 throw std::runtime_error(
"TMVA::SOFIE ONNX Parser relu op has input tensor" + input_name +
" but its type is not yet registered");
156 std::unique_ptr<ROperator> op;
164 throw std::runtime_error(
"TMVA::SOFIE - Unsupported - Operator Relu does not yet support input type " + std::to_string(
static_cast<int>(input_type)));
// Register the inferred output tensor type for downstream nodes.
167 ETensorType output_type = (op->TypeInference({input_type}))[0];
168 auto it2 = tensor_type.find(nodeproto.output(0));
169 if (it2 == tensor_type.end()){
170 tensor_type[nodeproto.output(0)] = output_type;
// Parser for the ONNX Selu activation operator.
// NOTE(review): `input_type` declaration, the operator-constructing switch
// and the return statement are elided from this view.
176std::unique_ptr<ROperator>
make_ROperator_Selu(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& , std::unordered_map<std::string, ETensorType>& tensor_type){
180 auto input_name = nodeproto.input(0);
181 auto it = tensor_type.find(input_name);
182 if (it != tensor_type.end()){
183 input_type = it->second;
185 throw std::runtime_error(
"TMVA::SOFIE ONNX Parser selu op has input tensor" + input_name +
" but its type is not yet registered");
188 std::unique_ptr<ROperator> op;
196 throw std::runtime_error(
"TMVA::SOFIE - Unsupported - Operator Selu does not yet support input type " + std::to_string(
static_cast<int>(input_type)));
// Register the inferred output tensor type for downstream nodes.
199 ETensorType output_type = (op->TypeInference({input_type}))[0];
200 auto it2 = tensor_type.find(nodeproto.output(0));
201 if (it2 == tensor_type.end()){
202 tensor_type[nodeproto.output(0)] = output_type;
// Parser for the ONNX Sigmoid activation operator.
// NOTE(review): `input_type` declaration, the operator-constructing switch
// and the return statement are elided from this view.
208std::unique_ptr<ROperator>
make_ROperator_Sigmoid(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& , std::unordered_map<std::string, ETensorType>& tensor_type){
212 auto input_name = nodeproto.input(0);
213 auto it = tensor_type.find(input_name);
214 if (it != tensor_type.end()){
215 input_type = it->second;
217 throw std::runtime_error(
"TMVA::SOFIE ONNX Parser Sigmoid op has input tensor" + input_name +
" but its type is not yet registered");
220 std::unique_ptr<ROperator> op;
228 throw std::runtime_error(
"TMVA::SOFIE - Unsupported - Operator Sigmoid does not yet support input type " + std::to_string(
static_cast<int>(input_type)));
// Register the inferred output tensor type for downstream nodes.
231 ETensorType output_type = (op->TypeInference({input_type}))[0];
232 auto it2 = tensor_type.find(nodeproto.output(0));
233 if (it2 == tensor_type.end()){
234 tensor_type[nodeproto.output(0)] = output_type;
// Parser for the ONNX Gemm operator (alpha * A' * B' + beta * C).
// Reads alpha/beta/transA/transB attributes; C (input 2) is optional.
// FIX: the unsupported-input-type message said "Operator Relu" (copy-paste
// from the Relu parser) — corrected to "Operator Gemm"; also added the
// missing space before the tensor name in the unregistered-type message.
// NOTE(review): `input_type` declaration, some else/brace lines, the switch
// over input_type and the return statement are elided from this view.
240std::unique_ptr<ROperator>
make_ROperator_Gemm(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& , std::unordered_map<std::string, ETensorType>& tensor_type){
244 auto input_name = nodeproto.input(0);
245 auto it = tensor_type.find(input_name);
246 if (it != tensor_type.end()){
247 input_type = it->second;
249 throw std::runtime_error(
"TMVA::SOFIE ONNX Parser gemm op has input tensor " + input_name +
" but its type is not yet registered");
252 std::unique_ptr<ROperator> op;
// ONNX defaults: alpha = beta = 1.0, transA = transB = 0.
254 float attr_alpha =1.0;
255 float attr_beta =1.0;
256 int_t attr_transA =0;
257 int_t attr_transB =0;
259 for (
int i = 0; i < nodeproto.attribute_size(); i++){
260 std::string attribute_name = nodeproto.attribute(i).name();
261 if (attribute_name ==
"alpha"){
262 attr_alpha = nodeproto.attribute(i).f();
263 }
else if(attribute_name ==
"beta"){
264 attr_beta = nodeproto.attribute(i).f();
265 }
else if(attribute_name ==
"transA"){
266 attr_transA = nodeproto.attribute(i).i();
267 if (attr_transA != 0 && attr_transA != 1)
throw std::runtime_error(
"TMVA::SOFIE Error - Model Loading - attribute transA in Operator Gemm not 0/1");
268 }
else if(attribute_name ==
"transB"){
269 attr_transB = nodeproto.attribute(i).i();
270 if (attr_transB != 0 && attr_transB != 1)
throw std::runtime_error(
"TMVA::SOFIE Error - Model Loading - attribute transB in Operator Gemm not 0/1");
// Unknown attributes are warned about but ignored.
272 std::cout <<
"TMVA::SOFIE Warning - Model Loading - Attribute " << attribute_name <<
" in OperatorNode " << nodeproto.name() <<
" is not defined in ONNX IR and not applied!\n";
// Two-input form (no bias C) vs. three-input form.
279 if (nodeproto.input_size() == 2){
280 op.reset(
new ROperator_Gemm<float>(attr_alpha, attr_beta, attr_transA, attr_transB, nodeproto.input(0), nodeproto.input(1), nodeproto.output(0)));
282 op.reset(
new ROperator_Gemm<float>(attr_alpha, attr_beta, attr_transA, attr_transB, nodeproto.input(0), nodeproto.input(1), nodeproto.input(2), nodeproto.output(0)));
286 throw std::runtime_error(
"TMVA::SOFIE - Unsupported - Operator Gemm does not yet support input type " + std::to_string(
static_cast<int>(input_type)));
289 ETensorType output_type = (op->TypeInference({input_type, input_type}))[0];
290 auto it2 = tensor_type.find(nodeproto.output(0));
291 if (it2 == tensor_type.end()){
292 tensor_type[nodeproto.output(0)] = output_type;
// Parser for the ONNX GRU operator. Reads the recurrent-cell attributes,
// the optional inputs B / sequence_lens / initial_h and the optional
// outputs Y / Y_h, then registers both output types.
// FIX: two error paths constructed a std::runtime_error and immediately
// discarded it (no `throw`), so unregistered input types and unsupported
// input types were silently ignored — added the missing `throw`s.
// NOTE(review): `input_type`, `name_b`/`name_y` declarations, the switch
// over input_type and the tail of the ROperator_GRU constructor call are
// elided from this view.
297std::unique_ptr<ROperator>
make_ROperator_GRU(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& , std::unordered_map<std::string, ETensorType>& tensor_type) {
301 auto input_name = nodeproto.input(0);
302 auto it = tensor_type.find(input_name);
303 if (it != tensor_type.end()) {
304 input_type = it->second;
307 throw std::runtime_error(
"TMVA::SOFIE ONNX Parser GRU op has input tensor " + input_name +
" but its type is not yet registered");
310 std::unique_ptr<ROperator> op;
// GRU attribute defaults per the ONNX specification.
312 std::vector<float> attr_activation_alpha;
313 std::vector<float> attr_activation_beta;
314 std::vector<std::string> attr_activations;
315 float attr_clip = 0.;
316 std::string attr_direction =
"forward";
317 size_t attr_hidden_size = 0;
318 size_t attr_layout = 0;
319 size_t attr_linear_before_reset = 0;
321 for (
int_t i = 0; i < nodeproto.attribute_size(); i++) {
322 std::string attribute_name = nodeproto.attribute(i).name();
323 if (attribute_name ==
"activation_alpha") {
324 attr_activation_alpha = {nodeproto.attribute(i).floats().begin(), nodeproto.attribute(i).floats().end()};
325 }
else if (attribute_name ==
"activation_beta") {
326 attr_activation_beta = {nodeproto.attribute(i).floats().begin(), nodeproto.attribute(i).floats().end()};
327 }
else if (attribute_name ==
"activations") {
328 attr_activations = {nodeproto.attribute(i).strings().begin(), nodeproto.attribute(i).strings().end()};
329 }
else if (attribute_name ==
"clip") {
330 attr_clip = nodeproto.attribute(i).f();
331 }
else if (attribute_name ==
"direction") {
332 attr_direction = nodeproto.attribute(i).s();
333 }
else if (attribute_name ==
"hidden_size") {
334 attr_hidden_size = nodeproto.attribute(i).i();
335 }
else if (attribute_name ==
"layout") {
336 attr_layout = nodeproto.attribute(i).i();
337 }
else if (attribute_name ==
"linear_before_reset") {
338 attr_linear_before_reset = nodeproto.attribute(i).i();
// Unknown attributes are warned about but ignored.
340 std::cout <<
"TMVA SOFIE Warning - Model Loading - Attribute " << attribute_name <<
" in OperatorNode " << nodeproto.name() <<
" is not defined in ONNX IR and not applied!\n";
// Optional inputs (3..5) and optional outputs (0..1).
346 std::string name_sequence_lens;
347 std::string name_initial_h;
349 std::string name_y_h;
350 if (nodeproto.input_size() > 3) {
351 name_b = nodeproto.input(3);
353 if (nodeproto.input_size() > 4) {
354 name_sequence_lens = nodeproto.input(4);
356 if (nodeproto.input_size() > 5) {
357 name_initial_h = nodeproto.input(5);
359 if (nodeproto.output_size() > 0) {
360 name_y = nodeproto.output(0);
362 if (nodeproto.output_size() > 1) {
363 name_y_h = nodeproto.output(1);
368 op.reset(
new ROperator_GRU<float>(attr_activation_alpha, attr_activation_beta, attr_activations,
369 attr_clip, attr_direction, attr_hidden_size, attr_layout, attr_linear_before_reset,
370 nodeproto.input(0), nodeproto.input(1), nodeproto.input(2),
371 name_b, name_sequence_lens, name_initial_h,
376 throw std::runtime_error(
"TMVA::SOFIE - Unsupported - Operator GRU does not yet support input type " + std::to_string(
static_cast<int>(input_type)));
// GRU has two outputs (Y, Y_h): register both inferred types.
379 auto output_type = op->TypeInference({input_type, input_type});
380 for (
size_t i = 0; i < 2; i++) {
381 if (tensor_type.find(nodeproto.output(i)) == tensor_type.end()) {
382 tensor_type[nodeproto.output(i)] = output_type[i];
// Parser for the ONNX Conv operator. Reads the convolution attributes
// (auto_pad, dilations, group, kernel_shape, pads, strides) and the optional
// bias input, then registers the output type.
// FIX: two error paths constructed a std::runtime_error and immediately
// discarded it (no `throw`) — added the missing `throw`s.
// NOTE(review): `input_type` declaration, else/brace lines, the switch over
// input_type and the return statement are elided from this view. Also note
// attr_group defaults to 0 here while the ONNX default is 1 — presumably
// handled downstream; verify against ROperator_Conv.
389std::unique_ptr<ROperator>
make_ROperator_Conv(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& , std::unordered_map<std::string, ETensorType>& tensor_type) {
393 auto input_name = nodeproto.input(0);
394 auto it = tensor_type.find(input_name);
395 if (it != tensor_type.end()) {
396 input_type = it->second;
399 throw std::runtime_error(
"TMVA::SOFIE ONNX Parser Conv op has input tensor " + input_name +
" but its type is not yet registered");
402 std::unique_ptr<ROperator> op;
404 std::string attr_auto_pad =
"NOTSET";
405 std::vector<size_t> attr_dilations;
406 size_t attr_group = 0;
407 std::vector<size_t> attr_kernel_shape;
408 std::vector<size_t> attr_pads;
409 std::vector<size_t> attr_strides;
411 for (
int_t i = 0; i < nodeproto.attribute_size(); i++) {
412 std::string attribute_name = nodeproto.attribute(i).name();
413 if (attribute_name ==
"auto_pad") {
414 attr_auto_pad = nodeproto.attribute(i).s();
415 }
else if (attribute_name ==
"dilations") {
416 attr_dilations = std::vector<size_t>({nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()});
417 }
else if (attribute_name ==
"group") {
418 attr_group= nodeproto.attribute(i).i();
419 }
else if (attribute_name ==
"kernel_shape") {
420 attr_kernel_shape = std::vector<size_t>({nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()});
421 }
else if (attribute_name ==
"pads") {
422 attr_pads = std::vector<size_t>({nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()});
423 }
else if (attribute_name ==
"strides") {
424 attr_strides = std::vector<size_t>({nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()});
// Unknown attributes are warned about but ignored.
426 std::cout <<
"TMVA::SOFIE Warning - Model Loading - Attribute " << attribute_name <<
" in OperatorNode " << nodeproto.name() <<
" is not defined in ONNX IR and not applied!\n";
// Optional bias tensor (input 2).
430 std::string name_b =
"";
431 if (nodeproto.input_size() > 2) {
432 name_b = nodeproto.input(2);
437 op.reset(
new ROperator_Conv<float>(attr_auto_pad, attr_dilations, attr_group, attr_kernel_shape, attr_pads, attr_strides, nodeproto.input(0), nodeproto.input(1), name_b, nodeproto.output(0)));
441 throw std::runtime_error(
"TMVA::SOFIE - Unsupported - Operator Conv does not yet support input type " + std::to_string(
static_cast<int>(input_type)));
444 ETensorType output_type = (op->TypeInference({input_type, input_type}))[0];
445 auto it2 = tensor_type.find(nodeproto.output(0));
446 if (it2 == tensor_type.end()) {
447 tensor_type[nodeproto.output(0)] = output_type;
// Parser for the ONNX pooling operators (MaxPool / AveragePool /
// GlobalAveragePool), dispatched on op_type into a shared attribute struct.
// FIX: two error paths constructed a std::runtime_error and immediately
// discarded it (no `throw`) — added the missing `throw`s.
// NOTE(review): the `op_mode` / `attr` declarations, `input_type`, the
// operator-constructing switch and the return statement are elided from
// this view.
453std::unique_ptr<ROperator>
make_ROperator_Pool(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& , std::unordered_map<std::string, ETensorType>& tensor_type) {
458 if (nodeproto.op_type() ==
"MaxPool")
460 else if (nodeproto.op_type() ==
"AveragePool")
462 else if (nodeproto.op_type() ==
"GlobalAveragePool")
467 auto input_name = nodeproto.input(0);
468 auto it = tensor_type.find(input_name);
469 if (it != tensor_type.end()) {
470 input_type = it->second;
473 throw std::runtime_error(
"TMVA::SOFIE ONNX Parser Pool op has input tensor " + input_name +
" but its type is not yet registered");
476 std::unique_ptr<ROperator> op;
// Fill the pooling attribute struct; some attributes are only valid for a
// specific pool mode (count_include_pad, storage_order, dilations).
488 for (
int_t i = 0; i < nodeproto.attribute_size(); i++) {
489 std::string attribute_name = nodeproto.attribute(i).name();
490 if (attribute_name ==
"auto_pad") {
491 attr.
auto_pad = nodeproto.attribute(i).s();
492 }
else if (attribute_name ==
"ceil_mode") {
493 attr.
ceil_mode = nodeproto.attribute(i).i();
494 }
else if (attribute_name ==
"count_include_pad" && op_mode ==
AveragePool) {
496 }
else if (attribute_name ==
"storage_order" && op_mode ==
MaxPool) {
498 }
else if (attribute_name ==
"dilations" && op_mode ==
MaxPool) {
499 attr.
dilations = std::vector<size_t>({nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()});
500 }
else if (attribute_name ==
"kernel_shape") {
501 attr.
kernel_shape = std::vector<size_t>({nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()});
502 }
else if (attribute_name ==
"pads") {
503 attr.
pads = std::vector<size_t>({nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()});
504 }
else if (attribute_name ==
"strides") {
505 attr.
strides = std::vector<size_t>({nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()});
// Unknown attributes are warned about but ignored.
507 std::cout <<
"TMVA::SOFIE Warning - Model Loading - Attribute " << attribute_name <<
" in OperatorNode "
508 << nodeproto.name() <<
" is not defined in ONNX IR and not applied!\n";
518 throw std::runtime_error(
"TMVA::SOFIE - Unsupported - Operator Pool does not yet support input type " + std::to_string(
static_cast<int>(input_type)));
521 ETensorType output_type = (op->TypeInference({input_type}))[0];
522 auto it2 = tensor_type.find(nodeproto.output(0));
523 if (it2 == tensor_type.end()) {
524 tensor_type[nodeproto.output(0)] = output_type;
// Body of make_ROperator_Reshape: handles Reshape, Flatten, Squeeze and
// Unsqueeze, dispatched on op_type into an `opMode` enum.
// NOTE(review): the function signature's first line, the `opMode` and
// `input_type` declarations, the operator-constructing switch and the
// return statement are elided from this view.
531 const onnx::GraphProto & ,
532 std::unordered_map<std::string, ETensorType> &tensor_type)
539 if (nodeproto.op_type() ==
"Flatten")
541 else if (nodeproto.op_type() ==
"Squeeze")
543 else if (nodeproto.op_type() ==
"Unsqueeze")
// Reshape and (opset >= 13) Squeeze/Unsqueeze carry the shape/axes as a
// second input tensor; Flatten does not.
551 auto input_name = nodeproto.input(0);
553 auto shape_name = (opMode ==
Reshape || opMode ==
Unsqueeze) ? nodeproto.input(1) :
"";
554 auto it = tensor_type.find(input_name);
555 if (it != tensor_type.end()) {
556 input_type = it->second;
558 throw std::runtime_error(
"TMVA::SOFIE ONNX Parser Reshape op has input tensor" + input_name +
559 " but its type is not yet registered");
565 std::unique_ptr<ROperator> op;
// allowzero (Reshape) or axis (Flatten); read from attribute 0 if present.
566 int attr_value = (opMode ==
Reshape) ? 0 : 1;
567 if (opMode ==
Reshape && nodeproto.attribute_size() > 0 )
568 attr_value = nodeproto.attribute(0).i();
// Older Squeeze/Unsqueeze (opset < 13) carry axes as an attribute instead
// of a second input.
570 std::vector<int64_t> attr_axes = {};
571 if (nodeproto.input_size() == 1 && (opMode ==
Squeeze || opMode ==
Unsqueeze)) {
572 std::string attribute_name = nodeproto.attribute(0).name();
573 if (attribute_name ==
"axes")
574 attr_axes = {nodeproto.attribute(0).ints().begin(), nodeproto.attribute(0).ints().end()};
577 switch (input_type) {
579 if (attr_axes.empty())
585 throw std::runtime_error(
"TMVA::SOFIE - Unsupported - Operator Reshape does not yet support input type " +
586 std::to_string(
static_cast<int>(input_type)));
589 ETensorType output_type = (op->TypeInference({input_type}))[0];
590 auto it2 = tensor_type.find(nodeproto.output(0));
591 if (it2 == tensor_type.end()) {
592 tensor_type[nodeproto.output(0)] = output_type;
// Body of make_ROperator_Slice: supports both the tensor-input form
// (ONNX opset >= 10: inputs are data, starts, ends, axes, steps) and the
// attribute form (opset 1: starts/ends/axes attributes).
// FIX: the "ends" descriptor (input 2) was pushed as nodeproto.input(1) —
// a copy-paste bug that duplicated "starts" and dropped "ends".
// NOTE(review): the function signature's first line, `input_type` and
// `axis_type` declarations, the operator-constructing switch and the
// return statement are elided from this view.
599 const onnx::GraphProto & ,
600 std::unordered_map<std::string, ETensorType> &tensor_type)
605 auto input_name = nodeproto.input(0);
606 auto it = tensor_type.find(input_name);
607 if (it != tensor_type.end()) {
608 input_type = it->second;
610 throw std::runtime_error(
"TMVA::SOFIE ONNX Parser Slice op has input tensor" + input_name +
611 " but its type is not yet registered");
// Collect the optional axis-descriptor tensors: starts(1), ends(2),
// axes(3), steps(4).
614 std::vector<std::string> axisTensorNames;
615 if (nodeproto.input_size() > 1)
616 axisTensorNames.push_back(nodeproto.input(1));
617 if (nodeproto.input_size() > 2)
618 axisTensorNames.push_back(nodeproto.input(2));
619 if (nodeproto.input_size() > 3)
620 axisTensorNames.push_back(nodeproto.input(3));
621 if (nodeproto.input_size() > 4)
622 axisTensorNames.push_back(nodeproto.input(4));
// Attribute form: only valid when there is a single input tensor.
630 std::vector<int64_t> attr_starts = {};
631 std::vector<int64_t> attr_ends = {};
632 std::vector<int64_t> attr_axes = {};
633 if (nodeproto.input_size() == 1) {
634 for (
int_t i = 0; i < nodeproto.attribute_size(); i++) {
635 std::string attribute_name = nodeproto.attribute(i).name();
636 if (attribute_name ==
"starts")
637 attr_starts = {nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()};
638 if (attribute_name ==
"ends")
639 attr_ends = {nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()};
640 if (attribute_name ==
"axes")
641 attr_axes = {nodeproto.attribute(i).ints().begin(), nodeproto.attribute(i).ints().end()};
645 std::unique_ptr<ROperator> op;
646 switch (input_type) {
648 if (axisTensorNames.size() > 0) {
655 throw std::runtime_error(
656 "TMVA::SOFIE - Unsupported - Operator Slice has invalid input type for input axis descriptors " +
657 std::to_string(
static_cast<int>(axis_type)));
658 }
else if (attr_starts.size() > 0 && attr_ends.size() > 0) {
662 throw std::runtime_error(
"TMVA::SOFIE - Unsupported - Operator Slice has invalid attribues");
666 throw std::runtime_error(
"TMVA::SOFIE - Unsupported - Operator Slice does not yet support input type " +
667 std::to_string(
static_cast<int>(input_type)));
670 ETensorType output_type = (op->TypeInference({input_type}))[0];
671 auto it2 = tensor_type.find(nodeproto.output(0));
672 if (it2 == tensor_type.end()) {
673 tensor_type[nodeproto.output(0)] = output_type;
// Parser for the ONNX RNN operator. Mirrors the GRU/LSTM parsers: reads the
// cell attributes, optional inputs/outputs, and registers both output types.
// FIX 1: the `clip` attribute is a FLOAT in the ONNX spec and was read with
// .i() (truncating, and reading the wrong proto field) — changed to .f(),
// consistent with the GRU and LSTM parsers.
// FIX 2: two error paths constructed a std::runtime_error and immediately
// discarded it (no `throw`) — added the missing `throw`s.
// NOTE(review): `input_type` declaration, else/brace lines and the switch
// over input_type are elided from this view.
679std::unique_ptr<ROperator>
make_ROperator_RNN(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& , std::unordered_map<std::string, ETensorType>& tensor_type) {
683 auto input_name = nodeproto.input(0);
684 auto it = tensor_type.find(input_name);
685 if (it != tensor_type.end()) {
686 input_type = it->second;
689 throw std::runtime_error(
"TMVA::SOFIE ONNX Parser RNN op has input tensor " + input_name +
" but its type is not yet registered");
692 std::unique_ptr<ROperator> op;
// RNN attribute defaults per the ONNX specification.
694 std::vector<float> attr_activation_alpha = {};
695 std::vector<float> attr_activation_beta = {};
696 std::vector<std::string> attr_activations = {};
697 float attr_clip = 0.;
698 std::string attr_direction =
"forward";
699 size_t attr_hidden_size = 0;
700 size_t attr_layout = 0;
702 for (
int_t i = 0; i < nodeproto.attribute_size(); i++) {
703 std::string attribute_name = nodeproto.attribute(i).name();
704 if (attribute_name ==
"activation_alpha") {
705 attr_activation_alpha = {nodeproto.attribute(i).floats().begin(), nodeproto.attribute(i).floats().end()};
706 }
else if (attribute_name ==
"activation_beta") {
707 attr_activation_beta = {nodeproto.attribute(i).floats().begin(), nodeproto.attribute(i).floats().end()};
708 }
else if (attribute_name ==
"activations") {
709 attr_activations = {nodeproto.attribute(i).strings().begin(), nodeproto.attribute(i).strings().end()};
710 }
else if (attribute_name ==
"clip") {
711 attr_clip = nodeproto.attribute(i).f();
712 }
else if (attribute_name ==
"direction") {
713 attr_direction = nodeproto.attribute(i).s();
714 }
else if (attribute_name ==
"hidden_size") {
715 attr_hidden_size = nodeproto.attribute(i).i();
716 }
else if (attribute_name ==
"layout") {
717 attr_layout = nodeproto.attribute(i).i();
// Unknown attributes are warned about but ignored.
719 std::cout <<
"TMVA SOFIE Warning - Model Loading - Attribute " << attribute_name <<
" in OperatorNode " << nodeproto.name() <<
" is not defined in ONNX IR and not applied!\n";
// Optional inputs (3..5) and optional outputs (0..1).
724 std::string name_b =
"";
725 std::string name_sequence_lens =
"";
726 std::string name_initial_h =
"";
727 std::string name_y =
"";
728 std::string name_y_h =
"";
729 if (nodeproto.input_size() > 3) {
730 name_b = nodeproto.input(3);
732 if (nodeproto.input_size() > 4) {
733 name_sequence_lens = nodeproto.input(4);
735 if (nodeproto.input_size() > 5) {
736 name_initial_h = nodeproto.input(5);
738 if (nodeproto.output_size() > 0) {
739 name_y = nodeproto.output(0);
741 if (nodeproto.output_size() > 1) {
742 name_y_h = nodeproto.output(1);
747 op.reset(
new ROperator_RNN<float>(attr_activation_alpha, attr_activation_beta, attr_activations,
748 attr_clip, attr_direction, attr_hidden_size, attr_layout,
749 nodeproto.input(0), nodeproto.input(1), nodeproto.input(2),
750 name_b, name_sequence_lens, name_initial_h, name_y, name_y_h));
754 throw std::runtime_error(
"TMVA::SOFIE - Unsupported - Operator RNN does not yet support input type " + std::to_string(
static_cast<int>(input_type)));
// RNN has two outputs (Y, Y_h): register both inferred types.
757 auto output_type = op->TypeInference({input_type, input_type});
758 for (
size_t i = 0; i < 2; i++) {
759 if (tensor_type.find(nodeproto.output(i)) == tensor_type.end()) {
760 tensor_type[nodeproto.output(i)] = output_type[i];
// Parser for the ONNX LSTM operator. Reads the cell attributes, the optional
// inputs B / sequence_lens / initial_h / initial_c / P and optional outputs
// Y / Y_h / Y_c, then registers the output types.
// FIX: two error paths constructed a std::runtime_error and immediately
// discarded it (no `throw`) — added the missing `throw`s.
// NOTE(review): `input_type`, `name_b`/`name_p`/`name_y` declarations, the
// switch over input_type and the head of the ROperator_LSTM constructor
// call are elided from this view.
767std::unique_ptr<ROperator>
make_ROperator_LSTM(
const onnx::NodeProto& nodeproto,
const onnx::GraphProto& , std::unordered_map<std::string, ETensorType>& tensor_type) {
771 auto input_name = nodeproto.input(0);
772 auto it = tensor_type.find(input_name);
773 if (it != tensor_type.end()) {
774 input_type = it->second;
777 throw std::runtime_error(
"TMVA::SOFIE ONNX Parser LSTM op has input tensor " + input_name +
" but its type is not yet registered");
780 std::unique_ptr<ROperator> op;
// LSTM attribute defaults per the ONNX specification.
782 std::vector<float> attr_activation_alpha;
783 std::vector<float> attr_activation_beta;
784 std::vector<std::string> attr_activations;
785 float attr_clip = 0.;
786 std::string attr_direction =
"forward";
787 size_t attr_hidden_size = 0;
788 size_t attr_input_forget = 0;
789 size_t attr_layout = 0;
791 for (
int_t i = 0; i < nodeproto.attribute_size(); i++) {
792 std::string attribute_name = nodeproto.attribute(i).name();
793 if (attribute_name ==
"activation_alpha") {
794 attr_activation_alpha = {nodeproto.attribute(i).floats().begin(), nodeproto.attribute(i).floats().end()};
795 }
else if (attribute_name ==
"activation_beta") {
796 attr_activation_beta = {nodeproto.attribute(i).floats().begin(), nodeproto.attribute(i).floats().end()};
797 }
else if (attribute_name ==
"activations") {
798 attr_activations = {nodeproto.attribute(i).strings().begin(), nodeproto.attribute(i).strings().end()};
799 }
else if (attribute_name ==
"clip") {
800 attr_clip = nodeproto.attribute(i).f();
801 }
else if (attribute_name ==
"direction") {
802 attr_direction = nodeproto.attribute(i).s();
803 }
else if (attribute_name ==
"hidden_size") {
804 attr_hidden_size = nodeproto.attribute(i).i();
805 }
else if (attribute_name ==
"input_forget") {
806 attr_input_forget = nodeproto.attribute(i).i();
807 }
else if (attribute_name ==
"layout") {
808 attr_layout = nodeproto.attribute(i).i();
// Unknown attributes are warned about but ignored.
810 std::cout <<
"TMVA SOFIE Warning - Model Loading - Attribute " << attribute_name <<
" in OperatorNode " << nodeproto.name() <<
" is not defined in ONNX IR and not applied!\n";
// Optional inputs (3..7) and optional outputs (0..2).
816 std::string name_sequence_lens;
817 std::string name_initial_h;
818 std::string name_initial_c;
821 std::string name_y_h;
822 std::string name_y_c;
823 if (nodeproto.input_size() > 3) {
824 name_b = nodeproto.input(3);
826 if (nodeproto.input_size() > 4) {
827 name_sequence_lens = nodeproto.input(4);
829 if (nodeproto.input_size() > 5) {
830 name_initial_h = nodeproto.input(5);
832 if (nodeproto.input_size() > 6) {
833 name_initial_c = nodeproto.input(6);
835 if (nodeproto.input_size() > 7) {
836 name_p = nodeproto.input(7);
838 if (nodeproto.output_size() > 0) {
839 name_y = nodeproto.output(0);
841 if (nodeproto.output_size() > 1) {
842 name_y_h = nodeproto.output(1);
844 if (nodeproto.output_size() > 2) {
845 name_y_c = nodeproto.output(2);
851 attr_clip, attr_direction, attr_hidden_size, attr_input_forget, attr_layout,
852 nodeproto.input(0), nodeproto.input(1), nodeproto.input(2),
853 name_b, name_sequence_lens, name_initial_h, name_initial_c, name_p,
854 name_y, name_y_h, name_y_c));
858 throw std::runtime_error(
"TMVA::SOFIE - Unsupported - Operator LSTM does not yet support input type " + std::to_string(
static_cast<int>(input_type)));
// NOTE(review): only outputs 0 and 1 get types registered here although
// LSTM may have a third output (Y_c) — verify against the original source.
861 auto output_type = op->TypeInference({input_type, input_type});
862 for (
size_t i = 0; i < 2; i++) {
863 if (tensor_type.find(nodeproto.output(i)) == tensor_type.end()) {
864 tensor_type[nodeproto.output(i)] = output_type[i];
// Body of make_ROperator_BatchNormalization: builds the operator from the
// five-input form (X, scale, B, mean, var) and registers the output type.
// FIX 1: the epsilon literal was split into invalid tokens ("1" / "e-05")
// by the extraction — restored to 1e-05 (the ONNX default).
// FIX 2: an error path constructed a std::runtime_error and immediately
// discarded it (no `throw`) — added the missing `throw`.
// NOTE(review): the signature's first line, `input_type` declaration, the
// attribute-reading loop, switch and return are elided from this view.
871 const onnx::GraphProto &,
872 std::unordered_map<std::string, ETensorType> &tensor_type)
877 auto input_name = nodeproto.input(0);
878 auto it = tensor_type.find(input_name);
879 if (it != tensor_type.end()) {
880 input_type = it->second;
882 throw std::runtime_error(
"TMVA::SOFIE ONNX Parser BatchNorm op has input tensor " + input_name +
883 " but its type is not yet registered");
886 std::unique_ptr<ROperator> op;
// ONNX defaults: epsilon = 1e-05, momentum = 0.9, training_mode = 0.
887 float fepsilon = 1e-05;
888 float fmomentum = 0.9;
889 std::size_t ftraining_mode = 0;
893 if (nodeproto.input_size() == 5) {
894 op.reset(
new ROperator_BatchNormalization<float>(fepsilon, fmomentum, ftraining_mode, nodeproto.input(0), nodeproto.input(1), nodeproto.input(2), nodeproto.input(3), nodeproto.input(4), nodeproto.output(0)));
899 throw std::runtime_error(
"TMVA::SOFIE - Unsupported - Operator BatchNorm does not yet support input type " + std::to_string(
static_cast<int>(input_type)));
902 ETensorType output_type = (op->TypeInference({input_type, input_type, input_type, input_type, input_type}))[0];
903 auto it2 = tensor_type.find(nodeproto.output(0));
904 if (it2 == tensor_type.end()) {
905 tensor_type[nodeproto.output(0)] = output_type;
// Body of the ONNX Parse entry point (signature elided from this view):
// loads an .onnx file into an onnx::ModelProto and translates it into an
// RModel — input tensor infos, initialized (weight) tensors, one ROperator
// per graph node, and the list of output tensor names.
// Strip the directory part of the file name for the RModel metadata.
920 size_t isep = filename.rfind(sep, filename.length());
921 std::string filename_nodir = filename;
922 if (isep != std::string::npos){
923 filename_nodir = (filename.substr(isep+1, filename.length() - isep));
// Record the parse timestamp (UTC). NOTE(review): std::gmtime/std::asctime
// use shared static buffers and are not thread-safe.
928 std::time_t ttime = std::time(0);
929 std::tm* gmt_time = std::gmtime(&ttime);
930 std::string parsetime (std::asctime(gmt_time));
935 GOOGLE_PROTOBUF_VERIFY_VERSION;
937 onnx::ModelProto model;
938 RModel rmodel(filename_nodir, parsetime);
940 std::unordered_map<std::string, ETensorType> tensor_type;
942 std::fstream input(filename, std::ios::in | std::ios::binary);
943 if (!model.ParseFromIstream(&input)){
944 throw std::runtime_error(
"TMVA::SOFIE - Failed to parse onnx file");
947 const onnx::GraphProto&
graph = model.graph();
// NOTE(review): ShutdownProtobufLibrary() is called here while `graph` (a
// reference into `model`) is still used below — confirm this is safe for
// the protobuf version in use.
948 google::protobuf::ShutdownProtobufLibrary();
// Names of initializers (weights): these appear in graph.input() too and
// must be skipped when collecting the user-facing input tensors.
953 std::unordered_set<std::string> initializer_names;
954 for (
int i=0; i <
graph.initializer_size(); i++){
955 initializer_names.insert(
graph.initializer(i).name());
// Register every graph input's element type; skip initializers.
959 for (
int i=0; i <
graph.input_size(); i++){
961 tensor_type[
graph.input(i).name()] =
static_cast<ETensorType>(
graph.input(i).type().tensor_type().elem_type());
963 if (initializer_names.find(
graph.input(i).name()) != initializer_names.end())
continue;
966 const onnx::ValueInfoProto& valueinfoproto =
graph.input(i);
967 std::string input_name = valueinfoproto.name();
971 throw std::runtime_error(
"TMVA::SOFIE Data type in input tensor " + input_name +
" not supported!\n");
// Build the shape: a dimension is either a fixed value (kDimValue) or a
// named runtime parameter (kDimParam).
974 std::vector<Dim> fShape;
975 bool existParam =
false;
976 if (!valueinfoproto.type().tensor_type().has_shape())
throw std::runtime_error(
"TMVA::SOFIE datanode with no shape restrictions is not supported yet");
977 for (
int j = 0; j < valueinfoproto.type().tensor_type().shape().dim_size(); j++){
979 if (valueinfoproto.type().tensor_type().shape().dim(j).value_case() == onnx::TensorShapeProto_Dimension::ValueCase::kDimValue){
980 dim.
dim = valueinfoproto.type().tensor_type().shape().dim(j).dim_value();
981 }
else if (valueinfoproto.type().tensor_type().shape().dim(j).value_case() == onnx::TensorShapeProto_Dimension::ValueCase::kDimParam){
984 dim.
param = valueinfoproto.type().tensor_type().shape().dim(j).dim_param();
986 throw std::runtime_error(
"TMVA::SOFIE ONNX file error: Valueinfoproto " + input_name +
" has neither dim_value nor dim_param! \n");
988 fShape.push_back(dim);
// A scalar input (dim_size == 0) still gets one dimension entry.
990 if (valueinfoproto.type().tensor_type().shape().dim_size() == 0){
993 fShape.push_back(dim);
// Fully-fixed shapes are also kept as plain size_t values.
997 std::vector<size_t> fShape_sizet;
998 for (
auto& j: fShape){
999 fShape_sizet.push_back(j.dim);
// Copy each initializer (weight tensor) into an owned buffer.
// NOTE(review): const_cast + ExtractSubrange mutates the proto in place,
// and the malloc'd buffer assumes float data — verify the elided switch
// restricts this path to FLOAT tensors.
1009 for (
int i=0; i <
graph.initializer_size(); i++){
1010 onnx::TensorProto* tensorproto =
const_cast<onnx::TensorProto*
>(&
graph.initializer(i));
1011 std::vector<std::size_t> fShape;
1012 std::size_t fLength = 1;
1013 for (
int j = 0; j < tensorproto->dims_size(); j++){
1014 fShape.push_back(tensorproto->dims(j));
1015 fLength *= tensorproto->dims(j);
1018 std::string input_name =
graph.initializer(i).name();
1023 std::shared_ptr<void> data(
malloc(fLength *
sizeof(
float)),
free);
// Weights may be stored either as raw bytes or as repeated float fields.
1025 if (tensorproto->raw_data().empty() ==
false){
1026 auto raw_data_ptr =
reinterpret_cast<float*
>(
const_cast<char*
>(tensorproto->raw_data().c_str()));
1027 std::memcpy(data.get(), raw_data_ptr, fLength *
sizeof(
float));
1029 tensorproto->mutable_float_data()->ExtractSubrange(0, tensorproto->float_data_size(),
static_cast<float*
>(data.get()));
1035 default:
throw std::runtime_error(
"Data type in weight tensor " +
graph.initializer(i).name() +
" not supported!\n");
// Create one ROperator per node; certain op types additionally register
// required BLAS routines / std libraries (bodies elided in this view).
1041 for (
int i=0; i <
graph.node_size(); i++){
1047 std::string op_type =
graph.node(i).op_type();
1048 if (op_type ==
"Gemm") {
1050 }
else if (op_type ==
"Conv") {
1052 }
else if (op_type ==
"RNN") {
1054 }
else if (op_type ==
"Selu" || op_type ==
"Sigmoid") {
1056 }
else if (op_type ==
"LSTM") {
1058 }
else if (op_type ==
"BatchNormalization") {
1060 }
else if (op_type ==
"GRU") {
// Collect the graph's output tensor names for the RModel.
1065 std::vector<std::string> outputnames;
1066 for (
int i=0; i <
graph.output_size(); i++){
1067 outputnames.push_back(
graph.output(i).name());
RModel Parse(std::string filename)
void AddOutputTensorNameList(std::vector< std::string > outputtensornames)
void AddInputTensorInfo(std::string input_name, ETensorType type, std::vector< Dim > shape)
void AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
void AddBlasRoutines(std::vector< std::string > routines)
void AddNeededStdLib(std::string libname)
void AddOperator(std::unique_ptr< ROperator > op, int order_execution=-1)
Gated Recurrent Unit operator.
Long Short-Term Memory operator.
Recurrent Neural Network operator.
std::unique_ptr< ROperator > make_ROperator_Conv(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Gemm(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_LSTM(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Selu(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Pool(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_RNN(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unordered_map< std::string, std::unique_ptr< ROperator >(*)(const onnx::NodeProto &, const onnx::GraphProto &, std::unordered_map< std::string, ETensorType > &)> factoryMethodMap
std::unique_ptr< ROperator > make_ROperator(size_t idx, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Relu(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_GRU(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Reshape(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_BatchNormalization(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Sigmoid(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Add(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
std::unique_ptr< ROperator > make_ROperator_Slice(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
const factoryMethodMap mapOptypeOperator
std::unique_ptr< ROperator > make_ROperator_Transpose(const onnx::NodeProto &nodeproto, const onnx::GraphProto &graphproto, std::unordered_map< std::string, ETensorType > &tensor_type)
create variable transformations
std::vector< size_t > pads
std::vector< size_t > kernel_shape
std::vector< size_t > dilations
std::vector< size_t > strides