RModel.cxx
Go to the documentation of this file.
1#include <limits>
2#include <algorithm>
3#include <cctype>
4#include <memory>
5#include <string>
6
7#include "TFile.h"
8
9#include "TMVA/RModel.hxx"
10#include "TMVA/SOFIE_common.hxx"
11
12namespace TMVA {
13namespace Experimental {
14namespace SOFIE {
15
16std::underlying_type_t<Options> operator|(Options opA, Options opB) {
17 return static_cast<std::underlying_type_t<Options>>(opA) | static_cast<std::underlying_type_t<Options>>(opB);
18}
19std::underlying_type_t<Options> operator|(std::underlying_type_t<Options> opA, Options opB) {
20 return opA | static_cast<std::underlying_type_t<Options>>(opB);
21}
22
23RModel::RModel(RModel&& other) {
24 fInputTensorInfos = std::move(other.fInputTensorInfos);
25 fReadyInputTensorInfos = std::move(other.fReadyInputTensorInfos);
26 fOutputTensorNames = other.fOutputTensorNames;
27 fInputTensorNames = other.fInputTensorNames;
28 fOperators = std::move(other.fOperators);
29 fInitializedTensors = std::move(other.fInitializedTensors);
30 fIntermediateTensorInfos = std::move(other.fIntermediateTensorInfos);
31 fName = other.fName;
32 fFileName = other.fFileName;
33 fParseTime = other.fParseTime;
34 fGC = other.fGC;
35 fNeededBlasRoutines = other.fNeededBlasRoutines;
36 fNeededStdLib = other.fNeededStdLib;
37}
38
39RModel& RModel::operator=(RModel&& other) {
40 fInputTensorInfos = std::move(other.fInputTensorInfos);
41 fReadyInputTensorInfos = std::move(other.fReadyInputTensorInfos);
42 fOutputTensorNames = other.fOutputTensorNames;
43 fInputTensorNames = other.fInputTensorNames;
44 fOperators = std::move(other.fOperators);
45 fInitializedTensors = std::move(other.fInitializedTensors);
46 fIntermediateTensorInfos = std::move(other.fIntermediateTensorInfos);
47 fName = other.fName;
48 fFileName = other.fFileName;
49 fParseTime = other.fParseTime;
50 fGC = other.fGC;
51 fNeededBlasRoutines = other.fNeededBlasRoutines;
52 fNeededStdLib = other.fNeededStdLib;
53 return *this;
54}
55
56const std::vector<size_t>& RModel::GetTensorShape(std::string name) {
57 auto f = fReadyInputTensorInfos.find(name);
58 if (f != fReadyInputTensorInfos.end()) {
59 return f->second.shape;
60 }
61 auto f2 = fInitializedTensors.find(name);
62 if (f2 != fInitializedTensors.end()) {
63 return f2->second.shape();
64 }
65 auto f3 = fInputTensorInfos.find(name);
66 if (f3 != fInputTensorInfos.end()) {
67 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] is an input tensor with unspecified dimension parameter");
68 }
69 auto f4 = fIntermediateTensorInfos.find(name);
70 if (f4 != fIntermediateTensorInfos.end()) {
71 return f4->second.shape;
72 }
73 if (fDynamicTensorInfos.find(name) != fDynamicTensorInfos.end())
74 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] is a dynamic tensor. Use GetDynamicTensorShape instead of GetTensorShape");
75
76 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] for which the shape is requested is not found");
77}
78
79std::vector<Dim> RModel::GetDynamicTensorShape(std::string name) {
80 if (auto f = fDynamicTensorInfos.find(name); f != fDynamicTensorInfos.end()) {
81 return f->second.shape;
82 }
83 if (auto f = fInputTensorInfos.find(name); f != fInputTensorInfos.end()) {
84 return f->second.shape;
85 }
86 // in case it is not a dynamic tensor, convert the normal shape to a Dim-based one
87 // for this we need to return the vector by value
88 return ConvertShapeToDim(GetTensorShape(name));
89}
90
91const ETensorType& RModel::GetTensorType(std::string name) {
92 auto f = fReadyInputTensorInfos.find(name);
93 if (f != fReadyInputTensorInfos.end()) {
94 return f->second.type;
95 }
96 auto f2 = fInitializedTensors.find(name);
97 if (f2 != fInitializedTensors.end()) {
98 return f2->second.type();
99 }
100 auto f3 = fInputTensorInfos.find(name);
101 if (f3 != fInputTensorInfos.end()) {
102 return f3->second.type;
103 }
104 auto f4 = fIntermediateTensorInfos.find(name);
105 if (f4 != fIntermediateTensorInfos.end()) {
106 return f4->second.type;
107 }
108 auto f5 = fDynamicTensorInfos.find(name);
109 if (f5 != fDynamicTensorInfos.end()){
110 return f5->second.type;
111 }
112
113 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] for which the type is requested is not found");
114}
115
116bool RModel::CheckIfTensorAlreadyExist(std::string tensor_name) {
117 if (fReadyInputTensorInfos.find(tensor_name) != fReadyInputTensorInfos.end()) return true;
118 if (fInputTensorInfos.find(tensor_name) != fInputTensorInfos.end()) return true;
119 if (fInitializedTensors.find(tensor_name) != fInitializedTensors.end()) return true;
120 if (fIntermediateTensorInfos.find(tensor_name) != fIntermediateTensorInfos.end()) return true;
121 if (fDynamicTensorInfos.find(tensor_name) != fDynamicTensorInfos.end()) return true;
122 return false;
123}
124
125void RModel::AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<Dim> shape) {
126 input_name = UTILITY::Clean_name(input_name);
127 if (CheckIfTensorAlreadyExist(input_name)) {
128 throw std::runtime_error("TMVA-SOFIE: input tensor with name " + input_name + " already exists \n");
129 }
130
131 InputTensorInfo inputInfo { type, shape };
132 fInputTensorInfos[input_name] = inputInfo;
133}
134
135void RModel::AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<size_t> shape) {
136 input_name = UTILITY::Clean_name(input_name);
137 if (CheckIfTensorAlreadyExist(input_name)) {
138 throw std::runtime_error("TMVA-SOFIE: input tensor with name " + input_name + " already exists \n");
139 }
140 TensorInfo inputInfo { type, shape };
141 fReadyInputTensorInfos[input_name] = inputInfo;
142}
143
144void RModel::AddInputTensorName(std::string input_name) {
145 fInputTensorNames.push_back(UTILITY::Clean_name(input_name));
146}
147
148void RModel::AddOperator(std::unique_ptr<ROperator> op, int order_execution) {
149 AddBlasRoutines(op->GetBlasRoutines());
150 auto libs = op->GetStdLibs();
151 for (auto& stdlib : libs) {
152 AddNeededStdLib(stdlib);
153 }
154 if (order_execution >= 0) {
155 fOperators.insert(fOperators.begin() + order_execution, std::move(op));
156 } else {
157 fOperators.push_back(std::move(op));
158 }
159}
160
161void RModel::AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data) {
162 tensor_name = UTILITY::Clean_name(tensor_name);
163 //NB: own data
164 if (CheckIfTensorAlreadyExist(tensor_name)) {
165 throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
166 }
167 InitializedTensor new_tensor {type, shape, data};
168 fInitializedTensors[tensor_name] = new_tensor;
169}
170
171void RModel::AddConstantTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data) {
172 tensor_name = UTILITY::Clean_name(tensor_name);
173 //NB: own data
174 if (CheckIfTensorAlreadyExist(tensor_name)) {
175 throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
176 }
177 InitializedTensor new_tensor {type, shape, data, true}; // add here flag to specify is a constant tensor
178 fInitializedTensors[tensor_name] = new_tensor;
179}
180
181bool RModel::IsInitializedTensor(const std::string& tensorName) const {
182 std::string name = UTILITY::Clean_name(tensorName);
183 return fInitializedTensors.find(name) != fInitializedTensors.end();
184}
185
186bool RModel::IsDynamicTensor(const std::string& tensorName) const {
187 std::string name = UTILITY::Clean_name(tensorName);
188 return fDynamicTensorInfos.find(name) != fDynamicTensorInfos.end();
189}
190bool RModel::IsInputTensor(const std::string& tensorName) const {
191 std::string name = UTILITY::Clean_name(tensorName);
192 return fInputTensorInfos.find(name) != fInputTensorInfos.end();
193}
194
195// generic addition of a tensor
196void RModel::AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<Dim> dim_shape) {
197 auto int_shape = ConvertShapeToInt(dim_shape);
198 if (!int_shape.empty())
199 AddIntermediateTensor(tensor_name, type, int_shape);
200 else
201 AddDynamicTensor(tensor_name, type, dim_shape);
202}
203
204void RModel::AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape) {
205 tensor_name = UTILITY::Clean_name(tensor_name);
206 if (CheckIfTensorAlreadyExist(tensor_name)) {
207 throw std::runtime_error("TMVA-SOFIE: intermediate tensor with name " + tensor_name + " already exists \n");
208 }
209 TensorInfo new_tensor {type, shape};
210 fIntermediateTensorInfos[tensor_name] = new_tensor;
211}
212
213void RModel::AddDynamicTensor(std::string tensor_name, ETensorType type, std::vector<Dim> shape){
214 tensor_name = UTILITY::Clean_name(tensor_name);
215 if (CheckIfTensorAlreadyExist(tensor_name)){
216 throw std::runtime_error("TMVA-SOFIE: intermediate tensor with name " + tensor_name + " already exists \n");
217 }
218 DynamicTensorInfo new_tensor {type, shape};
219 fDynamicTensorInfos[tensor_name] = new_tensor;
220 // store shape parameter if not existing
221 for (auto &d : shape) {
222 if (d.isParam) {
223 if (fShapeParams.count(d.param) == 0) {
224 // case parameter is an expression of some other existing parameter, no need to
225 // register it
226 if (d.dim != size_t(-1)) {
227 fShapeParams[d.param] = std::to_string(d.dim);
228 }
229 }
230 }
231 }
232}
233
234void RModel::AddOutputTensorNameList(std::vector<std::string> outputtensornames) {
235 fOutputTensorNames.clear();
236 for(auto& it : outputtensornames) {
237 fOutputTensorNames.emplace_back(UTILITY::Clean_name(it));
238 }
239}
240
241void RModel::UpdateOutputTensorList(std::vector<std::string> curr_output_tensors, std::vector<std::string> new_output_tensors) {
242 for(auto& it:curr_output_tensors) {
243 fOutputTensorNames.erase(std::remove(fOutputTensorNames.begin(), fOutputTensorNames.end(), it), fOutputTensorNames.end());
244 }
245 fOutputTensorNames.insert(fOutputTensorNames.end(), new_output_tensors.begin(), new_output_tensors.end());
246}
247
248void RModel::UpdateInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data) {
249 tensor_name = UTILITY::Clean_name(tensor_name);
250 if (!CheckIfTensorAlreadyExist(tensor_name)) {
251 throw std::runtime_error("TMVA-SOFIE: tensor " + tensor_name + " not found when trying to update it");
252 }
253 InitializedTensor new_tensor {type, shape, data};
254 fInitializedTensors[tensor_name] = new_tensor;
255}
256
257std::shared_ptr<void> RModel::GetInitializedTensorData(std::string tensor_name) {
258 auto f = fInitializedTensors.find(tensor_name);
259 if (f == fInitializedTensors.end()) {
260 throw std::runtime_error("TMVA-SOFIE: tensor " + tensor_name + " not found when trying to get its data");
261 } else {
262 return f->second.sharedptr();
263 }
264}
265
266void RModel::SetNotWritableInitializedTensor(const std::string & tensor_name) {
267 auto t = fInitializedTensors.find(tensor_name);
268 if (t == fInitializedTensors.end()) {
269 throw std::runtime_error("TMVA-SOFIE: initialized tensor " + tensor_name + " not found when trying to get its info");
270 }
271 t->second.SetNotWritable();
272 }
273
274void RModel::Initialize(int batchSize, bool verbose) {
275
277 fDynamicTensorInfos.clear();
278
279 // loop on the inputs and see if the shape can be fully specified
280 // if the batch size is provided it can be used to specify the full shape
281 // Add the fully specified tensors to the fReadyInputTensorInfos collection
282 auto originalInputTensorInfos = fInputTensorInfos; // need to copy because we may delete elements
283 for (auto &input : originalInputTensorInfos) {
284 if (verbose) std::cout << "looking at the tensor " << input.first << std::endl;
285 // if a batch size is provided, use it to fix the batch dimension
286 // assume it is parameterised as "bs" or "batch_size"
287 if (batchSize > 0) {
288 // std::vector<Dim> shape;
289 // shape.reserve(input.second.shape.size());
290 // assume the first parameter is the batch size
291 if (!input.second.shape.empty()) {
292 auto & d0 = input.second.shape[0];
293 if (d0.isParam) {
294 if (verbose) std::cout << "Fix the batch size to " << batchSize << std::endl;
295 d0 = Dim{static_cast<size_t>(batchSize)};
296 }
297 else { // look for cases where a "bs" or "batch_size" parameter is specified in the tensor shape
298 for (auto &d : input.second.shape) {
299 if (d.isParam && (d.param == "bs" || d.param == "batch_size")) {
300 d = Dim{static_cast<size_t>(batchSize)};
301 if (verbose) std::cout << "Input shape has bs or batch_size as names. Fix the batch size to " << batchSize << std::endl;
302 }
303 }
304 }
305 }
306 }
307 auto shape = ConvertShapeToInt(input.second.shape);
308 if (!shape.empty()) {
309 // remove from the tensor info old dynamic shape
310 fInputTensorInfos.erase(input.first);
311 // add to the ready input tensor information the new fixed shape
312 AddInputTensorInfo(input.first, input.second.type, shape);
313 }
314 // store the parameters of the input tensors
315 else {
316 // store the found parametric shape parameters
317 for (auto &d : input.second.shape) {
318 if (d.isParam)
319 fShapeParams[d.param] = std::to_string(d.dim);
320 }
321 }
322 }
323
324 if (verbose) {
327 }
328
329 // check if there are initialized tensors to write in a weight file
330 // support for the time being only weight of FLOAT type
331 if (fUseWeightFile) {
332 bool modelHasWeights = false;
333 for (auto &i : fInitializedTensors) {
334 if (i.second.type() == ETensorType::FLOAT) {
335 modelHasWeights = true;
336 break;
337 }
338 }
339 if (!modelHasWeights)
340 fUseWeightFile = false;
341 }
342 // Go through model and initialize each operator
343 int i = 0;
344 for (auto &op : fOperators) {
345 if (verbose) {
346 auto& r = *op.get();
347 std::cout << "Initializing operator " << i << " " << typeid(r).name() << std::endl;
348 }
349 op->Initialize(*this);
350 i++;
351 }
352}
353
355 if (!fInitializedTensors.empty())
356 fGC += "// initialized tensors\n";
357 for (auto& i: fInitializedTensors) {
358
359 size_t length = ConvertShapeToLength(i.second.shape());
360 // in case we are not using weight files, or for tensors created from a Constant operator
361 if (!fUseWeightFile || i.second.IsConstantTensor() ) {
362 //std::cout << "write tensor " << i.first << std::endl;
363 std::stringstream strs;
364 if (i.second.type() == ETensorType::FLOAT) {
365 strs << "float tensor_" << i.first << "[" << length << "] = {";
366 float const *data = i.second.data<float>();
367 for (size_t idx = 0; idx < length; idx++) {
368 strs << std::setprecision(std::numeric_limits<float>::max_digits10) << data[idx];
369 if (idx < length-1) strs << ", ";
370 }
371 strs << "};\n";
372 }
373 else if (i.second.type() == ETensorType::INT64) {
374 strs << "int64_t tensor_" << i.first << "[" << length << "] = {";
375 int64_t const *data = i.second.data<int64_t>();
376 for (size_t idx = 0; idx < length; idx++) {
377 strs << data[idx];
378 if (idx < length-1) strs << ", ";
379 }
380 strs << "};\n";
381 }
382 fGC += strs.str();
383 }
384 // case of tensors which are read from a file
385 else {
386 if (i.second.type() == ETensorType::FLOAT) {
387 fGC += "std::vector<float> fTensor_" + i.first + " = std::vector<float>(" + std::to_string(length) + ");\n";
388 fGC += "float * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
389 }
390 }
391 }
392}
393
395 if (!fIntermediateTensorInfos.empty()) {
396 fGC += "\n//--- declare and allocate the intermediate tensors\n";
397 for (auto &i : fIntermediateTensorInfos) {
398 size_t length = ConvertShapeToLength(i.second.shape);
399 if (i.second.type == ETensorType::FLOAT) {
400 fGC += "std::vector<float> fTensor_" + i.first + " = std::vector<float>(" + std::to_string(length) + ");\n";
401 fGC += "float * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
402 }
403 if (i.second.type == ETensorType::DOUBLE) {
404 fGC += "std::vector<double> fTensor_" + i.first + " = std::vector<double>(" + std::to_string(length) + ");\n";
405 fGC += "double * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
406 }
407 if (i.second.type == ETensorType::INT64) {
408 fGC += "std::vector<int64_t> fTensor_" + i.first + " = std::vector<int64_t>(" + std::to_string(length) + ");\n";
409 fGC += "int64_t * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
410 }
411 if (i.second.type == ETensorType::BOOL) {
412 fGC += "std::vector<bool> fTensor_" + i.first + " = std::vector<bool>(" + std::to_string(length) + ");\n";
413 // don't allocate a pointer since std::vector<bool> does not have a .data() member
414 }
415 }
416 }
417 // add also the dynamic tensors (only declarations, allocation will be done later)
418 if (!fDynamicTensorInfos.empty()) {
419 fGC += "//--- declare the dynamic tensors\n";
420 for (auto &i : fDynamicTensorInfos) {
421 if (i.second.type == ETensorType::FLOAT) {
422 fGC += "std::vector<float> fTensor_" + i.first + ";\n";
423 fGC += "float * tensor_" + i.first + " = nullptr;\n";
424 } else if (i.second.type == ETensorType::DOUBLE) {
425 fGC += "std::vector<double> fTensor_" + i.first + ";\n";
426 fGC += "double * tensor_" + i.first + " = nullptr;\n";
427 } else if (i.second.type == ETensorType::INT64) {
428 fGC += "std::vector<int64_t> fTensor_" + i.first + ";\n";
429 fGC += "int64_t * tensor_" + i.first + " = nullptr;\n";
430 }
431 }
432 }
433}
434
436 fGC += "//---- allocate the intermediate dynamic tensors\n";
437 std::stringstream out;
438 for (auto & i: fDynamicTensorInfos) {
439 auto length = ConvertDynamicShapeToLength(i.second.shape);
440 out << SP << "if (" << length << " > 0) {\n";
441 out << SP << SP << "fTensor_" << i.first << ".resize(" << length << ");\n";
442 out << SP << SP << "tensor_" << i.first << " = fTensor_" << i.first << ".data();\n";
443 out << SP << "}\n";
444 }
445 fGC += out.str();
446}
447
449
450 size_t outputSize = fOutputTensorNames.size();
451 // assume output types are all the same
452 if (outputSize == 0)
453 throw std::runtime_error("TMVA-SOFIE: an output size of zero is not supported");
454
455 std::string outputType;
456 ETensorType eOutputType;
457 eOutputType = GetTensorType(fOutputTensorNames[0]);
458 outputType = ConvertTypeToString(eOutputType);
459 if (outputSize == 1) {
460 fGC += "std::vector<" + outputType + "> ";
461 } else {
462 // we assume all output types are the same
463 for (size_t i = 1; i < outputSize; i++) {
464 if (GetTensorType(fOutputTensorNames[i]) != eOutputType)
465 throw std::runtime_error("TMVA-SOFIE: different output tensor types are not supported");
466 }
467 fGC += "std::vector<std::vector<" + outputType + ">> ";
468 }
469
470 fGC += "infer(";
471
472 std::unordered_map<std::string, int> inputParams;
473 int i_input = 0;
474 for (auto &name : fInputTensorNames) {
475 // if it is a dynamic tensor, pass its shape parameters as function arguments
476 if (IsInputTensor(name)) {
477 auto shape = GetDynamicTensorShape(name);
478 for (auto &d : shape) {
479 std::string pName = d.param;
480 // need to check whether the input parameter already exists in another input tensor
481 if (d.isParam && inputParams.count(pName) == 0) {
482 fGC += "size_t " + d.param + ",";
483 inputParams[pName] = i_input;
484 }
485 }
486 }
487 switch (GetTensorType(name)) {
488 case ETensorType::FLOAT: {
489 fGC += "float* tensor_" + name + ",";
490 break;
491 }
492 case ETensorType::INT32: {
493 fGC += "int32_t* tensor_" + name + ",";
494 break;
495 }
496 case ETensorType::INT64: {
497 fGC += "int64_t* tensor_" + name + ",";
498 break;
499 }
500 case ETensorType::DOUBLE: {
501 fGC += "double* tensor_" + name + ",";
502 break;
503 }
504 case ETensorType::BOOL: {
505 fGC += "bool* tensor_" + name + ",";
506 break;
507 }
508 default: {
509 throw std::runtime_error("TMVA-SOFIE: input tensor " + name +
510 " is of a data type which is not yet supported.");
511 }
512 }
513 i_input++;
514 }
515
516 if (fInputTensorNames.size() > 0) fGC.pop_back();// remove last ","
517 fGC += "){\n";
518
519 for (size_t id = 0; id < fOperators.size(); id++) {
520 fGC += (fOperators[id]->Generate(std::to_string(id)));
521 }
522
523 if (outputSize == 1) {
524 std::string tensorName = fOutputTensorNames[0];
525 if (fIntermediateTensorInfos.count(tensorName) > 0) {
526 // need to check if the size is the same (don't want to return a vector with a larger size)
527 // in that case it is better to copy
528 fGC += SP + "return fTensor_" + tensorName + ";\n";
529 } else {
530 // include also dynamic tensors since the vectors can be allocated with a size larger than their output
531 // we need a special handling for bool type allocated as vector<bool>
532 auto outputLength = ConvertDynamicShapeToLength(GetDynamicTensorShape(tensorName));
533 if (IsDynamicTensor(tensorName) && eOutputType == ETensorType::BOOL) {
534 fGC += SP + "std::vector<bool> ret (fTensor_" + tensorName + ".begin(), fTensor_" + tensorName +
535 ".begin() + " + outputLength + ");\n";
536 } else {
537 fGC += SP + "std::vector<" + outputType + "> ret (tensor_" + tensorName + ", tensor_" + tensorName + " + " +
538 outputLength + ");\n";
539 }
540 fGC += SP + "return ret;\n";
541 }
542 } else {
543 // here we assume all outputs have same type
544 fGC += SP + "std::vector<std::vector<" + outputType + ">> ret({";
545 for (size_t i = 0; i < outputSize; i++) {
546 std::string tensorName = fOutputTensorNames[i];
547 if (!tensorName.empty()) {
548 if (fIntermediateTensorInfos.count(tensorName) > 0) {
549 fGC += "fTensor_" + tensorName;
550 } else {
551 auto outputLength = ConvertDynamicShapeToLength(GetDynamicTensorShape(tensorName));
552 if (IsDynamicTensor(tensorName) && eOutputType == ETensorType::BOOL) {
553 fGC += "std::vector<bool>(fTensor_" + tensorName + ".begin(), fTensor_" + tensorName + ".begin() + " +
554 outputLength + ");\n";
555 } else {
556 fGC += "std::vector<" + outputType + ">(tensor_" + tensorName + ", tensor_" + tensorName + " + " +
557 outputLength + ")";
558 }
559 }
560 if (i < outputSize - 1)
561 fGC += ",";
562 } else {
563 fGC += "{}";
564 }
565 }
566 fGC += "});\n";
567 fGC += SP + "return ret;\n";
568 }
569 fGC += "}\n";
570}
571
572void RModel::Generate(std::underlying_type_t<Options> options, int batchSize, long pos, bool verbose) {
573 // session flag is used in operator initialize
574 if (static_cast<std::underlying_type_t<Options>>(Options::kNoSession) & options) {
575 fUseSession = false;
577 }
578 if (static_cast<std::underlying_type_t<Options>>(Options::kNoWeightFile) & options) {
579 fUseWeightFile = false;
581 }
582 if (static_cast<std::underlying_type_t<Options>>(Options::kRootBinaryWeightFile) & options) {
583 fUseWeightFile = true;
585 }
586 if (fUseWeightFile && !fUseSession) {
587 throw
588 std::runtime_error("TMVA-SOFIE: RModel::Generate: cannot use a separate weight file without generating a Session class");
589 }
590
591 if (static_cast<std::underlying_type_t<Options>>(Options::kGNN) & options)
592 fIsGNN = true;
593 if (static_cast<std::underlying_type_t<Options>>(Options::kGNNComponent) & options)
594 fIsGNNComponent = true;
595
596 Initialize(batchSize, verbose);
597 std::string hgname;
598 if(!fIsGNNComponent) {
599 fGC.clear();
600 GenerateHeaderInfo(hgname);
601 if (fUseSession) {
602 fGC += "struct Session {\n";
603 }
604 }
605
608
609 if (fUseSession) {
610 // add here specific operator code that needs to define session data members
611 fGC += "\n";
612 for (size_t id = 0; id < fOperators.size(); id++) {
613 std::string opName = std::to_string(id);
614 fGC += fOperators[id]->GenerateSessionMembersCode(opName);
615 }
616 fGC += "\n";
617 // here add initialization and reading of weight tensors
618 if (fUseWeightFile) {
619 std::string fileName = fName;
620 if (fWeightFile == WeightFileType::Text) {
621 fileName += ".dat";
622 }
623 if (fWeightFile == WeightFileType::RootBinary) {
624 fileName += ".root";
625 }
626 fGC += "Session(std::string filename =\"" + fileName + "\"";
627 } else {
628 // no need to pass weight file since it is not used
629 // keep passing a string for compatibility
630 fGC += "Session(std::string = \"\"";
631 }
632 // add initialization of shape parameters
633 // assume all parameters are of type size_t
634 if (!fShapeParams.empty()) {
635 for (auto & p : fShapeParams) {
636 fGC += ",\n";
637 fGC += " size_t " + p.first + " = " + p.second;
638 }
639 }
640 fGC += ") {\n";
641
642 if (fUseWeightFile) {
643 fGC += "\n//--- reading weights from file\n";
645 fGC += "\n";
646 //fUseWeightFile = fUseWeightFile;
647 }
648
649 // now that the parameters have been passed we can allocate the dynamic tensors
651
652 // add here initialization code for operator
653 for (size_t id = 0; id < fOperators.size() ; id++) {
654 fGC += fOperators[id]->GenerateInitCode();
655 }
656
657 fGC += "}\n\n";
658 }
659
661
662 if(!fIsGNNComponent) {
663 if (fUseSession) {
664 fGC += "};\n";
665 }
666 fGC += ("} //TMVA_SOFIE_" + fName + "\n");
667 fGC += "\n#endif // " + hgname + "\n";
668 }
669}
670
671void RModel::ReadInitializedTensorsFromFile(long pos) {
672 // generate the code to read initialized tensors from a text data file
673 if (fWeightFile == WeightFileType::Text) {
674 if (fInitializedTensors.empty()) return;
675
676 fGC += " std::ifstream f;\n";
677 fGC += " f.open(filename);\n";
678 fGC += " if (!f.is_open()) {\n";
679 fGC += " throw std::runtime_error(\"tmva-sofie failed to open file \" + filename + \" for input weights\");\n";
680 fGC += " }\n";
681
682 if(fIsGNNComponent) {
683 fGC += " f.seekg(" + std::to_string(pos) + ");\n";
684 }
685
686 fGC += " std::string tensor_name;\n";
687 fGC += " size_t length;\n";
688
689 // loop on tensors and parse the file
690 for (auto& i: fInitializedTensors) {
691 // skip Constant and shape tensors
692 if (!i.second.IsWeightTensor()) continue;
693 std::string tensor_name = "tensor_" + i.first;
694 if (i.second.type() == ETensorType::FLOAT) {
695 size_t length = 1;
696 length = ConvertShapeToLength(i.second.shape());
697 std::string slength = std::to_string(length);
698 fGC += " f >> tensor_name >> length;\n";
699 fGC += " if (tensor_name != \"" + tensor_name + "\" ) {\n";
700 fGC += " std::string err_msg = \"TMVA-SOFIE failed to read the correct tensor name; expected name is " +
701 tensor_name + " , read \" + tensor_name;\n";
702 fGC += " throw std::runtime_error(err_msg);\n";
703 fGC += " }\n";
704 fGC += " if (length != " + slength + ") {\n";
705 fGC += " std::string err_msg = \"TMVA-SOFIE failed to read the correct tensor size; expected size is " +
706 slength + " , read \" + std::to_string(length) ;\n";
707 fGC += " throw std::runtime_error(err_msg);\n";
708 fGC += " }\n";
709 fGC += " for (size_t i = 0; i < length; ++i)\n";
710 fGC += " f >> " + tensor_name + "[i];\n";
711 fGC += " if (f.fail()) {\n";
712 fGC += " throw std::runtime_error(\"TMVA-SOFIE failed to read the values for tensor " + tensor_name + "\");\n";
713 fGC += " }\n";
714 } else {
715 throw std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be read from a file");
716 }
717 }
718 fGC += " f.close();\n";
719 }
720
721 // generate the code to read initialized tensors from a ROOT data file
722 else if (fWeightFile == WeightFileType::RootBinary) {
723 fGC += " {\n";
724 fGC += " std::unique_ptr<TFile> rootFile(TFile::Open(filename.c_str(), \"READ\"));\n";
725 fGC += " if (!rootFile->IsOpen()) {\n";
726 fGC += " throw std::runtime_error(\"tmva-sofie failed to open ROOT file for input weights\");\n";
727 fGC += " }\n";
728
729 std::string dirName = fName + "_weights";
730 fGC += " if (!rootFile->GetKey(\"" + dirName + "\")) {\n";
731 fGC += " throw std::runtime_error(\"tmva-sofie failed to open ROOT directory for input weights\");\n";
732 fGC += " }\n";
733
734 for (auto &i : fInitializedTensors) {
735 // skip Constant and shape tensors
736 if (!i.second.IsWeightTensor()) continue;
737 fGC += " {\n";
738 std::string tensor_name = "tensor_" + i.first;
739 if (i.second.type() == ETensorType::FLOAT) {
740 fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<float>*>(rootFile->Get(\"";
741 fGC += dirName + "/" + tensor_name + "\"));\n";
742 } else if (i.second.type() == ETensorType::DOUBLE) {
743 fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<double>*>(rootFile->Get(\"";
744 fGC += dirName + "/" + tensor_name + "\"));\n";
745 } else if (i.second.type() == ETensorType::INT64) {
746 fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<int64_t>*>(rootFile->Get(\"";
747 fGC += dirName + "/" + tensor_name + "\"));\n";
748 } else {
749 throw std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be read from a ROOT file");
750 }
751 fGC += " }\n";
752 }
753 fGC += " }\n";
754 }
755}
756
757long RModel::WriteInitializedTensorsToFile(std::string filename) {
758 // Determine the file extension based on the weight file type
759 std::string fileExtension;
760 switch (fWeightFile) {
761 case WeightFileType::None:
762 fileExtension = ".dat";
763 break;
764 case WeightFileType::RootBinary:
765 fileExtension = ".root";
766 break;
767 case WeightFileType::Text:
768 fileExtension = ".dat";
769 break;
770 }
771
772 // If filename is empty, use the model name as the base filename
773 if (filename.empty()) {
774 filename = fFileName + fileExtension;
775 }
776
777 // Write the initialized tensors to the file
778 if (fWeightFile == WeightFileType::RootBinary) {
779 if(fIsGNNComponent || fIsGNN) {
780 throw std::runtime_error("SOFIE-GNN yet not supports writing to a ROOT file.");
781 }
782 std::unique_ptr<TFile> outputFile(TFile::Open(filename.c_str(), "UPDATE"));
783
784 std::string dirName = fName + "_weights";
785 // check if directory exists, in case delete to replace with new one
786 if (outputFile->GetKey(dirName.c_str()))
787 outputFile->rmdir(dirName.c_str());
788
789 auto outputDir = outputFile->mkdir(dirName.c_str());
790
791 for (const auto& item : fInitializedTensors) {
792 // skip Constant tensors and tensors which are not writable (e.g. shape tensors)
793 if (!item.second.IsWeightTensor()) continue;
794 std::string tensorName = "tensor_" + item.first;
795 size_t length = 1;
796 length = ConvertShapeToLength(item.second.shape());
797 if(item.second.type() == ETensorType::FLOAT) {
798 const float* data = item.second.data<float>();
799 std::vector<float> tensorDataVector(data, data + length);
800 outputDir->WriteObjectAny(&tensorDataVector, "std::vector<float>", tensorName.c_str());
801 }
802 else if(item.second.type() == ETensorType::DOUBLE) {
803 const double* data = item.second.data<double>();
804 std::vector<double> tensorDataVector(data, data + length);
805 outputDir->WriteObjectAny(&tensorDataVector, "std::vector<double>", tensorName.c_str());
806 }
807 else if(item.second.type() == ETensorType::INT64) {
808 const int64_t* data = item.second.data<int64_t>();
809 std::vector<int64_t> tensorDataVector(data, data + length);
810 outputDir->WriteObjectAny(&tensorDataVector, "std::vector<int64_t>", tensorName.c_str());
811 }
812 else {
813 throw std::runtime_error("tmva-sofie tensor " + tensorName + " with type " + ConvertTypeToString(item.second.type()) +
814 " cannot be written to a ROOT file");
815 }
816 }
817 outputFile->Write(filename.c_str());
818
819 // this needs to be changed, similar to the text file
820 return -1;
821
822 } else if (fWeightFile == WeightFileType::Text) {
823 std::ofstream f;
824 if(fIsGNNComponent) {
825 // appending all GNN components into the same file
826 f.open(filename, std::ios::app);
827 } else {
828 f.open(filename);
829 }
830 if (!f.is_open())
831 throw
832 std::runtime_error("tmva-sofie failed to open file " + filename + " for tensor weight data");
833 for (auto& i: fInitializedTensors) {
834 // skip Constant tensors and not writable tensors (e.g. shape tensors)
835 if (!i.second.IsWeightTensor()) {
836 continue;
837 }
838 size_t length = ConvertShapeToLength(i.second.shape());
839 std::string tensor_name = "tensor_" + i.first;
840 f << tensor_name << " " << length << "\n";
841 if (i.second.type() == ETensorType::FLOAT) {
842 const float * data = i.second.data<float>();
843 for (size_t idx = 0; idx < length; idx++) {
844 // round to zero sub-normal values
845 float value = data[idx];
846 if (value != 0. && std::abs(value) < std::numeric_limits<float>::min() ) value = 0;
847 f << std::setprecision(std::numeric_limits<float>::max_digits10) << value;
848 f << ( (idx < length-1) ? " " : "\n" );
849 }
850 }
851 else {
852 throw std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be written to a file");
853 }
854 if (f.fail())
855 throw std::runtime_error("tmva-sofie failed to write tensor data to file for " + tensor_name);
856 }
857 long curr_pos = f.tellp();
858 f.close();
859 return curr_pos;
860 } else {
861 return -1;
862 }
863}
864
866 std::cout << "Model requires following inputs:\n";
867 for (auto& inputInfo: fInputTensorInfos) {
868 std::cout << "Parametraised Tensor name: " << inputInfo.first << "\t";
869 std::cout << "type: " << ConvertTypeToString(inputInfo.second.type) << "\t";
870 std::cout << "shape: [";
871 for (size_t i = 0; i < inputInfo.second.shape.size(); i++) {
872 if (inputInfo.second.shape[i].isParam) {
873 std::cout << inputInfo.second.shape[i].param;
874 } else {
875 std::cout << inputInfo.second.shape[i].dim ;
876 }
877 if (i < inputInfo.second.shape.size() - 1) std::cout << ",";
878 }
879 std::cout << "]" << std::endl;
880 }
881
882 for (auto& inputInfo: fReadyInputTensorInfos) {
883 std::cout << "Fully Specified Tensor name: " << inputInfo.first << "\t";
884 std::cout << "type: " << ConvertTypeToString(inputInfo.second.type) << "\t";
885 std::cout << "shape: [";
886 for (size_t i = 0; i < inputInfo.second.shape.size(); i++) {
887 std::cout << inputInfo.second.shape[i];
888 if (i < inputInfo.second.shape.size() - 1) std::cout << ",";
889 }
890 std::cout << "]" << std::endl;
891 }
892 std::cout << "\n";
893}
894
896 std::cout << "Model initialized the following tensors:\n";
897 for (auto& it: fInitializedTensors) {
898 std::cout << "Tensor name: \"" << it.first << "\"\t";
899 std::cout << "type: " << ConvertTypeToString(it.second.type()) << "\t";
900 std::cout << "shape: [";
901 for (size_t i = 0; i < it.second.shape().size(); i++) {
902 std::cout << it.second.shape()[i];
903 if (i < it.second.shape().size() - 1) std::cout << ",";
904 }
905 std::cout << "]";
906 if (it.second.IsConstantTensor()) std::cout << " (Constant)";
907 else if (!it.second.IsWeightTensor()) std::cout << " (Not Writable)";
908 std::cout << std::endl;
909 }
910 std::cout << "\n";
911}
912
914 std::cout << "Model specify the following intermediate tensors:\n";
915 for (auto& it: fIntermediateTensorInfos) {
916 std::cout << "Tensor name: \"" << it.first << "\"\t";
917 std::cout << "type: " << ConvertTypeToString(it.second.type) << "\t";
918 std::cout << "shape: [";
919 for (size_t i = 0; i < it.second.shape.size(); i++) {
920 std::cout << it.second.shape[i];
921 if (i < it.second.shape.size() - 1) std::cout << ",";
922 }
923 std::cout << "]" << std::endl;
924 }
925 std::cout << "\n";
926}
927
929 std::cout << "Model specify the following dynamic tensors:\n";
930 for (auto& it: fDynamicTensorInfos) {
931 std::cout << "Tensor name: \"" << it.first << "\"\t";
932 std::cout << "type: " << ConvertTypeToString(it.second.type) << "\t";
933 std::cout << "shape: [";
934 for (size_t i = 0; i < it.second.shape.size(); i++) {
935 std::cout << it.second.shape[i].GetVal();
936 if (i < it.second.shape.size() - 1) std::cout << ",";
937 }
938 std::cout << "]" << std::endl;
939 }
940 std::cout << "\n";
941}
942
944 std::cout << "Model specify the following output tensors:\n";
945 for (auto& it: fOutputTensorNames) {
946 std::cout << "Tensor name: \"" << it << "\"\t";
947 if (!IsDynamicTensor(it))
948 std::cout << "shape: " << ConvertShapeToString(GetTensorShape(it)) << std::endl;
949 else
950 std::cout << "shape: " << ConvertDynamicShapeToString(GetDynamicTensorShape(it)) << std::endl;
951 }
952 std::cout << "\n";
953}
954
955void RModel::HeadInitializedTensors(std::string name, int n_print) {
956 auto it = fInitializedTensors.find(name);
957 if (it == fInitializedTensors.end()) {
958 std::cout << "Tensor " << name << " not found in model's initialized tensor list" << std::endl;
959 return;
960 }
961
962 std::cout << "Tensor name: " << it->first << "\t";
963 std::cout << "type: " << ConvertTypeToString(it->second.type()) << "\t";
964 int length =1;
965 std::cout << "shape: [";
966 for (size_t i = 0; i < it->second.shape().size(); i++) {
967 std::cout << it->second.shape()[i];
968 length *= it->second.shape()[i];
969 if (i < it->second.shape().size() - 1) std::cout << ",";
970 }
971 std::cout << "]" << std::endl;
972 bool ellipsis = true;
973 if (n_print > length) {
974 n_print = length;
975 ellipsis = false;
976 }
977
978 std::cout << "data: [" << std::endl;
979 if (it->second.type() == ETensorType::FLOAT) {
980 auto converted_data = it->second.data<float>();
981 for (int i =0; i < n_print; i++) {
982 std::cout << converted_data[i];
983 if (i < n_print - 1) std::cout << " ,";
984 }
985 }
986 if (ellipsis) std::cout << ", ...";
987 std::cout << "]" << std::endl;
988
989}
990
991void RModel::OutputGenerated(std::string filename, bool append) {
992
994
995 // write weights in a text file
996 if (fUseWeightFile) {
997 if (!filename.empty()) {
998 size_t pos = filename.find(".hxx");
999 if (fWeightFile == WeightFileType::Text)
1000 filename.replace(pos, 4, ".dat");
1001 if (fWeightFile == WeightFileType::RootBinary) {
1002 filename = filename.erase(pos, 4);
1003 filename += ".root";
1004 }
1005 } else {
1006 filename = fName;
1007 filename += fWeightFile == WeightFileType::Text ? ".dat" : ".root";
1008 }
1010 }
1011}
1012
1013void RModel::Streamer(TBuffer &R__b) {
1014 if (R__b.IsReading()) {
1015 RModel::Class()->ReadBuffer(R__b, this);
1016 for(auto i=RModel::fInitializedTensors.begin(); i!=RModel::fInitializedTensors.end(); ++i) {
1017 i->second.CastPersistentToShared();
1018 }
1019 }
1020 else {
1021 for(auto i=RModel::fInitializedTensors.begin(); i!=RModel::fInitializedTensors.end(); ++i) {
1022 i->second.CastSharedToPersistent();
1023 }
1024 RModel::Class()->WriteBuffer(R__b, this);
1025 }
1026}
1027
1028}//SOFIE
1029}//Experimental
1030}//TMVA
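
A minimal usage sketch (not part of this file): the Options flags can be combined with the operator| overloads defined at the top of RModel.cxx and passed to RModel::Generate, after which RModel::OutputGenerated writes the generated header. Building the model through RModelParser_ONNX and the file names "MyModel.onnx" / "MyModel.hxx" are assumptions for illustration; the parser interface may differ between ROOT versions.

#include "TMVA/RModel.hxx"
#include "TMVA/RModelParser_ONNX.hxx" // assumed parser header

using namespace TMVA::Experimental::SOFIE;

void GenerateModelCode() {
   // Build an RModel, here assumed to come from SOFIE's ONNX parser.
   RModelParser_ONNX parser;
   RModel model = parser.Parse("MyModel.onnx");

   // Combine generation options with the operator| overloads above, fix the
   // batch size to 1, and generate code without a Session class or a separate
   // weight file (the weights are then embedded as arrays in the header).
   model.Generate(Options::kNoSession | Options::kNoWeightFile, 1);

   // Write the generated inference code to a header file.
   model.OutputGenerated("MyModel.hxx");
}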
Buffer base class used for serializing objects.
Definition TBuffer.h:43
Bool_t IsReading() const
Definition TBuffer.h:86
static TFile * Open(const char *name, Option_t *option="", const char *ftitle="", Int_t compress=ROOT::RCompressionSetting::EDefaults::kUseCompiledDefault, Int_t netopt=0)
Create / open a file.
Definition TFile.cxx:4089
void GenerateHeaderInfo(std::string &hgname)
std::unordered_set< std::string > fNeededBlasRoutines
void OutputGenerated(std::string filename="", bool append=false)
std::unordered_set< std::string > fNeededStdLib
void AddBlasRoutines(std::vector< std::string > routines)
void AddNeededStdLib(std::string libname)
const ETensorType & GetTensorType(std::string name)
Definition RModel.cxx:91
std::unordered_map< std::string, DynamicTensorInfo > fDynamicTensorInfos
Definition RModel.hxx:20
bool IsDynamicTensor(const std::string &name) const
Definition RModel.cxx:186
void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector< Dim > dim_shape)
Definition RModel.cxx:196
std::vector< Dim > GetDynamicTensorShape(std::string name)
Definition RModel.cxx:79
bool CheckIfTensorAlreadyExist(std::string tensor_name)
Definition RModel.cxx:116
std::vector< std::unique_ptr< ROperator > > fOperators
Definition RModel.hxx:26
void OutputGenerated(std::string filename="", bool append=false)
Definition RModel.cxx:991
void AddInputTensorInfo(std::string input_name, ETensorType type, std::vector< Dim > shape)
Definition RModel.cxx:125
std::unordered_map< std::string, TensorInfo > fIntermediateTensorInfos
Definition RModel.hxx:19
void AddOutputTensorNameList(std::vector< std::string > output_tensor_names)
Definition RModel.cxx:234
std::unordered_map< std::string, TensorInfo > fReadyInputTensorInfos
Definition RModel.hxx:17
void AddConstantTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
Definition RModel.cxx:171
void AddDynamicTensor(std::string tensor_name, ETensorType type, std::vector< Dim > shape)
Definition RModel.cxx:213
void AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
Definition RModel.cxx:161
RModel & operator=(RModel &&other)
Definition RModel.cxx:39
void AddInputTensorName(std::string name)
Definition RModel.cxx:144
std::vector< std::string > fOutputTensorNames
Definition RModel.hxx:23
bool IsInitializedTensor(const std::string &name) const
Definition RModel.cxx:181
void AddOperator(std::unique_ptr< ROperator > op, int order_execution=-1)
Definition RModel.cxx:148
RModel()=default
Default constructor.
void HeadInitializedTensors(std::string name, int n_print=50)
Definition RModel.cxx:955
void Initialize(int batchSize=-1, bool verbose=false)
Definition RModel.cxx:274
const std::vector< size_t > & GetTensorShape(std::string name)
Definition RModel.cxx:56
bool IsInputTensor(const std::string &name) const
Definition RModel.cxx:190
long WriteInitializedTensorsToFile(std::string filename="")
Definition RModel.cxx:757
void Generate(std::underlying_type_t< Options > options, int batchSize=-1, long pos=0, bool verbose=false)
Definition RModel.cxx:572
std::unordered_map< std::string, InputTensorInfo > fInputTensorInfos
Definition RModel.hxx:16
std::shared_ptr< void > GetInitializedTensorData(std::string tensor_name)
Definition RModel.cxx:257
std::unordered_map< std::string, std::string > fShapeParams
Definition RModel.hxx:22
void SetNotWritableInitializedTensor(const std::string &tensor_name)
Definition RModel.cxx:266
std::vector< std::string > fInputTensorNames
Definition RModel.hxx:24
std::unordered_map< std::string, InitializedTensor > fInitializedTensors
Definition RModel.hxx:18
void UpdateInitializedTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
Definition RModel.cxx:248
void UpdateOutputTensorList(std::vector< std::string > curr_output_tensor, std::vector< std::string > modify_output_tensor)
Definition RModel.cxx:241
std::string Clean_name(std::string input_tensor_name)
std::vector< Dim > ConvertShapeToDim(std::vector< size_t > shape)
Convert shape from integer format to dynamic one (based on Dim)
std::string ConvertDynamicShapeToLength(std::vector< Dim > shape)
std::string ConvertShapeToString(std::vector< size_t > shape)
std::string ConvertTypeToString(ETensorType type)
std::string ConvertDynamicShapeToString(std::vector< Dim > shape)
std::underlying_type_t< Options > operator|(Options opA, Options opB)
Definition RModel.cxx:16
std::vector< size_t > ConvertShapeToInt(std::vector< Dim > shape)
Convert shape based on Dim to integer format.
std::size_t ConvertShapeToLength(std::vector< size_t > shape)
create variable transformations
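
For orientation, a sketch of how the code assembled into fGC above is typically consumed once written out. The model name "MyModel", a single float input of length 10, and a single output are assumptions; the generated namespace (TMVA_SOFIE_<name>), the Session constructor that reads the weight file, and the infer() signature with one pointer per input tensor follow the strings generated in this file.

#include "MyModel.hxx" // header produced by RModel::OutputGenerated (assumed name)

#include <vector>

int main() {
   // The generated Session constructor loads the weights, by default from
   // "MyModel.dat" (or a ROOT file when kRootBinaryWeightFile was requested).
   TMVA_SOFIE_MyModel::Session session;

   // infer() takes one pointer per registered input tensor and returns a
   // std::vector of the (single) output type, as generated above.
   std::vector<float> input(10, 0.f); // assumed input length
   std::vector<float> output = session.infer(input.data());
   return output.empty() ? 1 : 0;
}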