Logo ROOT  
Reference Guide
 
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Friends Macros Modules Pages
Loading...
Searching...
No Matches
RModel.cxx
Go to the documentation of this file.
1#include <limits>
2#include <algorithm>
3#include <cctype>
4#include <memory>
5#include <string>
6
7#ifdef SOFIE_SUPPORT_ROOT_BINARY
8#include "TFile.h"
9#endif
10
11#include "TMVA/RModel.hxx"
12#include "TMVA/SOFIE_common.hxx"
13
14namespace TMVA {
15namespace Experimental {
16namespace SOFIE {
17
// File-local constant: single-space indentation unit used when emitting generated code.
18namespace {
19const std::string SP = " ";
20}
21
22std::underlying_type_t<Options> operator|(Options opA, Options opB) {
23 return static_cast<std::underlying_type_t<Options>>(opA) | static_cast<std::underlying_type_t<Options>>(opB);
24}
25std::underlying_type_t<Options> operator|(std::underlying_type_t<Options> opA, Options opB) {
26 return opA | static_cast<std::underlying_type_t<Options>>(opB);
27}
28
// Return the fixed (integer) shape of tensor `name`.
// Lookup order: ready inputs -> initialized (weight/constant) tensors ->
// parametric inputs (error: shape not fully specified) -> intermediate tensors.
// NOTE(review): this is a scraped listing with gaps — original lines 46 and
// 49-50 are missing, so the dynamic-tensor check that guards the first throw
// below is not visible here; confirm against the full source.
29const std::vector<size_t>& RModel::GetTensorShape(std::string name) const {
30 auto f = fReadyInputTensorInfos.find(name);
31 if (f != fReadyInputTensorInfos.end()) {
32 return f->second.shape;
33 }
34 auto f2 = fInitializedTensors.find(name);
35 if (f2 != fInitializedTensors.end()) {
36 return f2->second.shape();
37 }
38 auto f3 = fInputTensorInfos.find(name);
39 if (f3 != fInputTensorInfos.end()) {
40 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] is an input tensor with unspecified dimension parameter");
41 }
42 auto f4 = fIntermediateTensorInfos.find(name);
43 if (f4 != fIntermediateTensorInfos.end()) {
44 return f4->second.shape;
45 }
47 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] is a dynamic tensor. Use GetDynamicTensorShape instead of GetTensorShape");
48
51
52 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] for which the shape is requested is not found");
53}
54
// Return the (possibly parametric) Dim-based shape of tensor `name`,
// checking dynamic tensors first, then parametric inputs.
// NOTE(review): original line 64 is missing from this listing — it presumably
// converts a fixed shape to a Dim vector and returns it by value, as the
// comment below indicates; confirm against the full source.
55std::vector<Dim> RModel::GetDynamicTensorShape(std::string name) const {
56 if (auto f = fDynamicTensorInfos.find(name); f != fDynamicTensorInfos.end()) {
57 return f->second.shape;
58 }
59 if (auto f = fInputTensorInfos.find(name); f != fInputTensorInfos.end()) {
60 return f->second.shape;
61 }
62 // in case is not a dynamic tensor convert normal shape to Dim one
63 // for this we need to return the vector by value
65}
66
// Return the element type of tensor `name`, searching every tensor registry
// in turn (ready inputs, initialized, parametric inputs, intermediate, dynamic).
// Throws if the tensor is unknown to this model.
// NOTE(review): original lines 89-90 are missing from this listing.
67const ETensorType& RModel::GetTensorType(std::string name) const {
68 auto f = fReadyInputTensorInfos.find(name);
69 if (f != fReadyInputTensorInfos.end()) {
70 return f->second.type;
71 }
72 auto f2 = fInitializedTensors.find(name);
73 if (f2 != fInitializedTensors.end()) {
74 return f2->second.type();
75 }
76 auto f3 = fInputTensorInfos.find(name);
77 if (f3 != fInputTensorInfos.end()) {
78 return f3->second.type;
79 }
80 auto f4 = fIntermediateTensorInfos.find(name);
81 if (f4 != fIntermediateTensorInfos.end()) {
82 return f4->second.type;
83 }
84 auto f5 = fDynamicTensorInfos.find(name);
85 if (f5 != fDynamicTensorInfos.end()){
86 return f5->second.type;
87 }
88
91
92 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] for which the type is requested is not found, model name: " + fName);
93}
94
// True if `tensor_name` is already registered in any tensor registry of the
// model. Used to reject duplicate registrations.
// NOTE(review): original line 101 is missing from this listing (possibly a
// further registry check); confirm against the full source.
95bool RModel::CheckIfTensorAlreadyExist(std::string tensor_name) {
96 if (fReadyInputTensorInfos.find(tensor_name) != fReadyInputTensorInfos.end()) return true;
97 if (fInputTensorInfos.find(tensor_name) != fInputTensorInfos.end()) return true;
98 if (fInitializedTensors.find(tensor_name) != fInitializedTensors.end()) return true;
99 if (fIntermediateTensorInfos.find(tensor_name) != fIntermediateTensorInfos.end()) return true;
100 if (fDynamicTensorInfos.find(tensor_name) != fDynamicTensorInfos.end()) return true;
102 return false;
103}
104
// Register an input tensor whose shape may contain symbolic Dim parameters.
// Throws if a tensor with this name already exists.
// NOTE(review): original lines 106-107 (name cleaning + existence check) and
// 112 (insertion into fInputTensorInfos) are missing from this listing.
105void RModel::AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<Dim> shape) {
108 throw std::runtime_error("TMVA-SOFIE: input tensor with name " + input_name + " already exists \n");
109 }
110
111 InputTensorInfo inputInfo { type, shape };
113}
114
// Register an input tensor with a fully specified integer shape
// (goes into the "ready" input registry).
// NOTE(review): original lines 116-117 (name cleaning + existence check) and
// 121 (insertion into fReadyInputTensorInfos) are missing from this listing.
115void RModel::AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<size_t> shape) {
118 throw std::runtime_error("TMVA-SOFIE: input tensor with name " + input_name + " already exists \n");
119 }
120 TensorInfo inputInfo { type, shape };
122}
123
127
// Append (or insert at position `order_execution` when >= 0) an operator to the
// model's execution list, registering its required BLAS routines and std libs.
// Also records last-usage bookkeeping for intermediate input tensors.
// NOTE(review): original lines 133, 146, 148-149 and 151 are missing here
// (the AddNeededStdLib call inside the first loop and the loop condition/body
// of the last-usage scan); confirm against the full source.
128void RModel::AddOperator(std::unique_ptr<ROperator> op, int order_execution) {
129 AddBlasRoutines(op->GetBlasRoutines());
130 auto libs = op->GetStdLibs();
131 auto op_input_tensors = op->GetOpInputTensors();
132 for (auto& stdlib : libs) {
134 }
135 if (order_execution >= 0) {
136 fOperators.insert(fOperators.begin() + order_execution, std::move(op));
137 } else {
138 fOperators.push_back(std::move(op));
139 }
140
141 // storing the last usage of tensors which are input to
142 // operators (but are not inputs to the model, i.e. they are intermediate
143 // tensors). This information is needed to keep a check on when a
144 // particular intermediate tensor can be flushed to free up memory for reuse.
145 for(size_t index = 0; index<op_input_tensors.size() &&
147 std::find(fInputTensorNames.begin(), fInputTensorNames.end(),
150 ++index){
152 }
153}
154
// Register an initialized (weight) tensor owning its data via shared_ptr.
// Throws if a tensor with this (cleaned) name already exists.
// NOTE(review): original line 161 constructing `new_tensor` is missing from
// this listing; confirm against the full source.
155void RModel::AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data) {
156 tensor_name = UTILITY::Clean_name(tensor_name);
157 //NB: own data
158 if (CheckIfTensorAlreadyExist(tensor_name)) {
159 throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
160 }
162 fInitializedTensors[tensor_name] = new_tensor;
163}
164
165void RModel::AddConstantTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data) {
166 tensor_name = UTILITY::Clean_name(tensor_name);
167 //NB: own data
168 if (CheckIfTensorAlreadyExist(tensor_name)) {
169 throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
170 }
171 InitializedTensor new_tensor {type, shape, data, true}; // add here flag to specify is a constant tensor
172 fInitializedTensors[tensor_name] = new_tensor;
173}
174
175bool RModel::IsInitializedTensor(const std::string& tensorName) const {
176 std::string name = UTILITY::Clean_name(tensorName);
177 return fInitializedTensors.find(name) != fInitializedTensors.end();
178}
179bool RModel::IsConstantTensor(const std::string& tensorName) const {
180 std::string name = UTILITY::Clean_name(tensorName);
181 auto itr = fInitializedTensors.find(name);
182 if (itr == fInitializedTensors.end()) return false;
183 return itr->second.IsConstantTensor();
184}
185
186bool RModel::IsDynamicTensor(const std::string& tensorName) const {
187 std::string name = UTILITY::Clean_name(tensorName);
188 return fDynamicTensorInfos.find(name) != fDynamicTensorInfos.end();
189}
190bool RModel::IsDimInputTensor(const std::string& tensorName) const {
191 std::string name = UTILITY::Clean_name(tensorName);
192 return fInputTensorInfos.find(name) != fInputTensorInfos.end();
193}
// True if the (cleaned) name is an input tensor with a fully specified shape.
// NOTE(review): original line 196 (the lookup/return on fReadyInputTensorInfos)
// is missing from this listing; confirm against the full source.
194bool RModel::IsReadyInputTensor(const std::string& tensorName) const {
195 std::string name = UTILITY::Clean_name(tensorName);
197}
198
199// generic addition of a tensor
// Dispatch on whether the Dim shape is fully specified: fixed shapes become
// ordinary intermediate tensors, parametric ones become dynamic tensors.
// NOTE(review): original line 201 computing `int_shape` (presumably via
// ConvertShapeToInt(dim_shape)) is missing from this listing.
200void RModel::AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<Dim> dim_shape) {
202 if (!int_shape.empty())
203 AddIntermediateTensor(tensor_name, type, int_shape);
204 else
205 AddDynamicTensor(tensor_name, type, dim_shape);
206}
207
// Register an intermediate tensor with a fixed integer shape.
// NOTE(review): original line 214 (insertion of new_tensor into
// fIntermediateTensorInfos) is missing from this listing.
208void RModel::AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape) {
209 tensor_name = UTILITY::Clean_name(tensor_name);
210 if (CheckIfTensorAlreadyExist(tensor_name)) {
211 throw std::runtime_error("TMVA-SOFIE: intermediate tensor with name " + tensor_name + " already exists \n");
212 }
213 TensorInfo new_tensor {type, shape};
215}
216
// Register a dynamic tensor (shape contains symbolic parameters) and record
// any new shape parameters with their default values in fShapeParams.
// NOTE(review): original line 222 constructing `new_tensor` is missing from
// this listing; confirm against the full source.
217void RModel::AddDynamicTensor(std::string tensor_name, ETensorType type, std::vector<Dim> shape){
218 tensor_name = UTILITY::Clean_name(tensor_name);
219 if (CheckIfTensorAlreadyExist(tensor_name)){
220 throw std::runtime_error("TMVA-SOFIE: intermediate tensor with name " + tensor_name + " already exists \n");
221 }
223 fDynamicTensorInfos[tensor_name] = new_tensor;
224 // store shape parameter if not existing
225 for (auto &d : shape) {
226 if (d.isParam) {
227 if (fShapeParams.count(d.param) == 0) {
228 // case parameter is an expression of some other existing parameter, no need to
229 // register it
230 if (d.dim != size_t(-1)) {
231 fShapeParams[d.param] = std::to_string(d.dim);
232 }
233 }
234 }
235 }
236}
237
// Replace the model's output tensor name list with cleaned copies of the
// given names.
// NOTE(review): the function signature (original line 238, presumably
// RModel::AddOutputTensorNameList taking `outputtensornames`) is missing
// from this listing.
239 fOutputTensorNames.clear();
240 for(auto& it : outputtensornames) {
241 fOutputTensorNames.emplace_back(UTILITY::Clean_name(it));
242 }
243}
244
// Remove `curr_output_tensors` from the output list and append
// `new_output_tensors` in their place.
// NOTE(review): original line 249 (the append of new_output_tensors) is
// missing from this listing; confirm against the full source.
245void RModel::UpdateOutputTensorList(std::vector<std::string> curr_output_tensors, std::vector<std::string> new_output_tensors) {
246 for(auto& it:curr_output_tensors) {
247 fOutputTensorNames.erase(std::remove(fOutputTensorNames.begin(), fOutputTensorNames.end(), it), fOutputTensorNames.end());
248 }
250}
251
// Overwrite the data/shape/type of an already-registered initialized tensor.
// Throws if no tensor with the (cleaned) name exists.
// NOTE(review): original line 257 constructing `new_tensor` is missing from
// this listing; confirm against the full source.
252void RModel::UpdateInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data) {
253 tensor_name = UTILITY::Clean_name(tensor_name);
254 if (!CheckIfTensorAlreadyExist(tensor_name)) {
255 throw std::runtime_error("TMVA-SOFIE: tensor " + tensor_name + " not found when trying to update it");
256 }
258 fInitializedTensors[tensor_name] = new_tensor;
259}
260
261std::shared_ptr<void> RModel::GetInitializedTensorData(std::string tensor_name) {
262 auto f = fInitializedTensors.find(tensor_name);
263 if (f == fInitializedTensors.end()) {
264 throw std::runtime_error("TMVA-SOFIE: tensor " + tensor_name + " not found when trying to get its data");
265 } else {
266 return f->second.sharedptr();
267 }
268}
269
270void RModel::SetNotWritableInitializedTensor(const std::string & tensor_name) {
271 auto t = fInitializedTensors.find(tensor_name);
272 if (t == fInitializedTensors.end()) {
273 throw std::runtime_error("TMVA-SOFIE: initialized tensor " + tensor_name + " not found when trying to get its info");
274 }
275 t->second.SetNotWritable();
276 }
277
// Emit C++ code that positions each operator output tensor inside the shared
// intermediate memory pool, reusing freed chunks (first-fit with splitting)
// and growing the pool when no chunk is large enough.
// NOTE(review): this scraped listing is missing several original lines
// (293-294, 297, 299, 305, 307, 313, 322, 328), including the tensor-size
// computation, the chunk-iterator setup and the declareIntermediateTensor
// calls; confirm the exact control flow against the full source.
278std::string RModel::AllocateIntermediateMemory(std::span<const std::string_view> op_output_tensors)
279{
280 std::stringstream code;
281
282 auto declareIntermediateTensor = [this, &code](std::string const &name, int size, int location) {
283 std::string typeName = ConvertTypeToString(GetTensorType(name));
284 code << "\n // Allocating memory for intermediate tensor " << name << " with size " << size << " bytes";
285 code << "\n"
286 << typeName << "* tensor_" << name << " = reinterpret_cast<" << typeName
287 << "*>(fIntermediateMemoryPool.data() + " << location << ");\n";
288 };
289
290 for (auto &it : op_output_tensors) {
291 std::string name = std::string{it};
292 bool allocated = false;
295 fDynamicTensorInfos.find(name) != fDynamicTensorInfos.end()) continue;
296
298
300
301 // check if available memory chunks can accommodate the tensor
302 if (chunk->second >= tensor_size) {
303 auto new_chunk = fIntermediateMemoryInfo.total_stack[chunk->first].split(it, tensor_size);
304 auto new_chunk_location = chunk->first+chunk->second-tensor_size;
306
308 chunk->second -= tensor_size;
309
310 allocated = true;
311
312 if (chunk->second == 0) {
314 }
315
316 break;
317 }
318 ++chunk;
319 }
320
321 if (!allocated) {
323 ? 0
324 : fIntermediateMemoryInfo.total_stack.rbegin()->first + fIntermediateMemoryInfo.total_stack.rbegin()->second.tensor_size;
325
326 fIntermediateMemoryInfo.total_stack[chunk_idx] = {it, tensor_size};
327
329 }
330 }
331 return code.str();
332}
333
// For each operator input tensor whose last use is the operator at `op_idx`,
// release its chunk back to the available-memory map, coalescing with
// adjacent free chunks where possible.
// NOTE(review): this scraped listing is missing several original lines
// (337, 339, 349, 352-353, 357-358, 360), including the last-usage test, the
// inner loop condition and parts of the coalescing/insert logic; confirm the
// exact control flow against the full source.
334void RModel::CheckAndFlushIntermediateMemory(std::span<const std::string_view> op_input_tensors, const size_t& op_idx){
335 for (auto &it : op_input_tensors){
336 // last occurence of the tensor is reached => flush it from memory
338 for (auto chunk = fIntermediateMemoryInfo.total_stack.begin();
340 if (chunk->second.tensor_name == it) {
341
342 // check if nearby chunks in available memory can coalesce
343 auto first_greater = fIntermediateMemoryInfo.available_stack.upper_bound(chunk->first); // smallest element greater than the flushed chunk idx
344 auto last_smaller = (first_greater == fIntermediateMemoryInfo.available_stack.begin()) ? fIntermediateMemoryInfo.available_stack.end() : std::prev(first_greater); // largest element smaller than the flushed chunk idx
345
346 // check if the next stack entry is actually adjacent in memory
347 if (last_smaller->first+last_smaller->second + 1 == chunk->first){
348 last_smaller->second += chunk->second.tensor_size;
350
351 if (last_smaller->first + last_smaller->second + 1 == first_greater->first){
354 }
355 } else{
356 if (chunk->first + chunk->second.tensor_size + 1 == first_greater->first){
359 }
361 chunk->first,
362 chunk->second.tensor_size
363 });
364 }
365 }
366 }
367 }
368 }
369}
370
371
372
// Convenience overload: map a positive batch size onto the common batch
// parameter names and delegate to the map-based Initialize.
// NOTE(review): original line 381 is missing from this listing.
373void RModel::Initialize(int batchSize, bool verbose) {
374 std::map<std::string, size_t> inputParams;
375 if (batchSize > 0) {
376 inputParams["input_size"] = batchSize;
377 inputParams["batch_size"] = batchSize;
378 inputParams["bs"] = batchSize;
379 }
380 Initialize(inputParams, verbose);
382}
// Main initialization: resolve parametric input shapes using `inputParams`,
// move fully-specified inputs into the ready registry, decide whether a
// weight file is needed, then initialize every operator in order.
// Idempotent: returns immediately if already initialized.
// NOTE(review): this scraped listing is missing original lines 392, 428,
// 441-442, 471 and 473-475 (parts of the consistency check, verbose printing
// and the per-output bookkeeping loop); confirm against the full source.
383void RModel::Initialize(const std::map<std::string, size_t> & inputParams, bool verbose) {
384
385 fVerbose = int(verbose);
386
387 if (fIsInitialized) {
388 if (verbose)
389 std::cout << "Model is already initialized - skip initialization " << std::endl;
390 return;
391 }
393 fDynamicTensorInfos.clear();
394
395 // loop on inputs and see if shape can be full specified
396 // if the batch size is provided it can be used to specify the full shape
397 // Add the full specified tensors in fReadyInputTensors collection
398 auto originalInputTensorInfos = fInputTensorInfos; // need to copy because we may delete elements
399 for (auto &input : originalInputTensorInfos) {
400 if (verbose) std::cout << "looking at the tensor " << input.first << std::endl;
401 // if a parameter (e.g. batch_size) is specified use for converting parametric shape in defined one
402 if (!inputParams.empty()) {
403 for (auto &d : input.second.shape) {
404 if (d.isParam) {
405 std::string pname = d.param;
406 if (pname == input.first + "_size") pname = "input_size";
407 auto itr = inputParams.find(pname);
408 if (itr != inputParams.end() ) {
409 d = Dim{ itr->second };
410 if (verbose)
411 std::cout << "Tensor: " << input.first << " - fix parametric shape " << itr->first << " to " << itr->second << std::endl;
412 }
413 }
414 }
415 }
416 // see if shape now is fully defined
417 auto shape = ConvertShapeToInt(input.second.shape);
418 if (verbose)
419 std::cout << "converting input shape for " << input.first << " " << ConvertShapeToString(shape) << " from "
420 << ConvertDynamicShapeToString(input.second.shape) << std::endl;
421 if (!shape.empty()) {
422 // case shape is defined (not parametric) we add the tensor in the fReadyInputTensorInfos map and
423 // we remove the tensor from the fInputTensorInfo where th eold parametric shape was stored
424 fInputTensorInfos.erase(input.first);
425 // add to the ready input tensor information the new fixed shape
426 AddInputTensorInfo(input.first, input.second.type, shape);
427 // check consistency
429 }
430 // store the parameters of the input tensors
431 else {
432 // store the found parametric shape parameters
433 for (auto &d : input.second.shape) {
434 if (d.isParam)
435 fShapeParams[d.param] = std::to_string(d.dim);
436 }
437 }
438 }
439
440 if (verbose) {
443 }
444
445 // check if there are initialized tensors to write in a weight file
446 // support for the time being only weight of FLOAT type
447 if (fUseWeightFile) {
448 bool modelHasWeights = false;
449 for (auto &i : fInitializedTensors) {
450 if (i.second.type() == ETensorType::FLOAT) {
451 modelHasWeights = true;
452 break;
453 }
454 }
455 if (!modelHasWeights)
456 fUseWeightFile = false;
457 }
458 // Go through model and initialize each operator
459 int i = 0;
460
461 std::vector<size_t> temp_available_stack; // vector stores individual chunks of available memory that maybe reused
462
463 for(size_t op_idx = 0; op_idx < fOperators.size(); ++op_idx){
464 if (verbose) {
465 auto& r = *fOperators[op_idx].get();
466 std::cout << "Initializing operator " << i << " " << typeid(r).name() << std::endl;
467 }
468 fOperators[op_idx]->Initialize(*this);
469 for(auto &it:fOperators[op_idx]->GetOpOutputTensors()){
470 std::string name = std::string{it};
472 std::find(fOutputTensorNames.begin(), fOutputTensorNames.end(), name) == fOutputTensorNames.end() &&
476 }
477 }
478 i++;
479 }
480
481 fIsInitialized = true;
482}
483
// Register and initialize a sub-graph model: link it to this parent, copy the
// weight-file/session options, merge its BLAS/stdlib requirements upward, and
// share the parent's input tensor names.
// NOTE(review): original lines 500 and 502 are missing from this listing
// (presumably AddBlasRoutines(blasRoutines) and AddNeededStdLib(e)); confirm
// against the full source.
484void RModel::InitializeSubGraph(std::shared_ptr<RModel> graph) {
485 // add the subgraph to the list
486 fSubGraphs.push_back(graph);
487 //this needs to be done before initializing
488 graph->fParentGraph = this;
489 graph->fIsSubGraph = true;
490
491 graph->Initialize(fBatchSize, fVerbose);
492 // set the same options as parent model
493 graph->fWeightFile = fWeightFile;
494 graph->fUseWeightFile = fUseWeightFile;
495 graph->fUseSession = fUseSession;
496 // add needed blas routines and libs
497 std::vector<std::string> blasRoutines;
498 for (auto & e : graph->fNeededBlasRoutines)
499 blasRoutines.push_back(e);
501 for (auto e : graph->fNeededStdLib)
503
504 // add parent input tensors to current graph
505 for (auto & name : fInputTensorNames)
506 graph->fInputTensorNames.emplace_back(name);
507
508 // clean graph name
509 graph->fName = UTILITY::Clean_name(graph->fName);
510
511}
512
513// Function to generate the code for declaring and initializing constant tensors
514// This is for tensors which are not part of weight files and can be created from the Constant operator
// Emits either a stack array (small tensors, <= 100 elements) or a heap
// std::vector; for heap tensors with uniform values it uses the fill
// constructor to keep the generated source small.
// NOTE(review): original line 543 (the else branch writing out all values,
// presumably via ConvertValuesToString) is missing from this listing.
515template <typename T>
516std::string GenerateConstantTensorCode(const std::pair<std::string, InitializedTensor> &t)
517{
518 std::stringstream strs;
519 std::string type = ConvertTypeToString(t.second.type());
520 size_t length = ConvertShapeToLength(t.second.shape());
521 // avoid using stack sizes for constant tensors to reduce compilation time
522 bool allocateOnStack = (length > 100) ? false : true;
523
524 const T *data = t.second.data<T>();
525
526 // and check if all values are the same
527 bool sameData = false;
528 // for non stack allocation check if data are the same
529 if (!allocateOnStack && length > 1) {
530 size_t idx = 1;
531 do {
532 sameData = (data[idx] == data[idx - 1]);
533 idx++;
534 } while (sameData && idx < length);
535 }
536 if (allocateOnStack) {
537 strs << type << " tensor_" << t.first << "[" << length << "] = " << ConvertValuesToString(length, data) << ";\n";
538 } else {
539 strs << "std::vector<" << type << "> fTensor_" << t.first << " = ";
540 if (sameData)
541 strs << "std::vector<" << type << ">(" << length << ", " << ConvertValToString(data[0]) << ");\n";
542 else {
544 }
545 strs << "const " << type << " * tensor_" + t.first + " = fTensor_" + t.first + ".data();\n";
546 }
547 return strs.str();
548}
549
// Emit declarations for initialized tensors into fGC: constant tensors (or all
// tensors when no weight file is used) are generated inline via
// GenerateConstantTensorCode; weight-file tensors get an empty vector that is
// filled later when the weights are read.
// NOTE(review): the function signature (original line 550) and lines 558/560
// (the GenerateConstantTensorCode<float>/<int64_t> calls) are missing from
// this scraped listing; confirm against the full source.
551{
552 if (!fInitializedTensors.empty())
553 fGC += "// initialized tensors\n";
554
555 for (auto &i : fInitializedTensors) {
556 if (!fUseWeightFile || i.second.IsConstantTensor()) {
557 if (i.second.type() == ETensorType::FLOAT)
559 else if (i.second.type() == ETensorType::INT64)
561
562 } else {
563 // case of tensors which are read from a file
564 size_t length = ConvertShapeToLength(i.second.shape());
565 if (i.second.type() == ETensorType::FLOAT) {
566 fGC += "std::vector<float> fTensor_" + i.first + " = std::vector<float>(" + std::to_string(length) + ");\n";
567 fGC += "float * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
568 }
569 }
570 }
571}
572
// Emit the declaration of the byte-addressable memory pool sized to hold all
// intermediate tensors (last chunk offset + last chunk size).
// NOTE(review): the function signature (original line 573) and line 579
// (binding `totalStack` to fIntermediateMemoryInfo.total_stack) are missing
// from this scraped listing.
574 if (fIntermediateMemoryInfo.total_stack.empty()) return;
575 fGC += "\n//--- Allocating session memory pool to be used for allocating intermediate tensors\n";
576
577 // char memory block is allocated since char takes 1 byte, thus easier to allocate tensors
578 // of other data types
580 const int memPoolSize = totalStack.rbegin()->first + totalStack.rbegin()->second.tensor_size;
581 fGC += "std::vector<char> fIntermediateMemoryPool = std::vector<char>(" + std::to_string(memPoolSize) + ");\n\n";
582}
583
// Emit declarations for intermediate tensors (as owned std::vectors plus raw
// data pointers) and for dynamic tensors (declaration only — their allocation
// happens later once shape parameters are known). BOOL tensors get a vector
// reference instead of a raw pointer because std::vector<bool> is packed.
// NOTE(review): the function signature (original line 584) and lines 594,
// 596-597, 600 are missing from this scraped listing — they appear to gate
// pool-allocated tensors (`not_in_freq_map`) out of this per-tensor
// declaration path; confirm against the full source.
585 if (!fIntermediateTensorInfos.empty()) {
586 std::string tensor_declaration_block = "";
587 for (auto &i : fIntermediateTensorInfos) {
588 if (i.second.type == ETensorType::BOOL) {
589 tensor_declaration_block += "std::vector<bool> fTensor_" + i.first + " = std::vector<bool>(" + std::to_string(ConvertShapeToLength(i.second.shape)) + ");\n";
590 // No pointer allocation possible for BOOL, but we create a reference to the vector to make the data member layout more consistent
591 tensor_declaration_block += "std::vector<bool> & tensor_" + i.first + " = fTensor_" + i.first + ";\n";
592 continue;
593 }
595 bool not_in_freq_map =
598 (std::find(fOutputTensorNames.begin(), fOutputTensorNames.end(), i.first) == fOutputTensorNames.end());
599
601 size_t length = ConvertShapeToLength(i.second.shape);
602
603 if (i.second.type == ETensorType::FLOAT) {
604 tensor_declaration_block += "std::vector<float> fTensor_" + i.first + " = std::vector<float>(" + std::to_string(length) + ");\n";
605 tensor_declaration_block += "float * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
606 }
607 else if (i.second.type == ETensorType::DOUBLE) {
608 tensor_declaration_block += "std::vector<double> fTensor_" + i.first + " = std::vector<double>(" + std::to_string(length) + ");\n";
609 tensor_declaration_block += "double * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
610 }
611 else if (i.second.type == ETensorType::INT64) {
612 tensor_declaration_block += "std::vector<int64_t> fTensor_" + i.first + " = std::vector<int64_t>(" + std::to_string(length) + ");\n";
613 tensor_declaration_block += "int64_t * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
614 }
615 }
616 }
617
618 if (tensor_declaration_block.length()) {
619 fGC += "\n//--- declare and allocate the intermediate tensors\n" + tensor_declaration_block;
620 }
621 }
622 // add also the dynamic tensors (only declarations, allocation will be done later)
623 if (!fDynamicTensorInfos.empty()) {
624 fGC += "//--- declare the dynamic tensors\n";
625 for (auto &i : fDynamicTensorInfos) {
626 if (i.second.type == ETensorType::FLOAT) {
627 fGC += "std::vector<float> fTensor_" + i.first + ";\n";
628 fGC += "float * tensor_" + i.first + " = nullptr;\n";
629 } else if (i.second.type == ETensorType::DOUBLE) {
630 fGC += "std::vector<double> fTensor_" + i.first + ";\n";
631 fGC += "double * tensor_" + i.first + " = nullptr;\n";
632 } else if (i.second.type == ETensorType::INT64) {
633 fGC += "std::vector<int64_t> fTensor_" + i.first + ";\n";
634 fGC += "int64_t * tensor_" + i.first + " = nullptr;\n";
635 }
636 }
637 }
638}
639
640// generate code for specific operator declarations to be defined in the Session class
// Collect per-operator declaration code and append it to fGC under a common
// header; emits nothing when no operator contributes declarations.
// NOTE(review): the function signature (original line 641) is missing from
// this scraped listing.
642 std::string strcode;
643 for (auto & op : fOperators) {
644 strcode += op->GenerateDeclCode();
645 }
646 if (strcode.empty()) return;
647 fGC += "\n//---- operator declarations \n";
648 fGC += strcode;
649 fGC += "\n";
650}
651
// Emit, for each dynamic tensor, code that resizes its backing vector (guarded
// by a positive computed length) and rebinds the raw data pointer.
// NOTE(review): the function signature (original line 652) is missing from
// this scraped listing.
653{
654 std::stringstream out;
655 for (auto &i : fDynamicTensorInfos) {
656 auto length = ConvertDynamicShapeToLength(i.second.shape);
657 out << SP << "if (" << length << " > 0) {\n";
658 out << SP << SP << "fTensor_" << i.first << ".resize(" << length << ");\n";
659 out << SP << SP << "tensor_" << i.first << " = fTensor_" << i.first << ".data();\n";
660 out << SP << "}\n";
661 }
662 fGC += out.str();
663}
664
// Build the parameter list of the generated infer function: first any shape
// parameters for parametric inputs (deduplicated), then one pointer per input
// tensor. With isdecl=true the types are included (declaration form); with
// isdecl=false only the argument names are emitted (call form).
// NOTE(review): the signature (original line 665, presumably taking `bool
// isdecl`) and lines 681/686 (inputParams bookkeeping and the type lookup
// feeding `type`) are missing from this scraped listing.
666 // generate the infer signature given the inputs: eg. "float * tensor1, float * tensor2"
667 // if (decl = false) generate only calling signature (tensor1,tensor2,....)
668 std::string rGC;
669 std::unordered_map<std::string, int> inputParams;
670 int i_input = 0;
671 for (auto &name : fInputTensorNames) {
672 // if is a dynamic tensor pass initial parameters
673 if (IsDimInputTensor(name)) {
674 auto shape = GetDynamicTensorShape(name);
675 for (auto &d : shape) {
676 std::string pName = d.param;
677 // need to check if the input parameters is already existing in another input tensor
678 if (d.isParam && inputParams.count(pName) == 0) {
679 if (isdecl) rGC += "size_t ";
680 rGC += d.param + ",";
682 }
683 }
684 }
685 if (isdecl) {
687 if (type == "other")
688 throw std::runtime_error("TMVA-SOFIE: input tensor " + name +
689 " is of a data type which is not yet supported.");
690 rGC += type + "* ";
691 }
692 rGC += "tensor_" + name + ",";
693 i_input++;
694 }
695
696 if (fInputTensorNames.size() > 0) rGC.pop_back();// remove last ","
697 return rGC;
698}
699
// File-local helper: map a tensor element type to the C++ type name used for
// generated output vectors.
// NOTE(review): original line 705 is missing from this scraped listing — per
// the comment it presumably special-cases BOOL to avoid std::vector<bool> as
// a return type; confirm against the full source.
700namespace {
701
702std::string typeForOutput(ETensorType t) {
703 // The std::vector<bool> is a special type that is not wrapping continuous memory.
704 // We don't want to use it as a return type.
706 return ConvertTypeToString(t);
707}
708
709}
710
// Emit the public `infer(...)` wrapper: choose the return type (single vector,
// vector of vectors when all outputs share a type, or a tuple otherwise),
// declare one output vector per output tensor, call doInfer with inputs plus
// output references, and return the collected outputs.
// NOTE(review): the function signature (original line 711) and lines 718,
// 725 and 738 are missing from this scraped listing (the eFirstOutputType
// initialization, the per-name type comparison, and emitting inferReturnType);
// confirm against the full source.
712{
713 size_t outputSize = fOutputTensorNames.size();
714 // assume output types are all the same
715
716 bool sameOutputTypes = true;
717 std::string inferReturnType; // type return by infer function
719 fGC += "\n\n";
720 if (outputSize == 1) {
721 fGC += "std::vector<" + typeForOutput(eFirstOutputType) + ">";
722 } else {
723 // if all output types are the same we return an std::vector - otherwise a tuple
724 for (std::string const &name : fOutputTensorNames) {
726 sameOutputTypes = false;
727 }
728 if (sameOutputTypes)
729 fGC += "std::vector<std::vector<" + typeForOutput(eFirstOutputType) + ">>";
730 else {
731 inferReturnType = "std::tuple<";
732 for (size_t i = 0; i < outputSize; i++) {
733 inferReturnType += "std::vector<" + typeForOutput(GetTensorType(fOutputTensorNames[i])) + ">";
734 if (i < outputSize - 1)
735 inferReturnType += ",";
736 }
737 inferReturnType += ">";
739 }
740 }
741
742 fGC += " infer(" + GenerateInferSignature() + "){\n";
743
744 std::string doInferArgs = GenerateInferSignature(false);
745 if (!doInferArgs.empty())
746 doInferArgs += ",";
747 for (std::string const &name : fOutputTensorNames) {
748 fGC += SP + "std::vector<" + typeForOutput(GetTensorType(name)) + " > output_tensor_" + name + ";\n";
749 doInferArgs += " output_tensor_" + name + ",";
750 }
751 if (!doInferArgs.empty())
752 doInferArgs.back() = ' ';
753
754 fGC += SP + "doInfer(" + doInferArgs + ");\n";
755
756 fGC += SP + "return {";
757 for (size_t i = 0; i < fOutputTensorNames.size(); i++) {
758 fGC += "output_tensor_" + fOutputTensorNames[i];
759 if (i < fOutputTensorNames.size() - 1)
760 fGC += ",";
761 }
762 fGC += "};\n";
763 fGC += "}\n"; // end of infer function scope
764}
765
// Emit the full Session struct: doInfer signature, tensor declarations,
// intermediate memory pool and positioning, operator declarations, subgraph
// session members, the Session constructor (weight reading, shape-parameter
// defaults, dynamic-tensor allocation, operator init code), the doInfer body
// (per-operator generated code plus output filling), and the infer wrapper.
// NOTE(review): this scraped listing is missing the function signature
// (original line 766) and numerous interior lines (769, 780, 788, 790,
// 795-796, 805, 808, 812, 814, 839, 842, 863, 869, 900, 902, 909, 912) —
// among them the calls emitting initialized/intermediate tensor declarations,
// the weight-file extension choice, the weight-reading call and the infer
// overload generation; confirm the exact sequence against the full source.
767{
768 // Determine the signature of the actual inference function
770 if (!doInferSignature.empty())
771 doInferSignature += ", ";
772 for (auto const &name : fOutputTensorNames) {
773 doInferSignature += " std::vector<" + typeForOutput(GetTensorType(name)) + "> &output_tensor_" + name + ",";
774 }
775 doInferSignature.back() = ' ';
776
777 doInferSignature = "void doInfer(" + doInferSignature + ")";
778
779 // define the Session struct (for GNN this is generated in RModel_GNN)
781 if (!fIsSubGraph)
782 fGC += "struct Session {\n";
783 else
784 fGC += "struct Session_" + fName + " {\n";
785 }
786
787 // generate code for declaring the initialized tensors
789
791 // evaluate total intermediate memory and position intermediate tensor addresses
792 std::string intermediate_memory_alloc_string = "";
793 intermediate_memory_alloc_string += "\n// --- Positioning intermediate tensor memory --";
794 for (size_t op_idx = 0; op_idx < fOperators.size(); ++op_idx) {
797 }
798
799 // to check remaining unused fragments after memory allocation (lesser the better)
800 // for (const auto &it: fIntermediateMemoryInfo.available_stack){
801 // std::cout<<"chunk_idx: "<<it.first<<", chunk_size: "<<it.second<<"\n";
802 // }
803
804 // generate the memory pool to be used by intermediate tensors
806
807 // position intermediate tensors
809 }
810
811 // generate the declaring the intermediate tensors
813 // generate code for declarations of some specific operators
815


817
818 // add subgraph session
819 if (!fSubGraphs.empty()) fGC += "// subgraph sessions\n";
820 for (auto & graph : fSubGraphs) {
821 fGC += "Session_" + graph->fName + " fSession_" + graph->fName + ";\n";
822 }
823
824 // Generate code for Session constructor
825 if (fUseSession) {
826 std::string sessionName = "Session";
827 if (fIsSubGraph)
828 sessionName += "_" + fName;
829 // add here specific operator code that needs to define session data members
830 fGC += "\n";
831 for (size_t id = 0; id < fOperators.size(); id++) {
832 std::string opName = std::to_string(id);
833 fGC += fOperators[id]->GenerateSessionMembersCode(opName);
834 }
835 fGC += "\n";
836 // here add initialization and reading of weight tensors
837 if (fUseWeightFile) {
838 std::string fileName = fName;
840 fileName += ".dat";
841 }
843 fileName += ".root";
844 }
845 fGC += sessionName + "(std::string filename =\"" + fileName + "\"";
846 } else {
847 // no need to pass weight file since it is not used
848 // keep passing a string for compatibility
849 fGC += sessionName + "(std::string = \"\"";
850 }
851 // add initialization of shape parameters
852 // assume all parameters are of type size_t
853 if (!fShapeParams.empty()) {
854 for (auto &p : fShapeParams) {
855 fGC += ",\n";
856 fGC += " size_t " + p.first + " = " + p.second;
857 }
858 }
859 fGC += ") {\n";
860
861 if (fUseWeightFile) {
862 fGC += "\n//--- reading weights from file\n";
864 fGC += "\n";
865 // fUseWeightFile = fUseWeightFile;
866 }
867
868 // now we have passed the parameters we can allocate the dynamic tensors
870
871 // add here initialization code for operator
872 for (size_t id = 0; id < fOperators.size(); id++) {
873 fGC += fOperators[id]->GenerateInitCode();
874 }
875
876 fGC += "}\n\n";
877 }
878
879 fGC += doInferSignature + "{\n";
880 fGC += "\n";
881
882 // generate the inference code
883 if (fVerbose)
884 std::cout << "Generating main inference code for " << fName << std::endl;
885
886 if (fOutputTensorNames.size() == 0)
887 throw std::runtime_error("TMVA-SOFIE: output size=0 are not supported");
888
889 for (size_t op_idx = 0; op_idx < fOperators.size(); ++op_idx) {
890 if (fVerbose)
891 std::cout << "Generating code for operator .... " << op_idx << std::endl;
892 fGC += (fOperators[op_idx]->Generate(std::to_string(op_idx)));
893 }
894
895 fGC += SP + "using TMVA::Experimental::SOFIE::UTILITY::FillOutput;\n\n";
896
897 for (std::string const &name : fOutputTensorNames) {
898 // need to check is size is the same (don't want to return a vector with
899 // larger size) in that case better to copy
901 std::string n = isIntermediate ? std::to_string(ConvertShapeToLength(GetTensorShape(name)))
903 fGC += SP + "FillOutput(tensor_" + name + ", output_tensor_" + name + ", " + n + ");\n";
904 }
905
906 fGC += "}\n\n";
907
908 // generate the inference overload that returns an output struct
910
911 // end of session
913 fGC += "}; // end of Session\n\n";
914 }
915}
916
// Top-level code generation entry point: decode the Options bit flags, run
// Initialize, generate code for sub-graphs first, then the main session code,
// wrapping everything in the model's header guard / namespace for standalone
// (non-GNN-component, non-subgraph) models.
// NOTE(review): this scraped listing is missing original lines 926, 930, 934,
// 952 and 967 — likely the option bookkeeping, the header/guard generation
// that sets `hgname`, and the GenerateSessionCode() call; confirm against the
// full source.
917void RModel::Generate(std::underlying_type_t<Options> options, int batchSize, long pos, bool verbose)
918{
919 fVerbose = verbose;
920 fBatchSize = batchSize;
921 fReadPos = pos;
922
923 // session flag is used in operator initialize
924 if (static_cast<std::underlying_type_t<Options>>(Options::kNoSession) & options) {
925 fUseSession = false;
927 }
928 if (static_cast<std::underlying_type_t<Options>>(Options::kNoWeightFile) & options) {
929 fUseWeightFile = false;
931 }
932 if (static_cast<std::underlying_type_t<Options>>(Options::kRootBinaryWeightFile) & options) {
933 fUseWeightFile = true;
935 }
936 if (fUseWeightFile && !fUseSession) {
937 throw std::runtime_error(
938 "TMVA-SOFIE: RModel::Generate: cannot use a separate weight file without generating a Session class");
939 }
940
941 if (static_cast<std::underlying_type_t<Options>>(Options::kGNN) & options)
942 fIsGNN = true;
943 if (static_cast<std::underlying_type_t<Options>>(Options::kGNNComponent) & options)
944 fIsGNNComponent = true;
945
946 // initialize the model including all operators and sub-graphs
947 Initialize(batchSize, verbose);
948
949 std::string hgname;
950 if (!fIsGNNComponent && !fIsSubGraph) {
951 fGC.clear();
953 }
954
955 // generate first code for the subgraphs
956 for (auto &graph : fSubGraphs) {
957 if (fVerbose)
958 std::cout << "generate session code for subgraph " << graph->fName << std::endl;
959 graph->GenerateSessionCode();
960 fGC += graph->fGC;
961 }
962
963 if (fVerbose)
964 std::cout << "generate Main session code - model " << fName << std::endl;
965
966 // generate main session code
968
969 if (!fIsGNNComponent && !fIsSubGraph) {
970 fGC += ("} //TMVA_SOFIE_" + fName + "\n");
971 fGC += "\n#endif // " + hgname + "\n";
972 }
973}
974
// Body of the generator that emits C++ code (into fGC) to read the initialized
// (weight) tensors back from a text data file at inference time.
// NOTE(review): the function signature line is missing from this extraction
// (original line ~977) — presumably `void RModel::ReadInitializedTensorsFromFile(long pos)`;
// TODO confirm against the repository source.
976 // generate the code to read initialized tensors from a text data file
978 if (fInitializedTensors.empty()) return;
979
// Emitted code: open the weight file and fail loudly if it cannot be opened.
980 fGC += " std::ifstream f;\n";
981 fGC += " f.open(filename);\n";
982 fGC += " if (!f.is_open()) {\n";
983 fGC += " throw std::runtime_error(\"tmva-sofie failed to open file \" + filename + \" for input weights\");\n";
984 fGC += " }\n";
985
// GNN components share one file: seek to this component's start offset.
986 if(fIsGNNComponent) {
987 fGC += " f.seekg(" + std::to_string(pos) + ");\n";
988 }
989
990 fGC += " std::string tensor_name;\n";
991 fGC += " size_t length;\n";
992
993 // loop on tensors and parse the file
994 for (auto& i: fInitializedTensors) {
995 // skip Constant and shape tensors (not written in a file)
996 if (!i.second.IsWeightTensor()) continue;
997 std::string tensor_name = "tensor_" + i.first;
998 if (i.second.type() == ETensorType::FLOAT) {
999 size_t length = 1;
1000 length = ConvertShapeToLength(i.second.shape());
1001 std::string slength = std::to_string(length);
// Emitted code: verify the tensor name and length recorded in the file match
// what the generated model expects, then stream in the values.
1002 fGC += " f >> tensor_name >> length;\n";
1003 fGC += " if (tensor_name != \"" + tensor_name + "\" ) {\n";
1004 fGC += " std::string err_msg = \"TMVA-SOFIE failed to read the correct tensor name; expected name is " +
1005 tensor_name + " , read \" + tensor_name;\n";
1006 fGC += " throw std::runtime_error(err_msg);\n";
1007 fGC += " }\n";
1008 fGC += " if (length != " + slength + ") {\n";
1009 fGC += " std::string err_msg = \"TMVA-SOFIE failed to read the correct tensor size; expected size is " +
1010 slength + " , read \" + std::to_string(length) ;\n";
1011 fGC += " throw std::runtime_error(err_msg);\n";
1012 fGC += " }\n";
1013 fGC += " for (size_t i = 0; i < length; ++i)\n";
1014 fGC += " f >> " + tensor_name + "[i];\n";
1015 fGC += " if (f.fail()) {\n";
1016 fGC += " throw std::runtime_error(\"TMVA-SOFIE failed to read the values for tensor " + tensor_name + "\");\n";
1017 fGC += " }\n";
1018 } else {
// NOTE(review): BUG — missing `throw`: this std::runtime_error is constructed
// and immediately discarded, so unsupported tensor types are silently skipped.
1019 std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be read from a file");
1020 }
1021 }
1022 fGC += " f.close();\n";
1023 }
1024
// Body of the generator that emits C++ code (into fGC) to read the weight
// tensors back from a ROOT binary file at inference time; only compiled in
// when SOFIE_SUPPORT_ROOT_BINARY is defined.
// NOTE(review): the enclosing function signature is missing from this
// extraction (original line ~1026); TODO confirm against the repository source.
1025 // generate the code to read initialized tensors from a ROOT data file
1027#ifdef SOFIE_SUPPORT_ROOT_BINARY
1028 fGC += " {\n";
1029 fGC += " std::unique_ptr<TFile> rootFile(TFile::Open(filename.c_str(), \"READ\"));\n";
1030 fGC += " if (!rootFile->IsOpen()) {\n";
1031 fGC += " throw std::runtime_error(\"tmva-sofie failed to open ROOT file for input weights\");\n";
1032 fGC += " }\n";
1033
// Weights live in a per-model directory "<modelName>_weights" inside the file.
1034 std::string dirName = fName + "_weights";
1035 fGC += " if (!rootFile->GetKey(\"" + dirName + "\")) {\n";
1036 fGC += " throw std::runtime_error(\"tmva-sofie failed to open ROOT directory for input weights\");\n";
1037 fGC += " }\n";
1038
1039 for (auto &i : fInitializedTensors) {
1040 // skip Constant and shape tensors
1041 if (!i.second.IsWeightTensor()) continue;
1042 fGC += " {\n";
1043 std::string tensor_name = "tensor_" + i.first;
// Emitted code fetches the stored std::vector<T> object and copies it into the
// Session member fTensor_<name>. The reinterpret_cast relies on the object
// having been written as exactly that vector type (see the matching writer).
1044 if (i.second.type() == ETensorType::FLOAT) {
1045 fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<float>*>(rootFile->Get(\"";
1046 fGC += dirName + "/" + tensor_name + "\"));\n";
1047 } else if (i.second.type() == ETensorType::DOUBLE) {
1048 fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<double>*>(rootFile->Get(\"";
// NOTE(review): stray `+ +` — the second `+` is a unary plus on the string
// literal (pointer decay), harmless but almost certainly a typo.
1049 fGC += dirName + + "/" + tensor_name + "\"));\n";
1050 } else if (i.second.type() == ETensorType::INT64) {
1051 fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<int64_t>*>(rootFile->Get(\"";
1052 fGC += dirName + "/" + tensor_name + "\"));\n";
1053 } else {
// NOTE(review): BUG — missing `throw`: this std::runtime_error is constructed
// and discarded, so unsupported tensor types are silently skipped.
1054 std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be read from a ROOT file");
1055 }
1056 fGC += " }\n";
1057 }
1058 fGC += " }\n";
1059#else
1060 throw std::runtime_error("SOFIE was not built with ROOT file support.");
1061#endif // SOFIE_SUPPORT_ROOT_BINARY
1062 }
1063}
1064
// Body of RModel::WriteInitializedTensorsToFile: writes the weight tensors to
// a file (ROOT binary or text, depending on fWeightFile) and returns the text
// stream position after writing (-1 for the ROOT / unsupported paths).
// NOTE(review): the function signature (original line ~1065) and the switch
// `case WeightFileType::...:` labels (lines 1069/1072/1075) are missing from
// this extraction — the bare statements below are the case bodies; TODO
// confirm label order (None / RootBinary / Text is the presumed mapping).
1066 // Determine the file extension based on the weight file type
1067 std::string fileExtension;
1068 switch (fWeightFile) {
1070 fileExtension = ".dat";
1071 break;
1073 fileExtension = ".root";
1074 break;
1076 fileExtension = ".dat";
1077 break;
1078 }
1079
1080 // If filename is empty, use the model name as the base filename
// NOTE(review): the assignment inside this branch (original line 1082,
// presumably `filename = fName + fileExtension;`) is missing from this listing.
1081 if (filename.empty()) {
1083 }
1084
1085 // Write the initialized tensors to the file
1087#ifdef SOFIE_SUPPORT_ROOT_BINARY
1088 if(fIsGNNComponent || fIsGNN) {
1089 throw std::runtime_error("SOFIE-GNN yet not supports writing to a ROOT file.");
1090 }
1091 std::unique_ptr<TFile> outputFile(TFile::Open(filename.c_str(), "UPDATE"));
1092
1093 std::string dirName = fName + "_weights";
1094 // check if directory exists, in case delete to replace with new one
1095 if (outputFile->GetKey(dirName.c_str()))
1096 outputFile->rmdir(dirName.c_str());
1097
1098 auto outputDir = outputFile->mkdir(dirName.c_str());
1099
1100 for (const auto& item : fInitializedTensors) {
1101 // skip Constant tensors and tensors which are not writable (e.g. shape tensors)
1102 if (!item.second.IsWeightTensor()) continue;
1103 std::string tensorName = "tensor_" + item.first;
1104 size_t length = 1;
1105 length = ConvertShapeToLength(item.second.shape());
// Each tensor is stored as a std::vector<T> object named "tensor_<name>";
// the matching reader reinterpret_casts it back to the same vector type.
1106 if(item.second.type() == ETensorType::FLOAT) {
1107 const float* data = item.second.data<float>();
1108 std::vector<float> tensorDataVector(data, data + length);
1109 outputDir->WriteObjectAny(&tensorDataVector, "std::vector<float>", tensorName.c_str());
1110 }
1111 else if(item.second.type() == ETensorType::DOUBLE) {
1112 const double* data = item.second.data<double>();
1113 std::vector<double> tensorDataVector(data, data + length);
1114 outputDir->WriteObjectAny(&tensorDataVector, "std::vector<double>", tensorName.c_str());
1115 }
1116 else if(item.second.type() == ETensorType::INT64) {
1117 const int64_t* data = item.second.data<int64_t>();
1118 std::vector<int64_t> tensorDataVector(data, data + length);
1119 outputDir->WriteObjectAny(&tensorDataVector, "std::vector<int64_t>", tensorName.c_str());
1120 }
1121 else {
// NOTE(review): BUG — missing `throw`: this std::runtime_error is constructed
// and discarded, so unsupported tensor types are silently dropped from the file.
1122 std::runtime_error("tmva-sofie tensor " + tensorName + " with type " + ConvertTypeToString(item.second.type()) +
1123 " cannot be written to a ROOT file");
1124 }
1125 }
1126 outputFile->Write(filename.c_str());
1127
1128 // this needs to be changed, similar to the text file
1129 return -1;
1130
1131#else
1132 throw std::runtime_error("SOFIE was not built with ROOT file support.");
1133#endif // SOFIE_SUPPORT_ROOT_BINARY
1134 } else if (fWeightFile == WeightFileType::Text) {
1135 std::ofstream f;
1136 if(fIsGNNComponent) {
1137 // appending all GNN components into the same file
1138 f.open(filename, std::ios::app);
1139 } else {
1140 f.open(filename);
1141 }
1142 if (!f.is_open())
1143 throw
1144 std::runtime_error("tmva-sofie failed to open file " + filename + " for tensor weight data");
1145 for (auto& i: fInitializedTensors) {
1146 // skip Constant tensors and not writable tensors (e.g. shape tensors)
1147 if (!i.second.IsWeightTensor()) {
1148 continue;
1149 }
1150 size_t length = ConvertShapeToLength(i.second.shape());
1151 std::string tensor_name = "tensor_" + i.first;
// Text format: one header line "tensor_<name> <length>" followed by the values.
1152 f << tensor_name << " " << length << "\n";
1153 if (i.second.type() == ETensorType::FLOAT) {
1154 const float * data = i.second.data<float>();
1155 for (size_t idx = 0; idx < length; idx++) {
1156 // round to zero sub-normal values
1157 float value = data[idx];
1158 if (value != 0. && std::abs(value) < std::numeric_limits<float>::min() ) value = 0;
// max_digits10 guarantees the float round-trips exactly through text.
1159 f << std::setprecision(std::numeric_limits<float>::max_digits10) << value;
1160 f << ( (idx < length-1) ? " " : "\n" );
1161 }
1162 }
1163 else {
// NOTE(review): BUG — missing `throw` here ...
1164 std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be written to a file");
1165 }
// NOTE(review): ... and here: both errors are constructed and discarded,
// so write failures and unsupported types pass silently.
1166 if (f.fail())
1167 std::runtime_error("tmva-sofie failed to write tensor data to file for " + tensor_name);
1168 }
// Return the stream position so GNN components appended later can seek to it.
1169 long curr_pos = f.tellp();
1170 f.close();
1171 return curr_pos;
1172 } else {
1173 return -1;
1174 }
1175}
1176
// Body of RModel::PrintRequiredInputTensors: prints name, type and shape of
// every model input — first the parametrised inputs (shape entries may be
// symbolic parameters), then the fully specified ones.
// NOTE(review): the function signature line (original ~1177) is missing from
// this extraction.
1178 std::cout << "Model requires following inputs:\n";
1179 for (auto& inputInfo: fInputTensorInfos) {
1180 std::cout << "Parametrised Tensor name: " << inputInfo.first << "\t";
1181 std::cout << "type: " << ConvertTypeToString(inputInfo.second.type) << "\t";
1182 std::cout << "shape: [";
1183 for (size_t i = 0; i < inputInfo.second.shape.size(); i++) {
// A dimension is either a named parameter (e.g. batch size) or a fixed number.
1184 if (inputInfo.second.shape[i].isParam) {
1185 std::cout << inputInfo.second.shape[i].param;
1186 } else {
1187 std::cout << inputInfo.second.shape[i].dim ;
1188 }
1189 if (i < inputInfo.second.shape.size() - 1) std::cout << ",";
1190 }
1191 std::cout << "]" << std::endl;
1192 }
1193
1194 for (auto& inputInfo: fReadyInputTensorInfos) {
1195 std::cout << "Fully Specified Tensor name: " << inputInfo.first << "\t";
1196 std::cout << "type: " << ConvertTypeToString(inputInfo.second.type) << "\t";
1197 std::cout << "shape: [";
1198 for (size_t i = 0; i < inputInfo.second.shape.size(); i++) {
1199 std::cout << inputInfo.second.shape[i];
1200 if (i < inputInfo.second.shape.size() - 1) std::cout << ",";
1201 }
1202 std::cout << "]" << std::endl;
1203 }
1204 std::cout << "\n";
1205}
1206
// Body of RModel::PrintInitializedTensors: prints name, type and shape of all
// initialized (weight/constant) tensors, tagging Constant and non-writable ones.
// NOTE(review): the function signature line (original ~1207) is missing from
// this extraction.
1208 std::cout << "Model initialized the following tensors:\n";
1209 for (auto& it: fInitializedTensors) {
1210 std::cout << "Tensor name: \"" << it.first << "\"\t";
1211 std::cout << "type: " << ConvertTypeToString(it.second.type()) << "\t";
1212 std::cout << "shape: [";
1213 for (size_t i = 0; i < it.second.shape().size(); i++) {
1214 std::cout << it.second.shape()[i];
1215 if (i < it.second.shape().size() - 1) std::cout << ",";
1216 }
1217 std::cout << "]";
// Annotate tensors that are not written to the weight file.
1218 if (it.second.IsConstantTensor()) std::cout << " (Constant)";
1219 else if (!it.second.IsWeightTensor()) std::cout << " (Not Writable)";
1220 std::cout << std::endl;
1221 }
1222 std::cout << "\n";
1223}
1224
// Body of RModel::PrintIntermediateTensors: prints name, type and (static)
// shape of all intermediate tensors produced by the operators.
// NOTE(review): the function signature line (original ~1225) is missing from
// this extraction.
1226 std::cout << "Model specify the following intermediate tensors:\n";
1227 for (auto& it: fIntermediateTensorInfos) {
1228 std::cout << "Tensor name: \"" << it.first << "\"\t";
1229 std::cout << "type: " << ConvertTypeToString(it.second.type) << "\t";
1230 std::cout << "shape: [";
1231 for (size_t i = 0; i < it.second.shape.size(); i++) {
1232 std::cout << it.second.shape[i];
1233 if (i < it.second.shape.size() - 1) std::cout << ",";
1234 }
1235 std::cout << "]" << std::endl;
1236 }
1237 std::cout << "\n";
1238}
1239
// Body of RModel::PrintDynamicTensors: prints name, type and symbolic shape
// (Dim::GetVal) of all dynamic tensors.
// NOTE(review): the function signature line (original ~1240) is missing from
// this extraction.
1241 std::cout << "Model specify the following dynamic tensors:\n";
1242 for (auto& it: fDynamicTensorInfos) {
1243 std::cout << "Tensor name: \"" << it.first << "\"\t";
1244 std::cout << "type: " << ConvertTypeToString(it.second.type) << "\t";
1245 std::cout << "shape: [";
1246 for (size_t i = 0; i < it.second.shape.size(); i++) {
// GetVal() renders either the numeric dimension or its parameter name.
1247 std::cout << it.second.shape[i].GetVal();
1248 if (i < it.second.shape.size() - 1) std::cout << ",";
1249 }
1250 std::cout << "]" << std::endl;
1251 }
1252 std::cout << "\n";
1253}
1254
// Body of RModel::PrintOutputTensors: prints each output tensor's name and
// shape, using the dynamic-shape formatter for dynamic tensors.
// NOTE(review): the function signature line (original ~1255) is missing from
// this extraction.
1256 std::cout << "Model specify the following output tensors:\n";
1257 for (auto& it: fOutputTensorNames) {
1258 std::cout << "Tensor name: \"" << it << "\"\t";
1259 if (!IsDynamicTensor(it))
1260 std::cout << "shape: " << ConvertShapeToString(GetTensorShape(it)) << std::endl;
1261 else
1262 std::cout << "shape: " << ConvertDynamicShapeToString(GetDynamicTensorShape(it)) << std::endl;
1263 }
1264 std::cout << "\n";
1265}
1266
// Body of RModel::HeadInitializedTensors(name, n_print): prints the metadata
// and the first n_print values of one initialized tensor (float only).
// NOTE(review): the function signature line (original ~1267) is missing from
// this extraction.
1268 auto it = fInitializedTensors.find(name);
1269 if (it == fInitializedTensors.end()) {
1270 std::cout << "Tensor " << name << " not found in model's initialized tensor list" << std::endl;
1271 return;
1272 }
1273
1274 std::cout << "Tensor name: " << it->first << "\t";
1275 std::cout << "type: " << ConvertTypeToString(it->second.type()) << "\t";
1276 int length =1;
1277 std::cout << "shape: [";
1278 for (size_t i = 0; i < it->second.shape().size(); i++) {
1279 std::cout << it->second.shape()[i];
// Accumulate the total element count while printing the shape.
1280 length *= it->second.shape()[i];
1281 if (i < it->second.shape().size() - 1) std::cout << ",";
1282 }
1283 std::cout << "]" << std::endl;
// ellipsis marks that the printout is truncated (n_print < total length).
1284 bool ellipsis = true;
1285 if (n_print > length) {
1286 n_print = length;
1287 ellipsis = false;
1288 }
1289
1290 std::cout << "data: [" << std::endl;
// NOTE(review): only FLOAT data is printed; for other types the data list is
// empty, yet ", ..." is still appended when ellipsis is true — minor quirk.
1291 if (it->second.type() == ETensorType::FLOAT) {
1292 auto converted_data = it->second.data<float>();
1293 for (int i =0; i < n_print; i++) {
1294 std::cout << converted_data[i];
1295 if (i < n_print - 1) std::cout << " ,";
1296 }
1297 }
1298 if (ellipsis) std::cout << ", ...";
1299 std::cout << "]" << std::endl;
1300
1301}
1302
/// Write the generated inference code to file (via the base-class machinery)
/// and, when a weight file is used, derive the weight-file name from the code
/// filename (.hxx -> .dat or .root) or from the model name, then write the
/// tensors out.
// NOTE(review): this extraction drops several original lines (1305, 1311,
// 1313, 1321) — presumably the base-class OutputGenerated call, the
// per-WeightFileType branching around the filename edits, and the
// WriteInitializedTensorsToFile(filename) call; TODO confirm.
1303void RModel::OutputGenerated(std::string filename, bool append) {
1304
1306
1307 // write weights in a text file
1308 if (fUseWeightFile) {
1309 if (!filename.empty()) {
// Replace the generated code extension with the weight-file extension.
1310 size_t pos = filename.find(".hxx");
1312 filename.replace(pos, 4, ".dat");
1314 filename = filename.erase(pos, 4);
1315 filename += ".root";
1316 }
1317 } else {
// No code filename given: fall back to "<modelName>.dat" / "<modelName>.root".
1318 filename = fName;
1319 filename += fWeightFile == WeightFileType::Text ? ".dat" : ".root";
1320 }
1322 }
1323}
1324
1325void RModel::Streamer(TBuffer &R__b) {
1326 if (R__b.IsReading()) {
1327 RModel::Class()->ReadBuffer(R__b, this);
1328 for (auto & i : fInitializedTensors) {
1329 i.second.CastPersistentToShared();
1330 }
1331 }
1332 else {
1333 for (auto & i : fInitializedTensors) {
1334 i.second.CastSharedToPersistent();
1335 }
1336 RModel::Class()->WriteBuffer(R__b, this);
1337 }
1338}
1339
1340}//SOFIE
1341}//Experimental
1342}//TMVA
#define d(i)
Definition RSha256.hxx:102
#define f(i)
Definition RSha256.hxx:104
#define e(i)
Definition RSha256.hxx:103
size_t size(const MatrixT &matrix)
retrieve the size of a square matrix
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows the typed iteration through a TCollection.
winID h TVirtualViewer3D TVirtualGLPainter p
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void data
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void input
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char filename
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t r
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t index
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h length
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize id
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void value
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h Atom_t Int_t ULong_t ULong_t unsigned char prop_list Atom_t Atom_t Atom_t Time_t type
char name[80]
Definition TGX11.cxx:110
const_iterator begin() const
const_iterator end() const
Buffer base class used for serializing objects.
Definition TBuffer.h:43
static TFile * Open(const char *name, Option_t *option="", const char *ftitle="", Int_t compress=ROOT::RCompressionSetting::EDefaults::kUseCompiledDefault, Int_t netopt=0)
Create / open a file.
Definition TFile.cxx:4131
void GenerateHeaderInfo(std::string &hgname)
void OutputGenerated(std::string filename="", bool append=false)
void AddBlasRoutines(std::vector< std::string > routines)
void AddNeededStdLib(std::string libname)
std::unordered_map< std::string, DynamicTensorInfo > fDynamicTensorInfos
Definition RModel.hxx:27
bool IsDynamicTensor(const std::string &name) const
Definition RModel.cxx:186
void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector< Dim > dim_shape)
Definition RModel.cxx:200
std::string GenerateInferSignature(bool isdecl=true)
Definition RModel.cxx:665
bool CheckIfTensorAlreadyExist(std::string tensor_name)
Definition RModel.cxx:95
std::vector< std::unique_ptr< ROperator > > fOperators
Definition RModel.hxx:33
void OutputGenerated(std::string filename="", bool append=false)
Definition RModel.cxx:1303
void AddInputTensorInfo(std::string input_name, ETensorType type, std::vector< Dim > shape)
Definition RModel.cxx:105
std::unordered_map< std::string, TensorInfo > fIntermediateTensorInfos
Definition RModel.hxx:26
void AddOutputTensorNameList(std::vector< std::string > output_tensor_names)
Definition RModel.cxx:238
std::unordered_map< std::string, TensorInfo > fReadyInputTensorInfos
Definition RModel.hxx:24
void AddConstantTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
Definition RModel.cxx:165
void AddDynamicTensor(std::string tensor_name, ETensorType type, std::vector< Dim > shape)
Definition RModel.cxx:217
void AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
Definition RModel.cxx:155
std::unordered_map< std::string_view, size_t > fIntermediateTensorFrequencyLookup
! lookup table for intermediate tensor frequency (transient)
Definition RModel.hxx:40
void AddInputTensorName(std::string name)
Definition RModel.cxx:124
std::vector< std::string > fOutputTensorNames
Definition RModel.hxx:30
const ETensorType & GetTensorType(std::string name) const
Definition RModel.cxx:67
bool IsDimInputTensor(const std::string &name) const
Definition RModel.cxx:190
bool IsInitializedTensor(const std::string &name) const
Definition RModel.cxx:175
const std::vector< size_t > & GetTensorShape(std::string name) const
Definition RModel.cxx:29
void CheckAndFlushIntermediateMemory(std::span< const std::string_view > op_output_tensors, const size_t &op_idx)
Definition RModel.cxx:334
void AddOperator(std::unique_ptr< ROperator > op, int order_execution=-1)
Definition RModel.cxx:128
void HeadInitializedTensors(std::string name, int n_print=50)
Definition RModel.cxx:1267
bool IsConstantTensor(const std::string &name) const
Definition RModel.cxx:179
void Initialize(int batchSize=-1, bool verbose=false)
Definition RModel.cxx:373
long WriteInitializedTensorsToFile(std::string filename="")
Definition RModel.cxx:1065
OptimizationLevel fOptimizationLevel
Definition RModel.hxx:21
void Generate(std::underlying_type_t< Options > options, int batchSize=-1, long pos=0, bool verbose=false)
Definition RModel.cxx:917
std::unordered_map< std::string, InputTensorInfo > fInputTensorInfos
Definition RModel.hxx:23
std::shared_ptr< void > GetInitializedTensorData(std::string tensor_name)
Definition RModel.cxx:261
std::vector< Dim > GetDynamicTensorShape(std::string name) const
Definition RModel.cxx:55
MemoryPoolInfo fIntermediateMemoryInfo
! intermediate memory info (transient)
Definition RModel.hxx:39
std::string AllocateIntermediateMemory(std::span< const std::string_view > op_output_tensors)
Definition RModel.cxx:278
void InitializeSubGraph(std::shared_ptr< RModel > graph)
Definition RModel.cxx:484
std::unordered_map< std::string, std::string > fShapeParams
Definition RModel.hxx:29
void SetNotWritableInitializedTensor(const std::string &tensor_name)
Definition RModel.cxx:270
std::vector< std::string > fInputTensorNames
Definition RModel.hxx:31
std::unordered_map< std::string, InitializedTensor > fInitializedTensors
Definition RModel.hxx:25
void UpdateInitializedTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
Definition RModel.cxx:252
std::vector< std::shared_ptr< RModel > > fSubGraphs
! sub-graph models (transient)
Definition RModel.hxx:35
bool IsReadyInputTensor(const std::string &name) const
Definition RModel.cxx:194
void UpdateOutputTensorList(std::vector< std::string > curr_output_tensor, std::vector< std::string > modify_output_tensor)
Definition RModel.cxx:245
const Int_t n
Definition legend1.C:16
std::string Clean_name(std::string input_tensor_name)
std::vector< Dim > ConvertShapeToDim(std::vector< size_t > shape)
Convert shape from integer format to dynamic one (based on Dim)
std::string ConvertDynamicShapeToLength(std::vector< Dim > shape)
constexpr size_t GetTypeSize(ETensorType type)
std::string ConvertValuesToString(size_t n, const T *data)
std::string ConvertShapeToString(std::vector< size_t > shape)
std::string GenerateConstantTensorCode(const std::pair< std::string, InitializedTensor > &t)
Definition RModel.cxx:516
std::string ConvertTypeToString(ETensorType type)
std::string ConvertDynamicShapeToString(std::vector< Dim > shape)
std::underlying_type_t< Options > operator|(Options opA, Options opB)
Definition RModel.cxx:22
std::vector< size_t > ConvertShapeToInt(std::vector< Dim > shape)
Convert shape based on Dim to integer format.
std::string ConvertValToString(T value)
std::size_t ConvertShapeToLength(std::vector< size_t > shape)
create variable transformations
std::map< size_t, TensorMemoryInfo > total_stack
std::map< size_t, size_t > available_stack