Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
RModel.cxx
Go to the documentation of this file.
1#include <limits>
2#include <algorithm>
3#include <cctype>
4#include <memory>
5#include <string>
6
7#include "TFile.h"
8
9#include "TMVA/RModel.hxx"
10#include "TMVA/SOFIE_common.hxx"
11
12namespace TMVA {
13namespace Experimental {
14namespace SOFIE {
15
16std::underlying_type_t<Options> operator|(Options opA, Options opB) {
17 return static_cast<std::underlying_type_t<Options>>(opA) | static_cast<std::underlying_type_t<Options>>(opB);
18}
19std::underlying_type_t<Options> operator|(std::underlying_type_t<Options> opA, Options opB) {
20 return opA | static_cast<std::underlying_type_t<Options>>(opB);
21}
22
24 fInputTensorInfos = std::move(other.fInputTensorInfos);
25 fReadyInputTensorInfos = std::move(other.fReadyInputTensorInfos);
26 fOutputTensorNames = other.fOutputTensorNames;
27 fInputTensorNames = other.fInputTensorNames;
28 fOperators = std::move(other.fOperators);
29 fInitializedTensors = std::move(other.fInitializedTensors);
30 fIntermediateTensorInfos = std::move(other.fIntermediateTensorInfos);
31 fName = other.fName;
32 fFileName = other.fFileName;
33 fParseTime = other.fParseTime;
34 fGC = other.fGC;
35 fNeededBlasRoutines = other.fNeededBlasRoutines;
36 fNeededStdLib = other.fNeededStdLib;
37}
38
40 fInputTensorInfos = std::move(other.fInputTensorInfos);
41 fReadyInputTensorInfos = std::move(other.fReadyInputTensorInfos);
42 fOutputTensorNames = other.fOutputTensorNames;
43 fInputTensorNames = other.fInputTensorNames;
44 fOperators = std::move(other.fOperators);
45 fInitializedTensors = std::move(other.fInitializedTensors);
46 fIntermediateTensorInfos = std::move(other.fIntermediateTensorInfos);
47 fName = other.fName;
48 fFileName = other.fFileName;
49 fParseTime = other.fParseTime;
50 fGC = other.fGC;
51 fNeededBlasRoutines = other.fNeededBlasRoutines;
52 fNeededStdLib = other.fNeededStdLib;
53 return *this;
54}
55
// Return the fixed (integer) shape of the tensor with the given name.
// Lookup order: ready input tensors, initialized (weight/constant) tensors,
// parametric input tensors (error: shape not yet resolved), intermediate tensors.
// Throws for dynamic tensors (use GetDynamicTensorShape) or unknown names.
const std::vector<size_t>& RModel::GetTensorShape(std::string name) {
   auto f = fReadyInputTensorInfos.find(name);
   if (f != fReadyInputTensorInfos.end()) {
      return f->second.shape;
   }
   auto f2 = fInitializedTensors.find(name);
   if (f2 != fInitializedTensors.end()) {
      return f2->second.shape();
   }
   auto f3 = fInputTensorInfos.find(name);
   if (f3 != fInputTensorInfos.end()) {
      // the tensor exists as an input but still has a symbolic dimension parameter
      throw std::runtime_error("TMVA SOFIE tensor [" + name + "] is an input tensor with unspecified dimension parameter");
   }
   auto f4 = fIntermediateTensorInfos.find(name);
   if (f4 != fIntermediateTensorInfos.end()) {
      return f4->second.shape;
   }
   // NOTE(review): the source listing this was extracted from dropped the
   // dynamic-tensor lookup that guards the next throw — verify the control
   // flow below against the upstream file before relying on it.
   throw std::runtime_error("TMVA SOFIE tensor [" + name + "] is a dynamic tensor. Use GetDynamicTensorShape instead of GetTensorShape");

   throw std::runtime_error("TMVA SOFIE tensor [" + name + "] for which the shape is requested is not found");
}
81
// Return the shape of a tensor as a vector of Dim, which can carry symbolic
// (parametric) dimensions. Returned by value because a converted shape may
// have to be built on the fly for non-dynamic tensors.
std::vector<Dim> RModel::GetDynamicTensorShape(std::string name) {
   if (auto f = fDynamicTensorInfos.find(name); f != fDynamicTensorInfos.end()) {
      return f->second.shape;
   }
   if (auto f = fInputTensorInfos.find(name); f != fInputTensorInfos.end()) {
      return f->second.shape;
   }
   // in case is not a dynamic tensor convert normal shape to Dim one
   // for this we need to return the vector by value
   // NOTE(review): the extracted listing dropped the final conversion/return
   // statement here — verify against the upstream file.
}
93
95 auto f = fReadyInputTensorInfos.find(name);
96 if (f != fReadyInputTensorInfos.end()) {
97 return f->second.type;
98 }
99 auto f2 = fInitializedTensors.find(name);
100 if (f2 != fInitializedTensors.end()) {
101 return f2->second.type();
102 }
103 auto f3 = fInputTensorInfos.find(name);
104 if (f3 != fInputTensorInfos.end()) {
105 return f3->second.type;
106 }
107 auto f4 = fIntermediateTensorInfos.find(name);
108 if (f4 != fIntermediateTensorInfos.end()) {
109 return f4->second.type;
110 }
111 auto f5 = fDynamicTensorInfos.find(name);
112 if (f5 != fDynamicTensorInfos.end()){
113 return f5->second.type;
114 }
115
118
119 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] for which the type is requested is not found, model name: " + fName);
120}
121
// Return true if a tensor with this (already cleaned) name is registered in
// any of the model's tensor maps: ready inputs, parametric inputs,
// initialized, intermediate or dynamic tensors.
bool RModel::CheckIfTensorAlreadyExist(std::string tensor_name) {
   if (fReadyInputTensorInfos.find(tensor_name) != fReadyInputTensorInfos.end()) return true;
   if (fInputTensorInfos.find(tensor_name) != fInputTensorInfos.end()) return true;
   if (fInitializedTensors.find(tensor_name) != fInitializedTensors.end()) return true;
   if (fIntermediateTensorInfos.find(tensor_name) != fIntermediateTensorInfos.end()) return true;
   if (fDynamicTensorInfos.find(tensor_name) != fDynamicTensorInfos.end()) return true;
   return false;
}
131
132void RModel::AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<Dim> shape) {
135 throw std::runtime_error("TMVA-SOFIE: input tensor with name " + input_name + " already exists \n");
136 }
137
138 InputTensorInfo inputInfo { type, shape };
140}
141
142void RModel::AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<size_t> shape) {
145 throw std::runtime_error("TMVA-SOFIE: input tensor with name " + input_name + " already exists \n");
146 }
147 TensorInfo inputInfo { type, shape };
149}
150
154
155void RModel::AddOperator(std::unique_ptr<ROperator> op, int order_execution) {
156 AddBlasRoutines(op->GetBlasRoutines());
157 auto libs = op->GetStdLibs();
158 auto op_input_tensors = op->GetOpInputTensors();
159 for (auto& stdlib : libs) {
161 }
162 if (order_execution >= 0) {
163 fOperators.insert(fOperators.begin() + order_execution, std::move(op));
164 } else {
165 fOperators.push_back(std::move(op));
166 }
167
168 // storing the last usage of tensors which are input to
169 // operators (but are not inputs to the model, i.e. they are intermediate
170 // tensors). This information is needed to keep a check on when a
171 // particular intermediate tensor can be flushed to free up memory for reuse.
172 for(size_t index = 0; index<op_input_tensors.size() &&
174 std::find(fInputTensorNames.begin(), fInputTensorNames.end(),
177 ++index){
179 }
180}
181
// Register an initialized (weight) tensor; the model takes shared ownership
// of the data buffer. Throws if a tensor with this name already exists.
void RModel::AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data) {
   tensor_name = UTILITY::Clean_name(tensor_name);
   //NB: own data
   if (CheckIfTensorAlreadyExist(tensor_name)) {
      throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
   }
   // NOTE(review): the extracted listing dropped the construction of
   // `new_tensor` (presumably InitializedTensor{type, shape, data}) — verify upstream.
   fInitializedTensors[tensor_name] = new_tensor;
}
191
192void RModel::AddConstantTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data) {
193 tensor_name = UTILITY::Clean_name(tensor_name);
194 //NB: own data
195 if (CheckIfTensorAlreadyExist(tensor_name)) {
196 throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
197 }
198 InitializedTensor new_tensor {type, shape, data, true}; // add here flag to specify is a constant tensor
199 fInitializedTensors[tensor_name] = new_tensor;
200}
201
202bool RModel::IsInitializedTensor(const std::string& tensorName) const {
203 std::string name = UTILITY::Clean_name(tensorName);
204 return fInitializedTensors.find(name) != fInitializedTensors.end();
205}
206bool RModel::IsConstantTensor(const std::string& tensorName) const {
207 std::string name = UTILITY::Clean_name(tensorName);
208 auto itr = fInitializedTensors.find(name);
209 if (itr == fInitializedTensors.end()) return false;
210 return itr->second.IsConstantTensor();
211}
212
213bool RModel::IsDynamicTensor(const std::string& tensorName) const {
214 std::string name = UTILITY::Clean_name(tensorName);
215 return fDynamicTensorInfos.find(name) != fDynamicTensorInfos.end();
216}
217bool RModel::IsDimInputTensor(const std::string& tensorName) const {
218 std::string name = UTILITY::Clean_name(tensorName);
219 return fInputTensorInfos.find(name) != fInputTensorInfos.end();
220}
// Check whether `tensorName` is an input tensor with a fully specified shape.
bool RModel::IsReadyInputTensor(const std::string& tensorName) const {
   std::string name = UTILITY::Clean_name(tensorName);
   // NOTE(review): the extracted listing dropped the return statement
   // (presumably a lookup in fReadyInputTensorInfos) — verify upstream.
}
225
// generic addition of a tensor
// If the Dim shape is fully numeric, register a plain intermediate tensor;
// otherwise register a dynamic tensor keeping the symbolic dimensions.
void RModel::AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<Dim> dim_shape) {
   // NOTE(review): the extracted listing dropped the definition of
   // `int_shape` (presumably the integer conversion of dim_shape) — verify upstream.
   if (!int_shape.empty())
      AddIntermediateTensor(tensor_name, type, int_shape);
   else
      AddDynamicTensor(tensor_name, type, dim_shape);
}
234
// Register an intermediate tensor with a fixed integer shape.
// Throws if a tensor with this name already exists.
void RModel::AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape) {
   tensor_name = UTILITY::Clean_name(tensor_name);
   if (CheckIfTensorAlreadyExist(tensor_name)) {
      throw std::runtime_error("TMVA-SOFIE: intermediate tensor with name " + tensor_name + " already exists \n");
   }
   TensorInfo new_tensor {type, shape};
   // NOTE(review): the extracted listing dropped the statement storing
   // `new_tensor` into fIntermediateTensorInfos — verify upstream.
}
243
// Register a tensor whose shape contains symbolic (parametric) dimensions,
// and record any previously unseen shape parameters in fShapeParams.
void RModel::AddDynamicTensor(std::string tensor_name, ETensorType type, std::vector<Dim> shape){
   tensor_name = UTILITY::Clean_name(tensor_name);
   if (CheckIfTensorAlreadyExist(tensor_name)){
      throw std::runtime_error("TMVA-SOFIE: intermediate tensor with name " + tensor_name + " already exists \n");
   }
   // NOTE(review): the extracted listing dropped the construction of
   // `new_tensor` (presumably DynamicTensorInfo{type, shape}) — verify upstream.
   fDynamicTensorInfos[tensor_name] = new_tensor;
   // store shape parameter if not existing
   for (auto &d : shape) {
      if (d.isParam) {
         if (fShapeParams.count(d.param) == 0) {
            // case parameter is an expression of some other existing parameter, no need to
            // register it
            // (size_t(-1) marks a dimension with no numeric default)
            if (d.dim != size_t(-1)) {
               fShapeParams[d.param] = std::to_string(d.dim);
            }
         }
      }
   }
}
264
266 fOutputTensorNames.clear();
267 for(auto& it : outputtensornames) {
268 fOutputTensorNames.emplace_back(UTILITY::Clean_name(it));
269 }
270}
271
// Replace `curr_output_tensors` with `new_output_tensors` in the model's
// output-tensor list (used e.g. when operators are fused/rewired).
void RModel::UpdateOutputTensorList(std::vector<std::string> curr_output_tensors, std::vector<std::string> new_output_tensors) {
   // remove the tensors that are no longer outputs
   for(auto& it:curr_output_tensors) {
      fOutputTensorNames.erase(std::remove(fOutputTensorNames.begin(), fOutputTensorNames.end(), it), fOutputTensorNames.end());
   }
   // NOTE(review): the extracted listing dropped the line inserting
   // `new_output_tensors` into fOutputTensorNames — verify upstream.
}
278
// Overwrite the type/shape/data of an existing initialized tensor.
// Throws if no tensor with that name is registered.
void RModel::UpdateInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data) {
   tensor_name = UTILITY::Clean_name(tensor_name);
   if (!CheckIfTensorAlreadyExist(tensor_name)) {
      throw std::runtime_error("TMVA-SOFIE: tensor " + tensor_name + " not found when trying to update it");
   }
   // NOTE(review): the extracted listing dropped the construction of
   // `new_tensor` (presumably InitializedTensor{type, shape, data}) — verify upstream.
   fInitializedTensors[tensor_name] = new_tensor;
}
287
288std::shared_ptr<void> RModel::GetInitializedTensorData(std::string tensor_name) {
289 auto f = fInitializedTensors.find(tensor_name);
290 if (f == fInitializedTensors.end()) {
291 throw std::runtime_error("TMVA-SOFIE: tensor " + tensor_name + " not found when trying to get its data");
292 } else {
293 return f->second.sharedptr();
294 }
295}
296
297void RModel::SetNotWritableInitializedTensor(const std::string & tensor_name) {
298 auto t = fInitializedTensors.find(tensor_name);
299 if (t == fInitializedTensors.end()) {
300 throw std::runtime_error("TMVA-SOFIE: initialized tensor " + tensor_name + " not found when trying to get its info");
301 }
302 t->second.SetNotWritable();
303 }
304
305std::string RModel:: AllocateIntermediateMemory(std::span<const std::string_view> op_output_tensors) {
306
307 std::string memory_allocation_string = "";
308 bool allocated;
309
310 for (auto& it : op_output_tensors) {
311 allocated = false;
312 if (GetTensorType(std::string(it)) == ETensorType::BOOL ||
313 fInitializedTensors.find(std::string(it)) != fInitializedTensors.end() ||
314 fDynamicTensorInfos.find(std::string(it)) != fDynamicTensorInfos.end()) continue;
315
316 auto tensor_size = GetTypeSize(GetTensorType(std::string(it))) * ConvertShapeToLength(GetTensorShape(std::string(it)));
317 memory_allocation_string += "\n // Allocating memory for intermediate tensor " + std::string(it) + " with size " + std::to_string(tensor_size) + " bytes";
318
320
321 // check if available memory chunks can accommodate the tensor
322 if (chunk->second >= tensor_size) {
323 auto new_chunk = fIntermediateMemoryInfo.total_stack[chunk->first].split(it, tensor_size);
324 auto new_chunk_location = chunk->first+chunk->second-tensor_size;
326
328 "* tensor_" + std::string(it) +
329 " = reinterpret_cast<"+ConvertTypeToString(GetTensorType(std::string(it)))+"*>(fIntermediateMemoryPool + " + std::to_string(new_chunk_location) + ");\n";
330 chunk->second -= tensor_size;
331
332 allocated = true;
333
334 if (chunk->second == 0) {
336 }
337
338 break;
339 }
340 ++chunk;
341 }
342
343 if (!allocated) {
345 ? 0
346 : fIntermediateMemoryInfo.total_stack.rbegin()->first + fIntermediateMemoryInfo.total_stack.rbegin()->second.tensor_size;
347
349 {
350 it,
351 tensor_size
352 };
353
354 memory_allocation_string += "\n"+ConvertTypeToString(GetTensorType(std::string(it)))+"* tensor_"+ std::string(it) + "= reinterpret_cast<"+ConvertTypeToString(GetTensorType(std::string(it)))+"*>(fIntermediateMemoryPool + " + std::to_string(chunk_idx) + ");\n";
355 }
356 }
358}
359
360void RModel::CheckAndFlushIntermediateMemory(std::span<const std::string_view> op_input_tensors, const size_t& op_idx){
361 for (auto &it : op_input_tensors){
362 // last occurence of the tensor is reached => flush it from memory
364 for (auto chunk = fIntermediateMemoryInfo.total_stack.begin();
366 if (chunk->second.tensor_name == it) {
367
368 // check if nearby chunks in available memory can coalesce
369 auto first_greater = fIntermediateMemoryInfo.available_stack.upper_bound(chunk->first); // smallest element greater than the flushed chunk idx
370 auto last_smaller = (first_greater == fIntermediateMemoryInfo.available_stack.begin()) ? fIntermediateMemoryInfo.available_stack.end() : std::prev(first_greater); // largest element smaller than the flushed chunk idx
371
372 // check if the next stack entry is actually adjacent in memory
373 if (last_smaller->first+last_smaller->second + 1 == chunk->first){
374 last_smaller->second += chunk->second.tensor_size;
376
377 if (last_smaller->first + last_smaller->second + 1 == first_greater->first){
380 }
381 } else{
382 if (chunk->first + chunk->second.tensor_size + 1 == first_greater->first){
385 }
387 chunk->first,
388 chunk->second.tensor_size
389 });
390 }
391 }
392 }
393 }
394 }
395}
396
397
398
// Initialize the model for a given batch size. The batch size is exposed to
// parametric input shapes under the conventional parameter aliases below,
// then the full map-based Initialize overload does the real work.
void RModel::Initialize(int batchSize, bool verbose) {
   std::map<std::string, size_t> inputParams;
   if (batchSize > 0) {
      // common names used for the batch dimension in ONNX models
      inputParams["input_size"] = batchSize;
      inputParams["batch_size"] = batchSize;
      inputParams["bs"] = batchSize;
   }
   Initialize(inputParams, verbose);
}
409void RModel::Initialize(const std::map<std::string, size_t> & inputParams, bool verbose) {
410
411 fVerbose = int(verbose);
412
413 if (fIsInitialized) {
414 if (verbose)
415 std::cout << "Model is already initialized - skip initialization " << std::endl;
416 return;
417 }
419 fDynamicTensorInfos.clear();
420
421 // loop on inputs and see if shape can be full specified
422 // if the batch size is provided it can be used to specify the full shape
423 // Add the full specified tensors in fReadyInputTensors collection
424 auto originalInputTensorInfos = fInputTensorInfos; // need to copy because we may delete elements
425 for (auto &input : originalInputTensorInfos) {
426 if (verbose) std::cout << "looking at the tensor " << input.first << std::endl;
427 // if a parameter (e.g. batch_size) is specified use for converting parametric shape in defined one
428 if (!inputParams.empty()) {
429 for (auto &d : input.second.shape) {
430 if (d.isParam) {
431 std::string pname = d.param;
432 if (pname == input.first + "_size") pname = "input_size";
433 auto itr = inputParams.find(pname);
434 if (itr != inputParams.end() ) {
435 d = Dim{ itr->second };
436 if (verbose)
437 std::cout << "Tensor: " << input.first << " - fix parametric shape " << itr->first << " to " << itr->second << std::endl;
438 }
439 }
440 }
441 }
442 // see if shape now is fully defined
443 auto shape = ConvertShapeToInt(input.second.shape);
444 if (verbose)
445 std::cout << "converting input shape for " << input.first << " " << ConvertShapeToString(shape) << " from "
446 << ConvertDynamicShapeToString(input.second.shape) << std::endl;
447 if (!shape.empty()) {
448 // case shape is defined (not parametric) we add the tensor in the fReadyInputTensorInfos map and
449 // we remove the tensor from the fInputTensorInfo where th eold parametric shape was stored
450 fInputTensorInfos.erase(input.first);
451 // add to the ready input tensor information the new fixed shape
452 AddInputTensorInfo(input.first, input.second.type, shape);
453 // check consistency
455 }
456 // store the parameters of the input tensors
457 else {
458 // store the found parametric shape parameters
459 for (auto &d : input.second.shape) {
460 if (d.isParam)
461 fShapeParams[d.param] = std::to_string(d.dim);
462 }
463 }
464 }
465
466 if (verbose) {
469 }
470
471 // check if there are initialized tensors to write in a weight file
472 // support for the time being only weight of FLOAT type
473 if (fUseWeightFile) {
474 bool modelHasWeights = false;
475 for (auto &i : fInitializedTensors) {
476 if (i.second.type() == ETensorType::FLOAT) {
477 modelHasWeights = true;
478 break;
479 }
480 }
481 if (!modelHasWeights)
482 fUseWeightFile = false;
483 }
484 // Go through model and initialize each operator
485 int i = 0;
486
487 std::vector<size_t> temp_available_stack; // vector stores individual chunks of available memory that maybe reused
488
489 for(size_t op_idx = 0; op_idx < fOperators.size(); ++op_idx){
490 if (verbose) {
491 auto& r = *fOperators[op_idx].get();
492 std::cout << "Initializing operator " << i << " " << typeid(r).name() << std::endl;
493 }
494 fOperators[op_idx]->Initialize(*this);
495 for(auto &it:fOperators[op_idx]->GetOpOutputTensors()){
497 std::find(fOutputTensorNames.begin(), fOutputTensorNames.end(), std::string(it)) == fOutputTensorNames.end() &&
498 fInitializedTensors.find(std::string(it)) == fInitializedTensors.end() &&
499 fDynamicTensorInfos.find(std::string(it)) == fDynamicTensorInfos.end()){
501 }
502 }
503 i++;
504 }
505
506 fIsInitialized = true;
507}
508
// Initialize a nested sub-graph and propagate the parent model's settings
// (weight-file options, session mode, input tensors) to it.
void RModel::InitializeSubGraph(std::shared_ptr<RModel> graph) {
   // add the subgraph to the list
   fSubGraphs.push_back(graph);
   //this needs to be done before initializing
   graph->fParentGraph = this;
   graph->fIsSubGraph = true;

   graph->Initialize(fBatchSize, fVerbose);
   // set the same options as parent model
   graph->fWeightFile = fWeightFile;
   graph->fUseWeightFile = fUseWeightFile;
   graph->fUseSession = fUseSession;
   // add needed blas routines and libs
   std::vector<std::string> blasRoutines;
   for (auto & e : graph->fNeededBlasRoutines)
      blasRoutines.push_back(e);
   // NOTE(review): the extracted listing dropped the statements that register
   // `blasRoutines` and the subgraph's std libs with the parent model, so the
   // loop below has lost its body — verify against the upstream file.
   for (auto e : graph->fNeededStdLib)

   // add parent input tensors to current graph
   for (auto & name : fInputTensorNames)
      graph->fInputTensorNames.emplace_back(name);

   // clean graph name
   graph->fName = UTILITY::Clean_name(graph->fName);

}
537
// Function to generate the code for declaring and initializing constant tensors
// This is for tensors which are not part of weight files and can be created from the Constant operator
// Emits either a stack array (small tensors) or an std::vector (large tensors),
// collapsing to a fill-constructor when every element holds the same value.
template <typename T>
std::string GenerateConstantTensorCode(const std::pair<std::string, InitializedTensor> &t)
{
   std::stringstream strs;
   std::string type = ConvertTypeToString(t.second.type());
   size_t length = ConvertShapeToLength(t.second.shape());
   // avoid using stack sizes for constant tensors to reduce compilation time
   bool allocateOnStack = (length > 100) ? false : true;

   const T *data = t.second.data<T>();

   // and check if all values are the same
   bool sameData = false;
   // for non stack allocation check if data are the same
   if (!allocateOnStack && length > 1) {
      size_t idx = 1;
      do {
         // stop scanning as soon as two neighbours differ
         sameData = (data[idx] == data[idx - 1]);
         idx++;
      } while (sameData && idx < length);
   }
   if (allocateOnStack) {
      strs << type << " tensor_" << t.first << "[" << length << "] = " << ConvertValuesToString(length, data) << ";\n";
   } else {
      strs << "std::vector<" << type << "> fTensor_" << t.first << " = ";
      if (sameData)
         strs << "std::vector<" << type << ">(" << length << ", " << ConvertValToString(data[0]) << ");\n";
      else {
         // NOTE(review): the extracted listing dropped the statement that
         // streams the full value list here — verify against upstream.
      }
      strs << "const " << type << " * tensor_" + t.first + " = fTensor_" + t.first + ".data();\n";
   }
   return strs.str();
}
574
576{
577 if (!fInitializedTensors.empty())
578 fGC += "// initialized tensors\n";
579
580 for (auto &i : fInitializedTensors) {
581 if (!fUseWeightFile || i.second.IsConstantTensor()) {
582 if (i.second.type() == ETensorType::FLOAT)
584 else if (i.second.type() == ETensorType::INT64)
586
587 } else {
588 // case of tensors which are read from a file
589 size_t length = ConvertShapeToLength(i.second.shape());
590 if (i.second.type() == ETensorType::FLOAT) {
591 fGC += "std::vector<float> fTensor_" + i.first + " = std::vector<float>(" + std::to_string(length) + ");\n";
592 fGC += "float * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
593 }
594 }
595 }
596}
597
599 if (fIntermediateMemoryInfo.total_stack.size() == 0) return;
600 fGC += "\n//--- Allocating session memory pool to be used for allocating intermediate tensors\n";
601
602 // char memory block is allocated since char takes 1 byte, thus easier to allocate tensors
603 // of other data types
604 fGC += "char* fIntermediateMemoryPool = new char[" + std::to_string(fIntermediateMemoryInfo.total_stack.rbegin()->first + fIntermediateMemoryInfo.total_stack.rbegin()->second.tensor_size)+ "];\n\n";
605}
606
608 if (!fIntermediateTensorInfos.empty()) {
609 std::string tensor_declaration_block = "";
610
611 for (auto &i : fIntermediateTensorInfos) {
612 if (i.second.type == ETensorType::BOOL) {
613 tensor_declaration_block += "std::vector<bool> fTensor_" + i.first + " = std::vector<bool>(" + std::to_string(ConvertShapeToLength(i.second.shape)) + ");\n";
614 // No pointer allocation needed for BOOL
615 }
616 if (fIntermediateTensorFrequencyLookup.find(i.first) == fIntermediateTensorFrequencyLookup.end() && std::find(fOutputTensorNames.begin(), fOutputTensorNames.end(), i.first) == fOutputTensorNames.end()) {
617 size_t length = ConvertShapeToLength(i.second.shape);
618
619 if (i.second.type == ETensorType::FLOAT) {
620 tensor_declaration_block += "std::vector<float> fTensor_" + i.first + " = std::vector<float>(" + std::to_string(length) + ");\n";
621 tensor_declaration_block += "float * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
622 }
623 else if (i.second.type == ETensorType::DOUBLE) {
624 tensor_declaration_block += "std::vector<double> fTensor_" + i.first + " = std::vector<double>(" + std::to_string(length) + ");\n";
625 tensor_declaration_block += "double * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
626 }
627 else if (i.second.type == ETensorType::INT64) {
628 tensor_declaration_block += "std::vector<int64_t> fTensor_" + i.first + " = std::vector<int64_t>(" + std::to_string(length) + ");\n";
629 tensor_declaration_block += "int64_t * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
630 }
631 }
632 }
633
634 if (tensor_declaration_block.length()) {
635 fGC += "\n//--- declare and allocate the intermediate tensors\n" + tensor_declaration_block;
636 }
637 }
638 // add also the dynamic tensors (only declarations, allocation will be done later)
639 if (!fDynamicTensorInfos.empty()) {
640 fGC += "//--- declare the dynamic tensors\n";
641 for (auto &i : fDynamicTensorInfos) {
642 if (i.second.type == ETensorType::FLOAT) {
643 fGC += "std::vector<float> fTensor_" + i.first + ";\n";
644 fGC += "float * tensor_" + i.first + " = nullptr;\n";
645 } else if (i.second.type == ETensorType::DOUBLE) {
646 fGC += "std::vector<double> fTensor_" + i.first + ";\n";
647 fGC += "double * tensor_" + i.first + " = nullptr;\n";
648 } else if (i.second.type == ETensorType::INT64) {
649 fGC += "std::vector<int64_t> fTensor_" + i.first + ";\n";
650 fGC += "int64_t * tensor_" + i.first + " = nullptr;\n";
651 }
652 }
653 }
654}
655
656// generate code for specific operator declarations to be defined in the Session class
658 std::string strcode;
659 for (auto & op : fOperators) {
660 strcode += op->GenerateDeclCode();
661 }
662 if (strcode.empty()) return;
663 fGC += "\n//---- operator declarations \n";
664 fGC += strcode;
665 fGC += "\n";
666}
667
669 fGC += "//---- allocate the intermediate dynamic tensors\n";
670 std::stringstream out;
671 for (auto & i: fDynamicTensorInfos) {
672 auto length = ConvertDynamicShapeToLength(i.second.shape);
673 out << SP << "if (" << length << " > 0) {\n";
674 out << SP << SP << "fTensor_" << i.first << ".resize(" << length << ");\n";
675 out << SP << SP << "tensor_" << i.first << " = fTensor_" << i.first << ".data();\n";
676 out << SP << "}\n";
677 }
678 fGC += out.str();
679}
680
682 // generate the infer signature given the inputs: eg. "float * tensor1, float * tensor2"
683 // if (decl = false) generate only calling signature (tensor1,tensor2,....)
684 std::string rGC;
685 std::unordered_map<std::string, int> inputParams;
686 int i_input = 0;
687 for (auto &name : fInputTensorNames) {
688 // if is a dynamic tensor pass initial parameters
689 if (IsDimInputTensor(name)) {
690 auto shape = GetDynamicTensorShape(name);
691 for (auto &d : shape) {
692 std::string pName = d.param;
693 // need to check if the input parameters is already existing in another input tensor
694 if (d.isParam && inputParams.count(pName) == 0) {
695 if (isdecl) rGC += "size_t ";
696 rGC += d.param + ",";
698 }
699 }
700 }
701 if (isdecl) {
703 if (type == "other")
704 throw std::runtime_error("TMVA-SOFIE: input tensor " + name +
705 " is of a data type which is not yet supported.");
706 rGC += type + "* ";
707 }
708 rGC += "tensor_" + name + ",";
709 i_input++;
710 }
711
712 if (fInputTensorNames.size() > 0) rGC.pop_back();// remove last ","
713 return rGC;
714}
715
717
718 if (fVerbose)
719 std::cout << "Generating main inference code for " << fName << std::endl;
720
721 size_t outputSize = fOutputTensorNames.size();
722 // assume output types are all the same
723 if (outputSize == 0)
724 throw std::runtime_error("TMVA-SOFIE: output size=0 are not supported");
725
726 std::string outputType;
727 bool sameOutputTypes = true;
728 std::string inferReturnType; // type return by infer function
732 fGC += "\n\n";
733 if (outputSize == 1) {
734 fGC += "std::vector<" + outputType + ">";
735 } else {
736 // if all output types are the same we return an std::vector - otherwise a tuple
737 for (size_t i = 1; i < outputSize; i++) {
739 sameOutputTypes = false;
740 }
741 if (sameOutputTypes)
742 fGC += "std::vector<std::vector<" + outputType + ">>";
743 else {
744 inferReturnType = "std::tuple<";
745 for (size_t i = 0; i < outputSize; i++) {
747 if (i < outputSize-1) inferReturnType += ",";
748 }
749 inferReturnType += ">";
751 }
752 }
753
754 fGC += " infer(";
755
757
758 fGC += "){\n";
759
760 for (size_t op_idx = 0; op_idx < fOperators.size(); ++op_idx) {
761 if (fVerbose) std::cout << "Generating code for operator .... " << op_idx << std::endl;
762 fGC += (fOperators[op_idx]->Generate(std::to_string(op_idx)));
763 }
764
765 if (outputSize == 1) {
766 std::string tensorName = fOutputTensorNames[0];
767 if (fIntermediateTensorInfos.count(tensorName) > 0) {
768
770 fGC += SP + "return fTensor_" + tensorName + ";\n";
771 } else {
772 // need to check is size is the same(don't want to return a vector with larger size)
773 // in that case better to copy
774 fGC += SP + "std::vector<"+ ConvertTypeToString(GetTensorType(std::string(tensorName))) +"> ret(tensor_"+tensorName+", tensor_"+tensorName+" + " + ConvertShapeToLength(GetTensorShape(tensorName)) + ");\n";
775 fGC += SP + "return ret;\n";
776 }
777 } else {
778 // include also dynamic tensors since the vectors can be allocated with a size larger than their output
779 // we need a special handling for bool type allocated as vector<bool>
782 fGC += SP + "std::vector<bool> ret (fTensor_" + tensorName + ".begin(), fTensor_" + tensorName +
783 ".begin() + " + outputLength + ");\n";
784 } else {
785 fGC += SP + "std::vector<" + outputType + "> ret (tensor_" + tensorName + ", tensor_" + tensorName + " + " +
786 outputLength + ");\n";
787 }
788 fGC += SP + "return ret;\n";
789 }
790 } else {
791 // here we assume all outputs have same type
792 if (sameOutputTypes)
793 fGC += SP + "std::vector<std::vector<" + outputType + ">> ret({";
794 else
795 fGC += SP + inferReturnType + " ret({";
796 for (size_t i = 0; i < outputSize; i++) {
797 std::string tensorName = *(fOutputTensorNames.begin() + i);
798 if (!tensorName.empty()) {
799
800 if (fIntermediateTensorInfos.count(tensorName) > 0) {
801 fGC += SP + "std::vector<"+ ConvertTypeToString(GetTensorType(std::string(tensorName))) +">(tensor_"+tensorName+", tensor_"+tensorName+" + " + ConvertShapeToLength(GetTensorShape(tensorName)) + ")";
802 } else {
805 fGC += "std::vector<bool>(fTensor_" + tensorName + ".begin(), fTensor_" + tensorName + ".begin() + " +
806 outputLength + ");\n";
807 } else {
808 fGC += "std::vector<" + outputType + ">(tensor_" + tensorName + ", tensor_" + tensorName + " + " +
809 outputLength + ")";
810 }
811 }
812 if (i < outputSize - 1)
813 fGC += ",";
814 } else {
815 fGC += "{}";
816 }
817 }
818 fGC += "});\n";
819 fGC += SP + "return ret;\n";
820 }
821 fGC += "}\n"; // end of infer function scope
822}
823
825{
826
827 // define the Session struct (for GNN this is generated in RModel_GNN)
829 if (!fIsSubGraph)
830 fGC += "struct Session {\n";
831 else
832 fGC += "struct Session_" + fName + " {\n";
833 }
834
835 // generate code for declaring the initialized tensors
837
838 // evaluate total intermediate memory and position intermediate tensor addresses
839 std::string intermediate_memory_alloc_string = "";
840 intermediate_memory_alloc_string += "\n// --- Positioning intermediate tensor memory --";
841 for (size_t op_idx = 0; op_idx < fOperators.size(); ++op_idx) {
844 }
845
846 // to check remaining unused fragments after memory allocation (lesser the better)
847 // for (const auto &it: fIntermediateMemoryInfo.available_stack){
848 // std::cout<<"chunk_idx: "<<it.first<<", chunk_size: "<<it.second<<"\n";
849 // }
850
851 // generate the memory pool to be used by intermediate tensors
853
854 // position intermediate tensors
856
857 // generate the declaring the intermediate tensors
859 // generate code for declarations of some specific operators
861
862
863
864 // add subgraph session
865 if (!fSubGraphs.empty()) fGC += "// subgraph sessions\n";
866 for (auto & graph : fSubGraphs) {
867 fGC += "Session_" + graph->fName + " fSession_" + graph->fName + ";\n";
868 }
869
870 // Generate code for Session constructor
871 if (fUseSession) {
872 std::string sessionName = "Session";
873 if (fIsSubGraph)
874 sessionName += "_" + fName;
875 // add here specific operator code that needs to define session data members
876 fGC += "\n";
877 for (size_t id = 0; id < fOperators.size(); id++) {
878 std::string opName = std::to_string(id);
879 fGC += fOperators[id]->GenerateSessionMembersCode(opName);
880 }
881 fGC += "\n";
882 // here add initialization and reading of weight tensors
883 if (fUseWeightFile) {
884 std::string fileName = fName;
886 fileName += ".dat";
887 }
889 fileName += ".root";
890 }
891 fGC += sessionName + "(std::string filename =\"" + fileName + "\"";
892 } else {
893 // no need to pass weight file since it is not used
894 // keep passing a string for compatibility
895 fGC += sessionName + "(std::string = \"\"";
896 }
897 // add initialization of shape parameters
898 // assume all parameters are of type size_t
899 if (!fShapeParams.empty()) {
900 for (auto &p : fShapeParams) {
901 fGC += ",\n";
902 fGC += " size_t " + p.first + " = " + p.second;
903 }
904 }
905 fGC += ") {\n";
906
907 if (fUseWeightFile) {
908 fGC += "\n//--- reading weights from file\n";
910 fGC += "\n";
911 // fUseWeightFile = fUseWeightFile;
912 }
913
914 // now we have passed the parameters we can allocate the dynamic tensors
916
917 // add here initialization code for operator
918 for (size_t id = 0; id < fOperators.size(); id++) {
919 fGC += fOperators[id]->GenerateInitCode();
920 }
921
922 fGC += "}\n\n";
923 }
924 // generate the inference code
926
927 // end of session
929 fGC += "}; // end of Session\n";
930 }
931}
932
// RModel::Generate: top-level entry point for code generation.
// Decodes the Options bitmask into generation flags, initializes the model
// (all operators and sub-graphs), then assembles the generated inference
// C++ code into fGC (subgraph sessions first, then the main session).
//
// @param options   bitwise-OR of Options flags (kNoSession, kNoWeightFile,
//                  kRootBinaryWeightFile, kGNN, kGNNComponent)
// @param batchSize batch size forwarded to Initialize()
// @param pos       stored in fReadPos (weight-file read position)
// @param verbose   enables progress printout
//
// NOTE(review): this Doxygen-extracted view elides several source lines
// (gaps 941->943, 945->947, 949->951, 967->969, 982->984) — e.g. the
// statements selecting the weight-file type and the calls emitting the
// header info and the main session code are not visible here.
933void RModel::Generate(std::underlying_type_t<Options> options, int batchSize, long pos, bool verbose)
934{
935 fVerbose = verbose;
936 fBatchSize = batchSize;
937 fReadPos = pos;
938
939 // session flag is used in operator initialize
940 if (static_cast<std::underlying_type_t<Options>>(Options::kNoSession) & options) {
941 fUseSession = false;
943 }
944 if (static_cast<std::underlying_type_t<Options>>(Options::kNoWeightFile) & options) {
945 fUseWeightFile = false;
947 }
948 if (static_cast<std::underlying_type_t<Options>>(Options::kRootBinaryWeightFile) & options) {
949 fUseWeightFile = true;
951 }
// A separate weight file is loaded by the generated Session constructor, so
// requesting a weight file while suppressing the Session is a hard error.
952 if (fUseWeightFile && !fUseSession) {
953 throw std::runtime_error(
954 "TMVA-SOFIE: RModel::Generate: cannot use a separate weight file without generating a Session class");
955 }
956
957 if (static_cast<std::underlying_type_t<Options>>(Options::kGNN) & options)
958 fIsGNN = true;
959 if (static_cast<std::underlying_type_t<Options>>(Options::kGNNComponent) & options)
960 fIsGNNComponent = true;
961
962 // initialize the model including all operators and sub-graphs
963 Initialize(batchSize, verbose);
964
965 std::string hgname;
// Only the top-level model (not a GNN component, not a subgraph) owns the
// header-guard / namespace prologue and the matching epilogue below.
966 if (!fIsGNNComponent && !fIsSubGraph) {
967 fGC.clear();
969 }
970
971 // generate first code for the subgraphs
972 for (auto &graph : fSubGraphs) {
973 if (fVerbose)
974 std::cout << "generate session code for subgraph " << graph->fName << std::endl;
975 graph->GenerateSessionCode();
976 fGC += graph->fGC;
977 }
978
979 if (fVerbose)
980 std::cout << "generate Main session code - model " << fName << std::endl;
981
982 // generate main session code
984
985 if (!fIsGNNComponent && !fIsSubGraph) {
// Close the TMVA_SOFIE_<name> namespace and the include guard opened by the
// (elided) header-generation step; hgname was filled there.
986 fGC += ("} //TMVA_SOFIE_" + fName + "\n");
987 fGC += "\n#endif // " + hgname + "\n";
988 }
989}
990
// Emits into fGC the code that the generated Session uses to load weight
// tensors at runtime — from a text ".dat" file (first branch) or from a ROOT
// file (second branch).
// NOTE(review): the function signature line (993) and the branch conditions
// selecting text vs ROOT (1042 area) are elided in this view; presumably this
// is RModel::ReadInitializedTensorsFromFile — confirm against the full source.
992 // generate the code to read initialized tensors from a text data file
// Nothing to emit when the model has no initialized (weight) tensors.
994 if (fInitializedTensors.empty()) return;
995
996 fGC += " std::ifstream f;\n";
997 fGC += " f.open(filename);\n";
998 fGC += " if (!f.is_open()) {\n";
999 fGC += " throw std::runtime_error(\"tmva-sofie failed to open file \" + filename + \" for input weights\");\n";
1000 fGC += " }\n";
1001
// GNN components share one concatenated weight file; seek to this
// component's offset (pos) before parsing.
1002 if(fIsGNNComponent) {
1003 fGC += " f.seekg(" + std::to_string(pos) + ");\n";
1004 }
1005
1006 fGC += " std::string tensor_name;\n";
1007 fGC += " size_t length;\n";
1008
1009 // loop on tensors and parse the file
1010 for (auto& i: fInitializedTensors) {
1011 // skip Constant and shape tensors (not written in a file)
1012 if (!i.second.IsWeightTensor()) continue;
1013 std::string tensor_name = "tensor_" + i.first;
1014 if (i.second.type() == ETensorType::FLOAT) {
1015 size_t length = 1;
1016 length = ConvertShapeToLength(i.second.shape());
1017 std::string slength = std::to_string(length);
// Generated code reads "<name> <length>" then the values, validating both
// the tensor name and the element count against the model's expectation.
1018 fGC += " f >> tensor_name >> length;\n";
1019 fGC += " if (tensor_name != \"" + tensor_name + "\" ) {\n";
1020 fGC += " std::string err_msg = \"TMVA-SOFIE failed to read the correct tensor name; expected name is " +
1021 tensor_name + " , read \" + tensor_name;\n";
1022 fGC += " throw std::runtime_error(err_msg);\n";
1023 fGC += " }\n";
1024 fGC += " if (length != " + slength + ") {\n";
1025 fGC += " std::string err_msg = \"TMVA-SOFIE failed to read the correct tensor size; expected size is " +
1026 slength + " , read \" + std::to_string(length) ;\n";
1027 fGC += " throw std::runtime_error(err_msg);\n";
1028 fGC += " }\n";
1029 fGC += " for (size_t i = 0; i < length; ++i)\n";
1030 fGC += " f >> " + tensor_name + "[i];\n";
1031 fGC += " if (f.fail()) {\n";
1032 fGC += " throw std::runtime_error(\"TMVA-SOFIE failed to read the values for tensor " + tensor_name + "\");\n";
1033 fGC += " }\n";
1034 } else {
// BUG(review): std::runtime_error is constructed but never thrown — the
// unsupported-type error is silently discarded. Should be `throw std::runtime_error(...)`.
1035 std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be read from a file");
1036 }
1037 }
1038 fGC += " f.close();\n";
1039 }
1040
1041 // generate the code to read initialized tensors from a ROOT data file
// (branch condition elided in this view — presumably the RootBinary case)
1043 fGC += " {\n";
1044 fGC += " std::unique_ptr<TFile> rootFile(TFile::Open(filename.c_str(), \"READ\"));\n";
1045 fGC += " if (!rootFile->IsOpen()) {\n";
1046 fGC += " throw std::runtime_error(\"tmva-sofie failed to open ROOT file for input weights\");\n";
1047 fGC += " }\n";
1048
// Weights live under a per-model directory "<model>_weights" in the file.
1049 std::string dirName = fName + "_weights";
1050 fGC += " if (!rootFile->GetKey(\"" + dirName + "\")) {\n";
1051 fGC += " throw std::runtime_error(\"tmva-sofie failed to open ROOT directory for input weights\");\n";
1052 fGC += " }\n";
1053
1054 for (auto &i : fInitializedTensors) {
1055 // skip Constant and shape tensors
1056 if (!i.second.IsWeightTensor()) continue;
1057 fGC += " {\n";
1058 std::string tensor_name = "tensor_" + i.first;
// NOTE(review): the generated code dereferences the reinterpret_cast of
// rootFile->Get(...) without a null check — a missing key in the directory
// would be a null dereference at runtime; verify against the writer side.
1059 if (i.second.type() == ETensorType::FLOAT) {
1060 fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<float>*>(rootFile->Get(\"";
1061 fGC += dirName + "/" + tensor_name + "\"));\n";
1062 } else if (i.second.type() == ETensorType::DOUBLE) {
1063 fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<double>*>(rootFile->Get(\"";
// NOTE(review): `dirName + + "/"` — the second '+' is a unary plus on the
// string literal (harmless pointer decay) but almost certainly a typo.
1064 fGC += dirName + + "/" + tensor_name + "\"));\n";
1065 } else if (i.second.type() == ETensorType::INT64) {
1066 fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<int64_t>*>(rootFile->Get(\"";
1067 fGC += dirName + "/" + tensor_name + "\"));\n";
1068 } else {
// BUG(review): missing `throw` — this std::runtime_error is constructed and
// immediately discarded, so unsupported types pass silently.
1069 std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be read from a ROOT file");
1070 }
1071 fGC += " }\n";
1072 }
1073 fGC += " }\n";
1074 }
1075}
1076
// RModel::WriteInitializedTensorsToFile: dumps all weight tensors either to
// a ROOT file (directory "<model>_weights", one std::vector per tensor) or
// to a plain text file ("<name> <length>" header followed by values).
// Returns the end position in the text file (used to chain GNN components),
// or -1 for the ROOT-file case and for unknown weight-file types.
// NOTE(review): the signature line (1077), the switch `case` labels
// (1081/1084/1087) and the branch condition at 1098 are elided in this view.
1078 // Determine the file extension based on the weight file type
1079 std::string fileExtension;
1080 switch (fWeightFile) {
1082 fileExtension = ".dat";
1083 break;
1085 fileExtension = ".root";
1086 break;
// (default/fallback case — also ".dat"; case label elided)
1088 fileExtension = ".dat";
1089 break;
1090 }
1091
1092 // If filename is empty, use the model name as the base filename
1093 if (filename.empty()) {
1095 }
1096
1097 // Write the initialized tensors to the file
// (condition elided — presumably the WeightFileType::RootBinary branch)
1098 if(fIsGNNComponent || fIsGNN) {
1099 throw std::runtime_error("SOFIE-GNN yet not supports writing to a ROOT file.");
1100 }
// "UPDATE" keeps other content of an existing file; only this model's
// weight directory is replaced below.
1102 std::unique_ptr<TFile> outputFile(TFile::Open(filename.c_str(), "UPDATE"));
1103
1104 std::string dirName = fName + "_weights";
1105 // check if directory exists, in case delete to replace with new one
1106 if (outputFile->GetKey(dirName.c_str()))
1107 outputFile->rmdir(dirName.c_str());
1108
1109 auto outputDir = outputFile->mkdir(dirName.c_str());
1110
1111 for (const auto& item : fInitializedTensors) {
1112 // skip Constant tensors and tensors which are not writable (e.g. shape tensors)
1113 if (!item.second.IsWeightTensor()) continue;
1114 std::string tensorName = "tensor_" + item.first;
1115 size_t length = 1;
1116 length = ConvertShapeToLength(item.second.shape());
// Copy the raw buffer into a std::vector so WriteObjectAny can stream it
// with the dictionary of the named vector type.
1117 if(item.second.type() == ETensorType::FLOAT) {
1118 const float* data = item.second.data<float>();
1119 std::vector<float> tensorDataVector(data, data + length);
1120 outputDir->WriteObjectAny(&tensorDataVector, "std::vector<float>", tensorName.c_str());
1121 }
1122 else if(item.second.type() == ETensorType::DOUBLE) {
1123 const double* data = item.second.data<double>();
1124 std::vector<double> tensorDataVector(data, data + length);
1125 outputDir->WriteObjectAny(&tensorDataVector, "std::vector<double>", tensorName.c_str());
1126 }
1127 else if(item.second.type() == ETensorType::INT64) {
1128 const int64_t* data = item.second.data<int64_t>();
1129 std::vector<int64_t> tensorDataVector(data, data + length);
1130 outputDir->WriteObjectAny(&tensorDataVector, "std::vector<int64_t>", tensorName.c_str());
1131 }
1132 else {
// BUG(review): missing `throw` — the error object is constructed and
// discarded, so unsupported tensor types are silently skipped.
1133 std::runtime_error("tmva-sofie tensor " + tensorName + " with type " + ConvertTypeToString(item.second.type()) +
1134 " cannot be written to a ROOT file");
1135 }
1136 }
1137 outputFile->Write(filename.c_str());
1138
1139 // this needs to be changed, similar to the text file
1140 return -1;
1141
1142 } else if (fWeightFile == WeightFileType::Text) {
1143 std::ofstream f;
1144 if(fIsGNNComponent) {
1145 // appending all GNN components into the same file
1146 f.open(filename, std::ios::app);
1147 } else {
1148 f.open(filename);
1149 }
1150 if (!f.is_open())
1151 throw
1152 std::runtime_error("tmva-sofie failed to open file " + filename + " for tensor weight data");
1153 for (auto& i: fInitializedTensors) {
1154 // skip Constant tensors and not writable tensors (e.g. shape tensors)
1155 if (!i.second.IsWeightTensor()) {
1156 continue;
1157 }
1158 size_t length = ConvertShapeToLength(i.second.shape());
1159 std::string tensor_name = "tensor_" + i.first;
// Text format: "<tensor_name> <length>\n" followed by space-separated values.
1160 f << tensor_name << " " << length << "\n";
1161 if (i.second.type() == ETensorType::FLOAT) {
1162 const float * data = i.second.data<float>();
1163 for (size_t idx = 0; idx < length; idx++) {
1164 // round to zero sub-normal values
1165 float value = data[idx];
1166 if (value != 0. && std::abs(value) < std::numeric_limits<float>::min() ) value = 0;
// max_digits10 guarantees a lossless float -> text -> float round trip.
1167 f << std::setprecision(std::numeric_limits<float>::max_digits10) << value;
1168 f << ( (idx < length-1) ? " " : "\n" );
1169 }
1170 }
1171 else {
// BUG(review): missing `throw` — error constructed but never raised.
1172 std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be written to a file");
1173 }
1174 if (f.fail())
// BUG(review): missing `throw` here as well — write failures are ignored.
1175 std::runtime_error("tmva-sofie failed to write tensor data to file for " + tensor_name);
1176 }
// Return the end-of-data offset so the next GNN component can append/seek.
1177 long curr_pos = f.tellp();
1178 f.close();
1179 return curr_pos;
1180 } else {
1181 return -1;
1182 }
1183}
1184
// Prints the model's required input tensors to stdout: first the inputs
// with parametric (Dim-based) shapes from fInputTensorInfos, then the fully
// specified ones from fReadyInputTensorInfos.
// NOTE(review): the signature line (1185) is elided in this view; the name
// PrintRequiredInputTensors is inferred from the printed text — confirm.
1186 std::cout << "Model requires following inputs:\n";
1187 for (auto& inputInfo: fInputTensorInfos) {
1188 std::cout << "Parametrised Tensor name: " << inputInfo.first << "\t";
1189 std::cout << "type: " << ConvertTypeToString(inputInfo.second.type) << "\t";
1190 std::cout << "shape: [";
1191 for (size_t i = 0; i < inputInfo.second.shape.size(); i++) {
// A dimension is either symbolic (print its parameter name) or fixed
// (print its numeric value).
1192 if (inputInfo.second.shape[i].isParam) {
1193 std::cout << inputInfo.second.shape[i].param;
1194 } else {
1195 std::cout << inputInfo.second.shape[i].dim ;
1196 }
1197 if (i < inputInfo.second.shape.size() - 1) std::cout << ",";
1198 }
1199 std::cout << "]" << std::endl;
1200 }
1201
1202 for (auto& inputInfo: fReadyInputTensorInfos) {
1203 std::cout << "Fully Specified Tensor name: " << inputInfo.first << "\t";
1204 std::cout << "type: " << ConvertTypeToString(inputInfo.second.type) << "\t";
1205 std::cout << "shape: [";
1206 for (size_t i = 0; i < inputInfo.second.shape.size(); i++) {
1207 std::cout << inputInfo.second.shape[i];
1208 if (i < inputInfo.second.shape.size() - 1) std::cout << ",";
1209 }
1210 std::cout << "]" << std::endl;
1211 }
1212 std::cout << "\n";
1213}
1214
// Prints all initialized tensors (name, type, shape) to stdout, tagging
// constant tensors "(Constant)" and non-weight tensors "(Not Writable)".
// NOTE(review): signature line (1215) elided in this view.
1216 std::cout << "Model initialized the following tensors:\n";
1217 for (auto& it: fInitializedTensors) {
1218 std::cout << "Tensor name: \"" << it.first << "\"\t";
1219 std::cout << "type: " << ConvertTypeToString(it.second.type()) << "\t";
1220 std::cout << "shape: [";
1221 for (size_t i = 0; i < it.second.shape().size(); i++) {
1222 std::cout << it.second.shape()[i];
1223 if (i < it.second.shape().size() - 1) std::cout << ",";
1224 }
1225 std::cout << "]";
1226 if (it.second.IsConstantTensor()) std::cout << " (Constant)";
1227 else if (!it.second.IsWeightTensor()) std::cout << " (Not Writable)";
1228 std::cout << std::endl;
1229 }
1230 std::cout << "\n";
1231}
1232
// Prints name, type and (static) shape of every intermediate tensor.
// NOTE(review): signature line (1233) elided in this view.
1234 std::cout << "Model specify the following intermediate tensors:\n";
1235 for (auto& it: fIntermediateTensorInfos) {
1236 std::cout << "Tensor name: \"" << it.first << "\"\t";
1237 std::cout << "type: " << ConvertTypeToString(it.second.type) << "\t";
1238 std::cout << "shape: [";
1239 for (size_t i = 0; i < it.second.shape.size(); i++) {
1240 std::cout << it.second.shape[i];
1241 if (i < it.second.shape.size() - 1) std::cout << ",";
1242 }
1243 std::cout << "]" << std::endl;
1244 }
1245 std::cout << "\n";
1246}
1247
// Prints name, type and symbolic shape (Dim::GetVal()) of every dynamic
// tensor. NOTE(review): signature line (1248) elided in this view.
1249 std::cout << "Model specify the following dynamic tensors:\n";
1250 for (auto& it: fDynamicTensorInfos) {
1251 std::cout << "Tensor name: \"" << it.first << "\"\t";
1252 std::cout << "type: " << ConvertTypeToString(it.second.type) << "\t";
1253 std::cout << "shape: [";
1254 for (size_t i = 0; i < it.second.shape.size(); i++) {
// Dynamic shapes hold Dim objects; GetVal() renders value or parameter name.
1255 std::cout << it.second.shape[i].GetVal();
1256 if (i < it.second.shape.size() - 1) std::cout << ",";
1257 }
1258 std::cout << "]" << std::endl;
1259 }
1260 std::cout << "\n";
1261}
1262
// Prints each output tensor's name and shape, choosing the static or the
// dynamic shape formatter depending on the tensor kind.
// NOTE(review): signature line (1263) elided in this view.
1264 std::cout << "Model specify the following output tensors:\n";
1265 for (auto& it: fOutputTensorNames) {
1266 std::cout << "Tensor name: \"" << it << "\"\t";
1267 if (!IsDynamicTensor(it))
1268 std::cout << "shape: " << ConvertShapeToString(GetTensorShape(it)) << std::endl;
1269 else
1270 std::cout << "shape: " << ConvertDynamicShapeToString(GetDynamicTensorShape(it)) << std::endl;
1271 }
1272 std::cout << "\n";
1273}
1274
// RModel::HeadInitializedTensors(name, n_print=50): debugging helper that
// prints the type, shape and first n_print data values of the named
// initialized tensor; prints a message and returns if the tensor is unknown.
// NOTE(review): the signature line (1275) is elided in this view; name and
// default are taken from the class declaration.
1276 auto it = fInitializedTensors.find(name);
1277 if (it == fInitializedTensors.end()) {
1278 std::cout << "Tensor " << name << " not found in model's initialized tensor list" << std::endl;
1279 return;
1280 }
1281
1282 std::cout << "Tensor name: " << it->first << "\t";
1283 std::cout << "type: " << ConvertTypeToString(it->second.type()) << "\t";
// NOTE(review): total element count accumulated in an int — could overflow
// for very large tensors; size_t would be safer.
1284 int length =1;
1285 std::cout << "shape: [";
1286 for (size_t i = 0; i < it->second.shape().size(); i++) {
1287 std::cout << it->second.shape()[i];
1288 length *= it->second.shape()[i];
1289 if (i < it->second.shape().size() - 1) std::cout << ",";
1290 }
1291 std::cout << "]" << std::endl;
// Clamp n_print to the tensor size; ellipsis marks that output is truncated.
1292 bool ellipsis = true;
1293 if (n_print > length) {
1294 n_print = length;
1295 ellipsis = false;
1296 }
1297
1298 std::cout << "data: [" << std::endl;
// NOTE(review): only FLOAT tensors are printed; for other types the data
// section is empty, yet the ", ..." ellipsis below is still emitted.
1299 if (it->second.type() == ETensorType::FLOAT) {
1300 auto converted_data = it->second.data<float>();
1301 for (int i =0; i < n_print; i++) {
1302 std::cout << converted_data[i];
1303 if (i < n_print - 1) std::cout << " ,";
1304 }
1305 }
1306 if (ellipsis) std::cout << ", ...";
1307 std::cout << "]" << std::endl;
1308
1309}
1310
// RModel::OutputGenerated: writes the generated inference code to a file
// (delegated to an elided call at line 1313) and, when a weight file is in
// use, derives the weight-file name from the header filename (".hxx" ->
// ".dat" for text, stripped + ".root" for ROOT binary) or from the model
// name, then writes the tensors (elided call at line 1329).
// NOTE(review): lines 1313, 1319, 1321 and 1329 are elided in this view —
// the conditions guarding replace()/erase() and the actual write calls are
// not visible, so pos-validity handling cannot be assessed here.
1311void RModel::OutputGenerated(std::string filename, bool append) {
1312
1314
1315 // write weights in a text file
1316 if (fUseWeightFile) {
1317 if (!filename.empty()) {
1318 size_t pos = filename.find(".hxx");
1320 filename.replace(pos, 4, ".dat");
1322 filename = filename.erase(pos, 4);
1323 filename += ".root";
1324 }
1325 } else {
// No filename given: fall back to the model name with the extension
// implied by the weight-file type.
1326 filename = fName;
1327 filename += fWeightFile == WeightFileType::Text ? ".dat" : ".root";
1328 }
1330 }
1331}
1332
1333void RModel::Streamer(TBuffer &R__b) {
1334 if (R__b.IsReading()) {
1335 RModel::Class()->ReadBuffer(R__b, this);
1336 for(auto i=RModel::fInitializedTensors.begin(); i!=RModel::fInitializedTensors.end(); ++i) {
1337 i->second.CastPersistentToShared();
1338 }
1339 }
1340 else {
1341 for(auto i=RModel::fInitializedTensors.begin(); i!=RModel::fInitializedTensors.end(); ++i) {
1342 i->second.CastSharedToPersistent();
1343 }
1344 RModel::Class()->WriteBuffer(R__b, this);
1345 }
1346}
1347
1348}//SOFIE
1349}//Experimental
1350}//TMVA
#define d(i)
Definition RSha256.hxx:102
#define f(i)
Definition RSha256.hxx:104
#define e(i)
Definition RSha256.hxx:103
size_t size(const MatrixT &matrix)
retrieve the size of a square matrix
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows the typed iteration through a TCollection.
winID h TVirtualViewer3D TVirtualGLPainter p
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void data
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void input
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char filename
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t r
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t index
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h length
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize id
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void value
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h Atom_t Int_t ULong_t ULong_t unsigned char prop_list Atom_t Atom_t Atom_t Time_t type
char name[80]
Definition TGX11.cxx:110
const_iterator begin() const
const_iterator end() const
Buffer base class used for serializing objects.
Definition TBuffer.h:43
static TFile * Open(const char *name, Option_t *option="", const char *ftitle="", Int_t compress=ROOT::RCompressionSetting::EDefaults::kUseCompiledDefault, Int_t netopt=0)
Create / open a file.
Definition TFile.cxx:4130
void GenerateHeaderInfo(std::string &hgname)
std::unordered_set< std::string > fNeededBlasRoutines
void OutputGenerated(std::string filename="", bool append=false)
std::unordered_set< std::string > fNeededStdLib
void AddBlasRoutines(std::vector< std::string > routines)
void AddNeededStdLib(std::string libname)
const ETensorType & GetTensorType(std::string name)
Definition RModel.cxx:94
std::unordered_map< std::string, DynamicTensorInfo > fDynamicTensorInfos
Definition RModel.hxx:25
bool IsDynamicTensor(const std::string &name) const
Definition RModel.cxx:213
void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector< Dim > dim_shape)
Definition RModel.cxx:227
std::vector< Dim > GetDynamicTensorShape(std::string name)
Definition RModel.cxx:82
std::string GenerateInferSignature(bool isdecl=true)
Definition RModel.cxx:681
bool CheckIfTensorAlreadyExist(std::string tensor_name)
Definition RModel.cxx:122
std::vector< std::unique_ptr< ROperator > > fOperators
Definition RModel.hxx:31
void OutputGenerated(std::string filename="", bool append=false)
Definition RModel.cxx:1311
void AddInputTensorInfo(std::string input_name, ETensorType type, std::vector< Dim > shape)
Definition RModel.cxx:132
std::unordered_map< std::string, TensorInfo > fIntermediateTensorInfos
Definition RModel.hxx:24
void AddOutputTensorNameList(std::vector< std::string > output_tensor_names)
Definition RModel.cxx:265
std::unordered_map< std::string, TensorInfo > fReadyInputTensorInfos
Definition RModel.hxx:22
void AddConstantTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
Definition RModel.cxx:192
void AddDynamicTensor(std::string tensor_name, ETensorType type, std::vector< Dim > shape)
Definition RModel.cxx:244
void AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
Definition RModel.cxx:182
RModel & operator=(RModel &&other)
Definition RModel.cxx:39
std::unordered_map< std::string_view, size_t > fIntermediateTensorFrequencyLookup
! lookup table for intermediate tensor frequency (transient)
Definition RModel.hxx:40
void AddInputTensorName(std::string name)
Definition RModel.cxx:151
std::vector< std::string > fOutputTensorNames
Definition RModel.hxx:28
bool IsDimInputTensor(const std::string &name) const
Definition RModel.cxx:217
bool IsInitializedTensor(const std::string &name) const
Definition RModel.cxx:202
void CheckAndFlushIntermediateMemory(std::span< const std::string_view > op_output_tensors, const size_t &op_idx)
Definition RModel.cxx:360
void AddOperator(std::unique_ptr< ROperator > op, int order_execution=-1)
Definition RModel.cxx:155
RModel()=default
Default constructor.
void HeadInitializedTensors(std::string name, int n_print=50)
Definition RModel.cxx:1275
bool IsConstantTensor(const std::string &name) const
Definition RModel.cxx:206
void Initialize(int batchSize=-1, bool verbose=false)
Definition RModel.cxx:399
const std::vector< size_t > & GetTensorShape(std::string name)
Definition RModel.cxx:56
long WriteInitializedTensorsToFile(std::string filename="")
Definition RModel.cxx:1077
void Generate(std::underlying_type_t< Options > options, int batchSize=-1, long pos=0, bool verbose=false)
Definition RModel.cxx:933
std::unordered_map< std::string, InputTensorInfo > fInputTensorInfos
Definition RModel.hxx:21
std::shared_ptr< void > GetInitializedTensorData(std::string tensor_name)
Definition RModel.cxx:288
MemoryPoolInfo fIntermediateMemoryInfo
! intermediate memory info (transient)
Definition RModel.hxx:39
std::string AllocateIntermediateMemory(std::span< const std::string_view > op_output_tensors)
Definition RModel.cxx:305
void InitializeSubGraph(std::shared_ptr< RModel > graph)
Definition RModel.cxx:509
std::unordered_map< std::string, std::string > fShapeParams
Definition RModel.hxx:27
void SetNotWritableInitializedTensor(const std::string &tensor_name)
Definition RModel.cxx:297
std::vector< std::string > fInputTensorNames
Definition RModel.hxx:29
std::unordered_map< std::string, InitializedTensor > fInitializedTensors
Definition RModel.hxx:23
void UpdateInitializedTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
Definition RModel.cxx:279
std::vector< std::shared_ptr< RModel > > fSubGraphs
! sub-graph models (transient)
Definition RModel.hxx:33
bool IsReadyInputTensor(const std::string &name) const
Definition RModel.cxx:221
void UpdateOutputTensorList(std::vector< std::string > curr_output_tensor, std::vector< std::string > modify_output_tensor)
Definition RModel.cxx:272
std::string Clean_name(std::string input_tensor_name)
std::vector< Dim > ConvertShapeToDim(std::vector< size_t > shape)
Convert shape from integer format to dynamic one (based on Dim)
std::string ConvertDynamicShapeToLength(std::vector< Dim > shape)
constexpr size_t GetTypeSize(ETensorType type)
std::string ConvertValuesToString(size_t n, const T *data)
std::string ConvertShapeToString(std::vector< size_t > shape)
std::string GenerateConstantTensorCode(const std::pair< std::string, InitializedTensor > &t)
Definition RModel.cxx:541
std::string ConvertTypeToString(ETensorType type)
std::string ConvertDynamicShapeToString(std::vector< Dim > shape)
std::underlying_type_t< Options > operator|(Options opA, Options opB)
Definition RModel.cxx:16
std::vector< size_t > ConvertShapeToInt(std::vector< Dim > shape)
Convert shape based on Dim to integer format.
std::string ConvertValToString(T value)
std::size_t ConvertShapeToLength(std::vector< size_t > shape)
create variable transformations
std::map< size_t, TensorMemoryInfo > total_stack
std::map< size_t, size_t > available_stack