#ifdef SOFIE_SUPPORT_ROOT_BINARY
namespace Experimental {
const std::string SP = "   ";
return static_cast<std::underlying_type_t<Options>>(opA) |
       static_cast<std::underlying_type_t<Options>>(opB);
return opA | static_cast<std::underlying_type_t<Options>>(opB);
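// Hedged usage sketch, not part of RModel.cxx: the operator| overloads above let callers
// combine code-generation flags when calling RModel::Generate. Options::kGNN appears later
// in this listing; kNoSession and kNoWeightFile are assumed flag names, used here only for
// illustration.
#include "TMVA/RModel.hxx"

void GenerateStandalone(TMVA::Experimental::SOFIE::RModel &model)
{
   using TMVA::Experimental::SOFIE::Options;
   // combine two flags via the underlying-type operator| defined above
   model.Generate(Options::kNoSession | Options::kNoWeightFile, /*batchSize=*/1);
}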
return f->second.shape;
return f2->second.shape();
throw std::runtime_error("TMVA SOFIE tensor [" + name + "] is an input tensor with unspecified dimension parameter");
return f4->second.shape;
if (f5->second.second)
   return std::vector<size_t>{};
return std::vector<size_t>{f5->second.first.size()};
throw std::runtime_error("TMVA SOFIE tensor [" + name + "] is a dynamic tensor. Use GetDynamicTensorShape instead of GetTensorShape");
throw std::runtime_error("TMVA SOFIE tensor [" + name + "] for which the shape is requested is not found");
return f->second.shape;
return f->second.shape;
return f->second.shape;
return f->second.shape;
throw std::runtime_error("TMVA SOFIE tensor [" + name + "] for which the shape is requested is not dynamic");
throw std::runtime_error("TMVA SOFIE tensor [" + name + "] for which the shape is requested is not found");
return f->second.type;
return f2->second.type();
return f3->second.type;
return f4->second.type;
return f5->second.type;
throw std::runtime_error("TMVA SOFIE tensor [" + name + "] for which the type is requested is not found, model name: " + fName);
throw std::runtime_error("TMVA-SOFIE: input tensor with name " + input_name + " already exists \n");
throw std::runtime_error("TMVA-SOFIE: input tensor with name " + input_name + " already exists \n");
auto libs = op->GetStdLibs();
throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
throw std::runtime_error("TMVA-SOFIE: constant tensor with name " + tensor_name + " already exists \n");
throw std::runtime_error("TMVA-SOFIE: shape tensor with name " + tensor_name + " already exists \n");
throw std::runtime_error("TMVA-SOFIE: alias tensor with name " + tensor_name + " already exists \n");
return itr->second.IsConstantTensor();
throw std::runtime_error("TMVA-SOFIE: intermediate tensor with name " + tensor_name + " already exists \n");
throw std::runtime_error("TMVA-SOFIE: intermediate tensor with name " + tensor_name + " already exists \n");
for (auto &d : shape) {
if (d.dim != size_t(-1)) {
throw std::runtime_error("TMVA-SOFIE: tensor " + tensor_name + " not found when trying to update it");
throw std::runtime_error("TMVA-SOFIE: tensor " + tensor_name + " not found when trying to get its data");
return f->second.sharedptr();
throw std::runtime_error("TMVA-SOFIE: initialized tensor " + tensor_name + " not found when trying to get its info");
t->second.SetNotWritable();
std::stringstream code;
std::cout << "Total chunks allocated\n";
std::cout << "..... chunk " << chunk->first << " size " << chunk->second.tensor_size << " " << chunk->second.tensor_name << std::endl;
code << "\n // Allocating memory for intermediate tensor " << name << " with size " << size << " bytes";
code << "\n" << SP << typeName << "* tensor_" << name << " = reinterpret_cast<" << typeName
     << "*>(fIntermediateMemoryPool.data() + " << location << ");\n";
if (fVerbose) std::cout << "*** AllocateIntermediateMemory: Loop on op output tensors\n";
auto name = std::string(it);
std::string name = std::string{it.tensor_name};
size_t tensor_size = it.tensor_size;
std::cout << "output tensor " << name << " size " << tensor_size << std::endl;
if (fVerbose) std::cout << ".. available chunk " << chunk->first << " with size = " << chunk->second;
if (chunk->second >= tensor_size) {
chunk->second -= tensor_size;
if (chunk->second == 0) {
if (fVerbose) std::cout << " and deleted since size matches";
if (fVerbose) std::cout << std::endl;
if (fVerbose) std::cout << " is extended with a bigger one of size " << tensor_size << std::endl;
if (fVerbose) std::cout << std::endl;
if (fVerbose) std::cout << "no chunk available - add in total stack a new chunk with size of tensor and idx : " << chunk_idx;
if (fVerbose) std::cout << "*** CheckAndFlushIntermediateMemory: Loop on input tensors for op " << op_idx << "\n";
if (fVerbose) std::cout << "available chunks before freeing them : \n";
if (fVerbose) std::cout << "-- free chunk " << chunk->first << " size = " << chunk->second << std::endl;
if (fVerbose) std::cout << ".. input tensors : " << iv;
if (fVerbose) std::cout << " flush condition is met - looping on chunks to find matching one \n";
if (fVerbose) std::cout << "--- chunk " << chunk->first << " , " << chunk->second.tensor_name << " size " << chunk->second.tensor_size;
if (chunk->second.tensor_name == it) {
if (fVerbose) std::cout << " -- Found chunk corresponding to input tensor: " << chunk->first;
if (fVerbose) std::cout << " is adjacent in memory with previous one - merge ";
if (fVerbose) std::cout << " merge also with following that is free ";
if (fVerbose) std::cout << std::endl;
if (fVerbose) std::cout << " is adjacent in memory with following one - merge \n";
if (fVerbose) std::cout << " insert in the available stack the chunk with size " << chunk->second.tensor_size << std::endl;
chunk->second.tensor_name = "free";
if (fVerbose) std::cout << std::endl;
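// Minimal standalone sketch of the chunk-reuse idea driven by the bookkeeping above, assuming
// the map-of-free-chunks layout declared at the end of this listing (available_stack maps a pool
// offset to the free size at that offset). This is an illustration, not the RModel implementation:
// a new intermediate tensor takes the first chunk large enough and the leftover tail stays free.
#include <cstddef>
#include <map>
#include <optional>

std::optional<std::size_t> TakeChunk(std::map<std::size_t, std::size_t> &available, std::size_t size)
{
   for (auto it = available.begin(); it != available.end(); ++it) {
      if (it->second < size)
         continue;                                   // chunk too small, keep looking
      std::size_t offset    = it->first;             // tensor will live at this pool offset
      std::size_t remaining = it->second - size;     // free space left after placing the tensor
      available.erase(it);
      if (remaining > 0)
         available.emplace(offset + size, remaining); // keep the tail as a smaller free chunk
      return offset;
   }
   return std::nullopt;                               // no chunk available: caller grows the pool
}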
std::cout << "Model is already initialized - skip initialization " << std::endl;
if (verbose) std::cout << "looking at the tensor " << input.first << std::endl;
for (auto &d : input.second.shape) {
std::string pname = d.param;
std::cout << "Tensor: " << input.first << " - fix parametric shape " << itr->first << " to " << itr->second << std::endl;
if (!shape.empty()) {
for (auto &d : input.second.shape) {
std::cout << "Initializing operator " << i << " " << typeid(r).name() << std::endl;
std::string name = std::string{it};
it.second.SetConstant();
if (it.second.IsWeightTensor()) {
graph->fParentGraph = this;
graph->fIsSubGraph = true;
for (auto &e : graph->fNeededBlasRoutines)
for (auto e : graph->fNeededStdLib)
graph->fInputTensorNames.emplace_back(name);
std::stringstream strs;
const T *data = t.second.data<T>();
strs << "std::vector<" << type << "> fTensor_" << t.first << " = ";
strs << type << " * tensor_" + t.first + " = fTensor_" + t.first + ".data();\n";
fGC += "// initialized (weights and constant) tensors\n";
if (i.second.IsNotWritable())
   continue;
if (!fUseWeightFile || i.second.IsConstantTensor() || !i.second.IsWeightTensor()) {
fGC += "std::vector<float> fTensor_" + i.first + " = std::vector<float>(" + std::to_string(length) + ");\n";
fGC += "float * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
fGC += "\n//--- Allocating session memory pool to be used for allocating intermediate tensors\n";
fGC += "std::vector<char> fIntermediateMemoryPool = std::vector<char>(" + std::to_string(memPoolSize) + ");\n\n";
fGC += "//--- declare the dynamic tensors\n";
fGC += "//--- dynamic tensors pool\n";
fGC += "std::vector<char> fDynamicMemoryPool;\n";
fGC += "\n//---- operator declarations \n";
std::cout << "generating code for dynamic tensor management" << std::endl;
std::stringstream out;
out << "// dynamic tensor memory management\n";
out << SP << "std::vector<TMVA::Experimental::SOFIE::TensorLifeInfo> dynamicTensorInfos;\n";
std::vector<std::pair<std::string, ETensorType>> tensors;
for (auto &it : op->GetOpOutputTensors()) {
std::cout << "Looping on operator " << op_index << " " << typeid(*op_ptr).name() << std::endl;
std::string name = std::string(it);
std::cout << "op " << op_index << " tensor_" << name << " begin " << begin << " end " << end << std::endl;
throw std::runtime_error("TMVA-SOFIE: RModel::GenerateDynamicTensorInfo: tensor_" + name + " has end before begin");
out << SP << "dynamicTensorInfos.push_back( {" << begin << ", " << end << ", " << type_size << "* (" << tensor_size << ") });"
    << " // tensor_" << name << std::endl;
out << "\n" << SP << "auto memory_result = OrganizeMemory(dynamicTensorInfos);\n\n";
out << "// allocating now the memory\n";
out << SP << "fDynamicMemoryPool = std::vector<char>(memory_result.total_bytes);\n";
out << SP << "int idx = 0;\n";
out << SP << "tensor_" << it.first << " = reinterpret_cast<" << ConvertTypeToString(it.second) << " *>(fDynamicMemoryPool.data() + memory_result.offsets[idx++]);\n";
std::cout << "Dynamic tensor " << i.first << " is not in list of operator input/output " << std::endl;
throw std::runtime_error("TMVA-SOFIE: RModel::GenerateDynamicTensorInfo - some tensors are not in input/output list");
for (auto &d : shape) {
std::string pName = d.param;
rGC += d.param + ",";
throw std::runtime_error("TMVA-SOFIE: input tensor " + name + " is of a data type which is not yet supported.");
if (outputSize == 1) {
for (size_t i = 0; i < outputSize; i++) {
if (i < outputSize - 1)
fGC += SP + "return {";
fGC += "struct Session {\n";
fGC += "struct Session_" + fName + " {\n";
std::cout << "\n******************\n analyzing input/output operator " << op_idx << " " << typeid(*op).name() << std::endl;
fGC += "Session_" + graph->fName + " fSession_" + graph->fName + ";\n";
for (size_t id = 0; id < fOperators.size(); id++) {
std::string opName = std::to_string(id);
std::string fileName = fName;
fileName += ".root";
fGC += sessionName + "(std::string filename =\"" + fileName + "\"";
fGC += "\n//--- reading weights from file\n";
for (size_t id = 0; id < fOperators.size(); id++) {
std::cout << "Generating main inference code for " << fName << std::endl;
throw std::runtime_error("TMVA-SOFIE: output size=0 is not supported");
std::cout << "Generating code for operator .... " << op_idx << std::endl;
fGC += SP + "using TMVA::Experimental::SOFIE::UTILITY::FillOutput;\n\n";
fGC += SP + "FillOutput(tensor_" + name + ", output_tensor_" + name + ", " + n + ");\n";
fGC += "}; // end of Session\n\n";
void RModel::Generate(std::underlying_type_t<Options> options, int batchSize, long pos, bool verbose)
throw std::runtime_error("TMVA-SOFIE: RModel::Generate: cannot use a separate weight file without generating a Session class");
if (static_cast<std::underlying_type_t<Options>>(Options::kGNN) & options)
std::cout << "Warning: Force having a Session since model has dynamic tensors " << std::endl;
std::cout << "generate session code for subgraph " << graph->fName << std::endl;
graph->GenerateSessionCode();
std::cout << "generate Main session code - model " << fName << std::endl;
fGC += ("} //TMVA_SOFIE_" + fName + "\n");
fGC += "   std::ifstream f;\n";
fGC += "   f.open(filename);\n";
fGC += "   if (!f.is_open()) {\n";
fGC += "      throw std::runtime_error(\"tmva-sofie failed to open file \" + filename + \" for input weights\");\n";
fGC += "   f.seekg(" + std::to_string(pos) + ");\n";
fGC += "   using TMVA::Experimental::SOFIE::ReadTensorFromStream;\n";
if (!i.second.IsWeightTensor())
   continue;
std::string tensor_name = "tensor_" + i.first;
fGC += "   ReadTensorFromStream(f, " + tensor_name + ", \"" + tensor_name + "\", " + length + ");\n";
throw std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be read from a file");
fGC += "   f.close();\n";
#ifdef SOFIE_SUPPORT_ROOT_BINARY
fGC += "   std::unique_ptr<TFile> rootFile(TFile::Open(filename.c_str(), \"READ\"));\n";
fGC += "   if (!rootFile->IsOpen()) {\n";
fGC += "      throw std::runtime_error(\"tmva-sofie failed to open ROOT file for input weights\");\n";
fGC += "   if (!rootFile->GetKey(\"" + dirName + "\")) {\n";
fGC += "      throw std::runtime_error(\"tmva-sofie failed to open ROOT directory for input weights\");\n";
if (!i.second.IsWeightTensor())
   continue;
std::string tensor_name = "tensor_" + i.first;
fGC += "   fTensor_" + i.first + " = *reinterpret_cast<std::vector<float>*>(rootFile->Get(\"";
fGC += dirName + "/" + tensor_name + "\"));\n";
fGC += "   fTensor_" + i.first + " = *reinterpret_cast<std::vector<double>*>(rootFile->Get(\"";
fGC += dirName + "/" + tensor_name + "\"));\n";
fGC += "   fTensor_" + i.first + " = *reinterpret_cast<std::vector<int64_t>*>(rootFile->Get(\"";
fGC += dirName + "/" + tensor_name + "\"));\n";
throw std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be read from a ROOT file");
throw std::runtime_error("SOFIE was not built with ROOT file support.");
#ifdef SOFIE_SUPPORT_ROOT_BINARY
throw std::runtime_error("SOFIE-GNN does not yet support writing to a ROOT file.");
if (!item.second.IsWeightTensor())
   continue;
const float* data = item.second.data<float>();
const double* data = item.second.data<double>();
const int64_t* data = item.second.data<int64_t>();
1371 " cannot be written to a ROOT file");
throw std::runtime_error("SOFIE was not built with ROOT file support.");
throw std::runtime_error("tmva-sofie failed to open file " + filename + " for tensor weight data");
if (!i.second.IsWeightTensor()) {
std::string tensor_name = "tensor_" + i.first;
f << tensor_name << " " << length << "\n";
const float *data = i.second.data<float>();
for (size_t idx = 0; idx < length; idx++) {
if (value != 0. && std::abs(value) < std::numeric_limits<float>::min())
   value = 0;
f << std::setprecision(std::numeric_limits<float>::max_digits10) << value;
f << ((idx < length - 1) ? " " : "\n");
throw std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be written to a file");
throw std::runtime_error("tmva-sofie failed to write tensor data to file for " + tensor_name);
std::cout << "Model requires the following inputs:\n";
std::cout << "Parametrised Tensor name: " << inputInfo.first << "\t";
std::cout << "shape: [";
for (size_t i = 0; i < inputInfo.second.shape.size(); i++) {
if (inputInfo.second.shape[i].isParam) {
std::cout << inputInfo.second.shape[i].param;
std::cout << inputInfo.second.shape[i].dim;
if (i < inputInfo.second.shape.size() - 1) std::cout << ",";
std::cout << "]" << std::endl;
std::cout << "Fully Specified Tensor name: " << inputInfo.first << "\t";
std::cout << "shape: [";
for (size_t i = 0; i < inputInfo.second.shape.size(); i++) {
if (i < inputInfo.second.shape.size() - 1) std::cout << ",";
std::cout << "]" << std::endl;
std::cout << "Model initialized the following tensors:\n";
std::cout << "Tensor name: \"" << it.first << "\"\t";
std::cout << "shape: [";
for (size_t i = 0; i < it.second.shape().size(); i++) {
std::cout << it.second.shape()[i];
if (i < it.second.shape().size() - 1) std::cout << ",";
if (it.second.IsConstantTensor()) std::cout << " (Constant)";
else if (!it.second.IsWeightTensor()) std::cout << " (Not Writable)";
std::cout << std::endl;
std::cout << "Model specifies the following intermediate tensors:\n";
std::cout << "Tensor name: \"" << it.first << "\"\t";
std::cout << "shape: [";
for (size_t i = 0; i < it.second.shape.size(); i++) {
std::cout << it.second.shape[i];
if (i < it.second.shape.size() - 1) std::cout << ",";
std::cout << "]" << std::endl;
std::cout << "Model specifies the following dynamic tensors:\n";
std::cout << "Tensor name: \"" << it.first << "\"\t";
std::cout << "shape: [";
for (size_t i = 0; i < it.second.shape.size(); i++) {
std::cout << it.second.shape[i].GetVal();
if (i < it.second.shape.size() - 1) std::cout << ",";
std::cout << "]" << std::endl;
std::cout << "Model specifies the following output tensors:\n";
std::cout << "Tensor name: \"" << it << "\"\t";
std::cout << "with shape not yet defined" << std::endl;
std::cout << "Tensor " << name << " not found in model's initialized tensor list" << std::endl;
std::cout << "Tensor name: " << it->first << "\t";
std::cout << "shape: [";
for (size_t i = 0; i < it->second.shape().size(); i++) {
std::cout << it->second.shape()[i];
length *= it->second.shape()[i];
if (i < it->second.shape().size() - 1) std::cout << ",";
std::cout << "]" << std::endl;
std::cout << "data: [" << std::endl;
for (int i = 0; i < n_print; i++) {
if (i < n_print - 1) std::cout << " ,";
if (ellipsis) std::cout << ", ...";
std::cout << "]" << std::endl;
size_t pos = filename.find(".hxx");
if (R__b.IsReading()) {
RModel::Class()->ReadBuffer(R__b, this);
i.second.CastPersistentToShared();
i.second.CastSharedToPersistent();
RModel::Class()->WriteBuffer(R__b, this);
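// Hedged sketch, not part of RModel.cxx: the custom Streamer above (CastSharedToPersistent /
// CastPersistentToShared around the default class I/O) is what lets an RModel round-trip through
// a ROOT file. File and object names below are placeholders.
#include "TFile.h"
#include "TMVA/RModel.hxx"

void SaveAndReload(TMVA::Experimental::SOFIE::RModel &model)
{
   {
      TFile fout("model.root", "RECREATE");
      fout.WriteObject(&model, "rmodel");   // writing goes through RModel::Streamer
   }
   TFile fin("model.root", "READ");
   auto *loaded = fin.Get<TMVA::Experimental::SOFIE::RModel>("rmodel"); // Streamer reads it back
   if (loaded)
      loaded->PrintRequiredInputTensors();
}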
void GenerateHeaderInfo(std::string &hgname)
void OutputGenerated(std::string filename="", bool append=false)
WeightFileType fWeightFile
void AddBlasRoutines(std::vector< std::string > routines)
void AddNeededStdLib(std::string libname)
void AddShapeParam(const std::string &name, size_t def_value=0)
std::vector< size_t > GetTensorShape(const std::string &name) const
void PrintIntermediateTensors() const
std::vector< Dim > GetDimTensorShape(const std::string &name) const
std::unordered_map< std::string, DynamicTensorInfo > fDynamicTensorInfos
bool IsDynamicTensor(const std::string &name) const
void AddAliasTensor(const std::string &tensor_name, const std::string &orig_tensor_name)
void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector< Dim > dim_shape)
void GenerateIntermediateTensorInfo()
std::string GenerateInferSignature(bool isdecl=true)
void GenerateOperatorDeclarations()
size_t fWeightsTensorSize
bool CheckIfTensorAlreadyExist(std::string tensor_name)
std::vector< std::unique_ptr< ROperator > > fOperators
void OutputGenerated(std::string filename="", bool append=false)
std::unordered_map< std::string, std::string > fAliasTensors
void AddInputTensorInfo(std::string input_name, ETensorType type, std::vector< Dim > shape)
std::unordered_map< std::string, TensorInfo > fIntermediateTensorInfos
void AddOutputTensorNameList(std::vector< std::string > output_tensor_names)
std::unordered_map< std::string, TensorInfo > fReadyInputTensorInfos
void AddConstantTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
void AddDynamicTensor(std::string tensor_name, ETensorType type, std::vector< Dim > shape)
std::vector< std::string > fDimShapeNames
void AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
std::unordered_map< std::string_view, size_t > fIntermediateTensorFrequencyLookup
! lookup table for intermediate tensor frequency (transient)
void AddInputTensorName(std::string name)
std::vector< std::string > fOutputTensorNames
void PrintRequiredInputTensors() const
void GenerateSessionCode()
bool IsDimInputTensor(const std::string &name) const
void GenerateDynamicTensorInfo()
void PrintDynamicTensors() const
bool IsShapeTensor(const std::string &name) const
check if a tensor is a shape tensor
bool IsInitializedTensor(const std::string &name) const
bool IsAliasTensor(const std::string &name) const
check if a tensor is an alias tensor
size_t fConstantTensorSize
void CheckAndFlushIntermediateMemory(std::span< const std::string_view > op_output_tensors, const size_t &op_idx)
void AddOperator(std::unique_ptr< ROperator > op, int order_execution=-1)
void PrintOutputTensors() const
void HeadInitializedTensors(std::string name, int n_print=50)
bool IsConstantTensor(const std::string &name) const
void Initialize(int batchSize=-1, bool verbose=false)
long WriteInitializedTensorsToFile(std::string filename="")
OptimizationLevel fOptimizationLevel
void Generate(std::underlying_type_t< Options > options, int batchSize=-1, long pos=0, bool verbose=false)
std::vector< Dim > GetDynamicTensorShape(const std::string &name) const
void PrintInitializedTensors() const
std::unordered_map< std::string, InputTensorInfo > fInputTensorInfos
std::shared_ptr< void > GetInitializedTensorData(std::string tensor_name)
MemoryPoolInfo fIntermediateMemoryInfo
! intermediate memory info (transient)
void GenerateIntermediateMemoryPool()
void ReadInitializedTensorsFromFile(long)
std::string AllocateIntermediateMemory(std::span< const std::string_view > op_output_tensors)
std::unordered_map< std::string, std::pair< std::vector< Dim >, bool > > fShapeTensors
void InitializeSubGraph(std::shared_ptr< RModel > graph)
std::unordered_map< std::string, std::string > fShapeParams
void SetNotWritableInitializedTensor(const std::string &tensor_name)
ETensorType GetTensorType(std::string name) const
void GenerateInitializedTensorInfo()
std::vector< std::string > fInputTensorNames
std::unordered_map< std::string, InitializedTensor > fInitializedTensors
void UpdateInitializedTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
const std::vector< Dim > & GetShapeTensorValues(const std::string &tensor_name) const
std::vector< std::shared_ptr< RModel > > fSubGraphs
! sub-graph models (transient)
bool IsReadyInputTensor(const std::string &name) const
void UpdateOutputTensorList(std::vector< std::string > curr_output_tensor, std::vector< std::string > modify_output_tensor)
void AddShapeTensor(const std::string &name, const std::vector< Dim > &shapeValues, bool scalar=false)
std::string Clean_name(std::string input_tensor_name)
std::size_t ConvertShapeToLength(const std::vector< size_t > &shape)
std::vector< Dim > ConvertShapeToDim(const std::vector< size_t > &shape)
Convert shape from integer format to dynamic one (based on Dim)
constexpr size_t GetTypeSize(ETensorType type)
std::string ConvertValuesToString(size_t n, const T *data)
std::string GenerateConstantTensorCode(const std::pair< std::string, InitializedTensor > &t)
std::vector< size_t > ConvertShapeToInt(const std::vector< Dim > &shape)
Convert shape based on Dim to integer format.
std::string ConvertTypeToString(ETensorType type)
std::underlying_type_t< Options > operator|(Options opA, Options opB)
std::string ConvertDimShapeToLength(const std::vector< Dim > &shape)
std::string ConvertShapeToString(const std::vector< size_t > &shape)
std::string ConvertValToString(T value)
std::map< size_t, TensorMemoryInfo > total_stack
std::map< size_t, size_t > available_stack