ROOT Reference Guide
Loading...
Searching...
No Matches
ROperator_Expand.hxx
Go to the documentation of this file.
1#ifndef TMVA_SOFIE_ROperator_Expand
2#define TMVA_SOFIE_ROperator_Expand
3
5#include "TMVA/ROperator.hxx"
6#include "TMVA/RModel.hxx"
7
8#include <sstream>
9
10namespace TMVA{
11namespace Experimental{
12namespace SOFIE{
13
// NOTE(review): this listing is a Doxygen page capture; the number starting
// each line is the original header's source line number. The capture DROPPED
// original lines 34, 37-38, 63, 78, 102, 127 and 145, so below a few
// statements reference variables whose declarations are missing (e.g. `ret`
// at line 79) and two closing braces have no visible opener. Each gap is
// flagged inline -- confirm against the real TMVA/SOFIE header before
// treating this text as compilable code.
14template<typename T>
// SOFIE code generator for the ONNX "Expand" operator: broadcasts the input
// tensor X to the target shape given by the 1-d `shape` input, producing Y.
15class ROperator_Expand final : public ROperator{
16private:
17
 // Shape of input X, possibly with symbolic (Dim) dimensions.
18 std::vector<Dim> fShapeX;
 // Shape of the `shape` input tensor itself (must be 1-d, see Initialize).
19 std::vector<size_t> fShape;
 // Output shape of Y: the broadcast of fShapeX with the requested shape.
20 std::vector<Dim> fShapeY;
 // Requested target shape as Dim values (symbolic when not known statically).
21 std::vector<Dim> fShapeDim;
22
 // Cleaned tensor names: input X, shape input, output Y.
23 std::string fNX;
24 std::string fNShape;
25 std::string fNY;
 // C++ type name of the tensor elements (set in Initialize).
26 std::string fType;
27
 // True when BOTH X and the shape input are initialized (constant) tensors.
28 bool fInitialized = false;
 // True when the shape input is an initialized tensor.
29 bool fInitializedShape = false;
 // True when the shape input is a "shape tensor" carrying Dim values.
30 bool fDimShapeValues = false;
 // True when X is constant and actually needs broadcasting to the output shape.
31 bool fInitBroadcast = false;
32
33public:
 // Constructor: stores the sanitized tensor names.
35 ROperator_Expand(std::string nameX, std::string nameShape, std::string nameY):
36 fNX(UTILITY::Clean_name(nameX)), fNShape(UTILITY::Clean_name(nameShape)), fNY(UTILITY::Clean_name(nameY)){
 // NOTE(review): original lines 37-38 are missing from this capture.
 // Presumably they registered fInputTensorNames = {fNX, fNShape} and
 // fOutputTensorNames = {fNY} -- the pop_back() on fOutputTensorNames at
 // line 110 below implies it was populated here. TODO confirm.
39 }
40
41
 // Resolves shapes, decides whether the output is constant, and registers
 // the output tensor Y with the model. Throws std::runtime_error when the
 // input tensor is unknown or the shape input is malformed.
42 void Initialize(RModel& model) override {
43 // input must be a graph input, or already initialized intermediate tensor
44 if (!model.CheckIfTensorAlreadyExist(fNX)) {
45 throw std::runtime_error("TMVA SOFIE Expand Op Input Tensor " + fNX + " is not found in model");
46 }
47 fShapeX = model.GetDimTensorShape(fNX);
 // Case 1: the target shape is an initialized (constant) tensor of int64.
48 if (model.IsInitializedTensor(fNShape)) {
49 fInitializedShape = true;
50 int64_t *shapeData =
51 static_cast<int64_t *>(model.GetInitializedTensorData(fNShape).get());
52 fShape = model.GetTensorShape(fNShape);
53 if (fShape.size() != 1) {
54 throw std::runtime_error("TMVA::SOFIE - Expand operator shape must be a 1d tensor.");
55 }
56 size_t N = fShape[0];
57 // what do we do if shapeData contains negative values?
 // Reject negative entries outright (ONNX allows -1 only in Reshape, not Expand).
58 for (size_t i = 0; i < N; i++) {
59 if ( shapeData[i] < 0)
60 throw std::runtime_error("TMVA::SOFIE - Expand: invalid shape value " + std::to_string(shapeData[i]));
61 }
62 std::vector<size_t> shape(shapeData, shapeData + N);
 // NOTE(review): original line 63 is missing from this capture; `shape`
 // is otherwise unused, so line 63 presumably stored it, likely as
 // fShapeDim = ConvertShapeToDim(shape). TODO confirm.
 // Case 2: the target shape is a shape tensor carrying Dim values.
64 } else if (model.IsShapeTensor(fNShape)) {
65 // case input shape is a shape tensor
66 fShapeDim = model.GetShapeTensorValues(fNShape);
67 fDimShapeValues = true;
 // Case 3: fully dynamic shape -- introduce one symbolic parameter per entry.
68 } else {
69 // assume shape of input shape is known (size is 1)
70 auto shapeOfInputShape = model.GetTensorShape(fNShape);
71 fShapeDim.resize(shapeOfInputShape[0]);
72 for (size_t i = 0; i < fShapeDim.size(); i++) {
73 fShapeDim[i] = Dim{std::string("v_") + fNShape + "_" + std::to_string(i)};
74 model.AddShapeParam(fShapeDim[i].param);
75 }
76 }
77 // Y is the common shape of fShapeX and shape
 // NOTE(review): original line 78 is missing from this capture; it must
 // have declared `ret` used on the next line, presumably something like
 // auto ret = UTILITY::MultidirectionalBroadcastShape(fShapeX, fShapeDim);
 // (cf. the MultidirectionalBroadcastShape declaration referenced by this
 // page). TODO confirm against the real header.
79 fShapeY = ret.second;
80 fInitialized = model.IsInitializedTensor(fNX) && fInitializedShape;
81 std::vector<size_t> shapeX;
82 std::vector<size_t> shapeY;
83 // case shape tensor and input shape are known
84 if (!model.IsDynamicTensor(fNX) && !model.IsDimInputTensor(fNX) && fInitializedShape) {
85 shapeX = ConvertShapeToInt(fShapeX);
86 shapeY = ConvertShapeToInt(fShapeY);
 // Broadcasting is only needed when input and output shapes differ.
87 if (!UTILITY::AreSameShape(shapeX, shapeY))
88 fInitBroadcast = true;
89 }
90 if (fInitialized) {
91 // cannot have Dim initialized tensors
92 assert(!shapeX.empty() && !shapeY.empty());
93 // Broadcast X to the common shape shapeY
94 // If X is an initialized tensor (constant)
95 auto data = model.GetInitializedTensorData(fNX);
96 if (fInitBroadcast) {
 // Broadcast the constant data now, at model-build time; the raw
 // pointer from UnidirectionalBroadcast is owned via default_delete<T[]>.
97 std::shared_ptr<void> broadcastedData(
98 UTILITY::UnidirectionalBroadcast(static_cast<T *>(data.get()), shapeX, shapeY),
99 std::default_delete<T[]>());
100 // Update the data and the shape of X
101 model.UpdateInitializedTensor(fNX, model.GetTensorType(fNX), shapeY, broadcastedData);
 // NOTE(review): original line 102 is missing from this capture
 // (possibly an update of fShapeX to the broadcast shape). TODO confirm.
103 // need to set as a not writable tensor
104 model.SetNotWritableInitializedTensor(fNX);
105 data = broadcastedData;
106 }
107 if (fInitBroadcast || model.IsConstantTensor(fNX)) {
108 fIsOutputConstant = true; // constant output in this case
109 model.AddConstantTensor(fNY, model.GetTensorType(fNX), shapeY, data);
 // Constant outputs are dropped from the operator's output list since
 // no runtime code is generated for them.
110 fOutputTensorNames.pop_back();
111 } else {
112 model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), shapeY);
113 }
114 } else {
115 // // case input is not initialized
116 model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);
117 }
118 fType = ConvertTypeToString(model.GetTensorType(fNX));
119 if (model.Verbose()) {
120 std::cout << "Expand - input " << fNX << " shape " << ConvertDimShapeToString(fShapeX) << " --> " << fNY << " shape "
121 << ConvertDimShapeToString(fShapeY) << (fIsOutputConstant ? ConvertValuesToString(model.GetTensorData<T>(fNY)) + " (constant)" : "") << std::endl;
122 }
123 }
124
 // Emits one-time initialization code. The emitted std::copy only applies
 // when X is initialized and already has the output shape (no broadcast).
125 std::string GenerateInitCode() override {
126 std::stringstream out;
 // NOTE(review): original line 127 is missing from this capture; the
 // unmatched '}' at line 132 shows it opened a guard, presumably
 // if (fInitialized && !fInitBroadcast) { -- consistent with the comment
 // on the next line. TODO confirm.
128 // shapeX and shapeY are the same in this case
129 auto length = ConvertDimShapeToLength(fShapeY);
130 out << "// Copying initialized tensor " << fNX << " to " << fNY << "\n";
131 out << SP << "std::copy(tensor_" << fNX << ", " << "tensor_" << fNX << " + " << length << ", tensor_" << fNY << ");\n";
132 }
133 return out.str();
134 }
135
 // Emits the per-inference code for the Expand op: either a real broadcast
 // of X into Y or a plain element copy when the lengths already match.
 // Returns "" when the output was folded to a constant at build time.
136 std::string Generate(std::string opName) override {
137 if (fIsOutputConstant) return "";
138 opName = "op_" + opName;
139 if (fShapeY.empty()) {
140 throw std::runtime_error("TMVA SOFIE Expand Op called to Generate without being initialized first");
141 }
142 std::stringstream out;
143 out << SP << "\n//------ Expand " << opName << " --> " << ConvertDimShapeToString(fShapeY) << "\n";
144 // need to declare shape parameters for non initialized shapes
 // NOTE(review): original line 145 is missing from this capture; the
 // unmatched '}' at line 149 shows it opened a guard around this loop,
 // presumably if (!fInitializedShape && !fDimShapeValues) { so the
 // symbolic shape values are read from the runtime tensor. TODO confirm.
146 for (size_t i = 0; i < fShapeDim.size(); i++) {
147 out << SP << "size_t " << fShapeDim[i] << " = " << "tensor_" << fNShape << "[" << i << "];\n";
148 }
149 }
150 // No need to broadcast A if it's an initialized tensor or shapes are the same
151 auto lengthX = ConvertDimShapeToLength(fShapeX);
152 auto lengthY = ConvertDimShapeToLength(fShapeY);
 // Lengths may be symbolic expressions, so the comparison below is between
 // the generated length STRINGS; unequal strings emit a runtime if/else.
153 if (lengthX != lengthY) {
154 out << SP << "if ( (" << lengthX << ") < (" << lengthY << ") ) {\n";
155 out << SP << SP << "// Broadcasting uninitialized tensor " << fNX << "\n";
156 out << SP << SP << "TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast(tensor_" << fNX << ", " << ConvertDimShapeToString(fShapeX) << ", " << ConvertDimShapeToString(fShapeY)
157 << ", tensor_"<<fNY<<");\n";
158 out << SP << "} else {\n";
159 out << SP << SP << "std::copy(tensor_" << fNX << ", " << "tensor_" << fNX << " + (" << lengthX << "), tensor_" << fNY << ");\n";
160 out << SP << "}\n";
161 } else {
162 // case of equal length even if shapes are dims
163 out << SP << "std::copy(tensor_" << fNX << ", " << "tensor_" << fNX << " + (" << lengthX << "), tensor_" << fNY << ");\n";
164 }
165
166 return out.str();
167 }
168
169};
170
171}//SOFIE
172}//Experimental
173}//TMVA
174
175
176#endif //TMVA_SOFIE_ROperator_Expand
char * ret
Definition Rotated.cxx:221
#define N
ROperator_Expand(std::string nameX, std::string nameShape, std::string nameY)
std::string Generate(std::string opName) override
std::vector< std::string_view > fInputTensorNames
Definition ROperator.hxx:50
bool fIsOutputConstant
flag to identify if operator has a constant output (no need to generate code)
Definition ROperator.hxx:47
const std::string SP
space used to correctly indent the generated C++ code
Definition ROperator.hxx:45
std::vector< std::string_view > fOutputTensorNames
Definition ROperator.hxx:51
bool AreSameShape(const std::vector< size_t > &, const std::vector< size_t > &)
std::vector< size_t > MultidirectionalBroadcastShape(std::vector< std::vector< size_t > >)
T * UnidirectionalBroadcast(const T *data, const std::vector< size_t > &shape, const std::vector< size_t > &targetShape)
std::string ConvertDimShapeToString(const std::vector< Dim > &shape)
std::string ConvertValuesToString(size_t n, const T *data, size_t maxprint=-1)
std::vector< Dim > ConvertShapeToDim(const std::vector< size_t > &shape)
Convert shape from integer format to dynamic one (based on Dim).
std::vector< size_t > ConvertShapeToInt(const std::vector< Dim > &shape)
Convert shape based on Dim to integer format.
std::string ConvertTypeToString(ETensorType type)
std::string ConvertDimShapeToLength(const std::vector< Dim > &shape)
create variable transformations