ROOT Reference Guide
ROperator_BasicBinary.hxx
Go to the documentation of this file.
#ifndef TMVA_SOFIE_ROperator_BasicBinary
#define TMVA_SOFIE_ROperator_BasicBinary

#include "TMVA/ROperator.hxx"
#include "TMVA/RModel.hxx"

#include <algorithm> // std::all_of (used in Generate)
#include <cmath>     // std::pow (used by the Pow trait)
#include <sstream>

namespace TMVA {
namespace Experimental {
namespace SOFIE {

// Type of basic binary operator implemented by ROperator_BasicBinary
enum EBasicBinaryOperator { Add, Sub, Mul, Div, Pow };

// Traits mapping each operator tag to its name, to the C++ expression emitted in the
// generated code (Op), and to the function used to fold constant inputs (Func)
template <typename T, EBasicBinaryOperator Op1>
struct BinaryOperatorTrait {};

template <typename T>
struct BinaryOperatorTrait<T, Add> {
   static const std::string Name() { return "Add"; }
   static std::string Op(const std::string &t1, const std::string t2) { return t1 + " + " + t2; }
   static T Func(T t1, T t2) { return t1 + t2; }
};

template <typename T>
struct BinaryOperatorTrait<T, Sub> {
   static const std::string Name() { return "Sub"; }
   static std::string Op(const std::string &t1, const std::string t2) { return t1 + " - " + t2; }
   static T Func(T t1, T t2) { return t1 - t2; }
};

template <typename T>
struct BinaryOperatorTrait<T, Mul> {
   static const std::string Name() { return "Mul"; }
   static std::string Op(const std::string &t1, const std::string t2) { return t1 + " * " + t2; }
   static T Func(T t1, T t2) { return t1 * t2; }
};

template <typename T>
struct BinaryOperatorTrait<T, Div> {
   static const std::string Name() { return "Div"; }
   static std::string Op(const std::string &t1, const std::string t2) { return t1 + " / " + t2; }
   static T Func(T t1, T t2) { return t1 / t2; }
};

template <typename T>
struct BinaryOperatorTrait<T, Pow> {
   static const std::string Name() { return "Pow"; }
   static std::string Op(const std::string &t1, const std::string t2) { return "std::pow(" + t1 + "," + t2 + ")"; }
   static T Func(T t1, T t2) { return std::pow(t1, t2); }
};
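
// Example of what the traits provide: BinaryOperatorTrait<float, Add>::Op("a", "b") returns the
// string "a + b" to be embedded in the generated inference code, while
// BinaryOperatorTrait<float, Add>::Func(1.f, 2.f) evaluates to 3.f and is used to fold
// operations between constant tensors at initialization time.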

template <typename T, EBasicBinaryOperator Op>
class ROperator_BasicBinary final : public ROperator {
private:
   int fBroadcastFlag = 0; // flag describing the broadcasting of the input shapes (see Initialize)
   std::string fNA;
   std::string fNB;
   std::string fNBroadcastedA;
   std::string fNBroadcastedB;
   std::string fNY;

   std::vector<size_t> fShapeA;
   std::vector<size_t> fShapeB;
   std::vector<size_t> fShapeY;

   std::vector<Dim> fDimShapeA;
   std::vector<Dim> fDimShapeB;
   std::vector<Dim> fDimShapeY;

public:
   ROperator_BasicBinary(std::string nameA, std::string nameB, std::string nameY)
      : fNA(UTILITY::Clean_name(nameA)), fNB(UTILITY::Clean_name(nameB)), fNY(UTILITY::Clean_name(nameY))
   {
      fInputTensorNames = {fNA, fNB};
      fOutputTensorNames = {fNY};
   }

   // type of the output given the input
   std::vector<ETensorType> TypeInference(std::vector<ETensorType> input) override { return input; }

   // shape of the output tensors given the input tensors
   std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input) override
   {
      // assume for now that the inputs have the same shape (no broadcasting,
      // which is instead handled in Initialize)
      auto ret = std::vector<std::vector<size_t>>(1, input[0]); // return a size-1 vector with the first input shape
      return ret;
   }

   void Initialize(RModel &model) override
   {
      // input must be a graph input, or an already initialized intermediate tensor
      if (!model.CheckIfTensorAlreadyExist(fNA)) {
         throw std::runtime_error(std::string("TMVA SOFIE Binary Op Input Tensor ") + fNA + " is not found in model");
      }
      if (!model.CheckIfTensorAlreadyExist(fNB)) {
         throw std::runtime_error(std::string("TMVA SOFIE Binary Op Input Tensor ") + fNB + " is not found in model");
      }
      int dynamicInputs = 0;
      if (model.IsDynamicTensor(fNA)) {
         fDimShapeA = model.GetDynamicTensorShape(fNA);
         dynamicInputs |= 1;
      } else {
         fShapeA = model.GetTensorShape(fNA);
         fDimShapeA = ConvertShapeToDim(fShapeA);
      }
      if (model.IsDynamicTensor(fNB)) {
         dynamicInputs |= 2;
         fDimShapeB = model.GetDynamicTensorShape(fNB);
      } else {
         fShapeB = model.GetTensorShape(fNB);
         fDimShapeB = ConvertShapeToDim(fShapeB);
      }
      if (dynamicInputs & 1 && model.Verbose())
         std::cout << BinaryOperatorTrait<T, Op>::Name() << " : input " << fNA << " is dynamic "
                   << ConvertShapeToString(fDimShapeA);
      if (dynamicInputs & 2 && model.Verbose())
         std::cout << BinaryOperatorTrait<T, Op>::Name() << " : input " << fNB << " is dynamic "
                   << ConvertShapeToString(fDimShapeB);
      if (dynamicInputs && model.Verbose())
         std::cout << std::endl;
      // check if we need to broadcast at initialization time, when the shapes are known and different
      // (we could broadcast the tensor to the maximum values of the dynamic shapes - to be done)
      // case of known shapes:
      // if the shapes are known, find the output shape from broadcasting
      if (dynamicInputs == 0) {
         auto ret = UTILITY::MultidirectionalBroadcastShape(fShapeA, fShapeB);
         fBroadcastFlag = ret.first;
         fShapeY = ret.second;
         if (model.IsConstantTensor(fNA) && model.IsConstantTensor(fNB)) {
            bool broadcast = fBroadcastFlag > 0;
            if (broadcast) {
               // Y is the common shape of A and B
               bool broadcastA = fBroadcastFlag & 2;
               bool broadcastB = fBroadcastFlag & 1;
               // Broadcast A to Y
               if (broadcastA) {
                  fNBroadcastedA = "Broadcasted" + fNA + "to" + fNY;
                  auto data = model.GetInitializedTensorData(fNA);
                  std::shared_ptr<void> broadcastedData(
                     UTILITY::UnidirectionalBroadcast<T>(static_cast<T *>(data.get()), fShapeA, fShapeY),
                     std::default_delete<T[]>());
                  if (model.Verbose())
                     std::cout << "broadcasted data A " << ConvertShapeToString(fShapeY) << " : "
                               << ConvertValuesToString(ConvertShapeToLength(fShapeY),
                                                        static_cast<T *>(broadcastedData.get()))
                               << std::endl;
                  // Update the data and the shape of A
                  model.AddConstantTensor(fNBroadcastedA, model.GetTensorType(fNA), fShapeY, broadcastedData);
                  fShapeA = fShapeY;
               }
               // Broadcast B to Y
               if (broadcastB) {
                  fNBroadcastedB = "Broadcasted" + fNB + "to" + fNY;
                  auto data = model.GetInitializedTensorData(fNB);
                  if (model.Verbose())
                     std::cout << "data B " << ConvertShapeToString(fShapeB) << " : "
                               << ConvertValuesToString(ConvertShapeToLength(fShapeB), static_cast<T *>(data.get()))
                               << std::endl;
                  std::shared_ptr<void> broadcastedData(
                     UTILITY::UnidirectionalBroadcast<T>(static_cast<T *>(data.get()), fShapeB, fShapeY),
                     std::default_delete<T[]>());
                  // do not update tensor B but add the broadcasted one (since B can be input to other operators)
                  if (model.Verbose())
                     std::cout << "broadcasted data B " << ConvertShapeToString(fShapeY) << " : "
                               << ConvertValuesToString(ConvertShapeToLength(fShapeY),
                                                        static_cast<T *>(broadcastedData.get()))
                               << std::endl;
                  model.AddConstantTensor(fNBroadcastedB, model.GetTensorType(fNB), fShapeY, broadcastedData);
                  fShapeB = fShapeY;
               }
            } else {
               fShapeY = fShapeA;
            }
            // the input tensors are constant: perform the binary operation here

            const std::string &nameA = fNBroadcastedA.empty() ? fNA : fNBroadcastedA;
            const std::string &nameB = fNBroadcastedB.empty() ? fNB : fNBroadcastedB;
            auto dataA = static_cast<T *>(model.GetInitializedTensorData(nameA).get());
            auto dataB = static_cast<T *>(model.GetInitializedTensorData(nameB).get());
            std::vector<T> dataY(ConvertShapeToLength(fShapeY));
            for (size_t i = 0; i < dataY.size(); i++) {
               dataY[i] = BinaryOperatorTrait<T, Op>::Func(dataA[i], dataB[i]);
            }
            model.AddConstantTensor<T>(fNY, fShapeY, dataY.data());
            // flag the input tensors so they are not written to the weight file
            model.SetNotWritableInitializedTensor(nameA);
            model.SetNotWritableInitializedTensor(nameB);
            fIsOutputConstant = true;
            if (model.Verbose()) {
               std::cout << BinaryOperatorTrait<T, Op>::Name() << " : " << fNA << " " << ConvertShapeToString(fShapeA)
                         << " , " << fNB << " " << ConvertShapeToString(fShapeB) << " ---> " << fNY << " "
                         << ConvertShapeToString(fShapeY) << " : " << ConvertValuesToString(dataY) << std::endl;
            }
         } else {
            // case of defined but non-constant tensors
            model.AddIntermediateTensor(fNY, model.GetTensorType(fNA), fShapeY);
            if (model.Verbose()) {
               std::cout << BinaryOperatorTrait<T, Op>::Name() << " : " << fNA << " " << ConvertShapeToString(fShapeA)
                         << " , " << fNB << " " << ConvertShapeToString(fShapeB) << " ---> " << fNY << " "
                         << ConvertShapeToString(fShapeY) << std::endl;
            }
            // convert the non-dim output shape to a Dim shape (the input ones were converted above)
            fDimShapeY = ConvertShapeToDim(fShapeY);
         }
      } else {
         // case A or B have dynamic shapes: we need to broadcast if the shapes are not the same
         auto ret = UTILITY::MultidirectionalBroadcastShape(fDimShapeA, fDimShapeB);
         fBroadcastFlag = ret.first;
         fDimShapeY = ret.second;
         // case of all-parametric shapes, where MultidirectionalBroadcastShape returns the max of the two:
         // this needs to be resolved before we declare the output tensor shape and the broadcasted ones
         if (ret.first & 4) {
            // check if one of the parameters is an input dimension;
            // define a function to find this
            auto IsInputDimParam = [&](const std::string &p) {
               auto inputNames = model.GetInputTensorNames();
               for (auto &input : inputNames) {
                  for (auto &i_s : model.GetDimTensorShape(input)) {
                     if (i_s.isParam && i_s.param == p)
                        return true;
                  }
               }
               return false;
            };
            for (size_t i = 0; i < fDimShapeY.size(); i++) {
               auto &s = fDimShapeY[i];
               if (s.isParam && s.param.find("std::max") != std::string::npos) {
                  if (IsInputDimParam(fDimShapeA[i].param)) {
                     // if the known dim is 1, the input parameter is taken to be equal to 1
                     if (fDimShapeA[i].dim != 1)
                        s = fDimShapeA[i];
                     else
                        s = fDimShapeB[i];
                  } else if (IsInputDimParam(fDimShapeB[i].param)) {
                     if (fDimShapeB[i].dim != 1)
                        s = fDimShapeB[i];
                     else
                        s = fDimShapeA[i];
                  }
               }
            }
         }

         model.AddIntermediateTensor(fNY, model.GetTensorType(fNA), fDimShapeY);
         if (model.Verbose()) {
            std::cout << BinaryOperatorTrait<T, Op>::Name() << " : " << ConvertShapeToString(fDimShapeA) << " , "
                      << ConvertShapeToString(fDimShapeB) << " --> " << ConvertShapeToString(fDimShapeY) << std::endl;
         }
      }
   }
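
   // Note on fBroadcastFlag (as used in Initialize above and Generate below): bit 1 set means
   // input B needs to be broadcast to the common shape, bit 2 means input A does, and bit 4
   // means the shapes are parametric, so the compatibility check is deferred to the generated
   // code instead of being performed here.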

   std::string GenerateInitCode() override
   {
      std::stringstream out;
      return out.str();
   }

   std::string Generate(std::string opName) override
   {
      // nothing to generate: a constant output has already been computed in Initialize
      if (fIsOutputConstant)
         return "";

      opName = "op_" + opName;

      if (fDimShapeY.empty()) {
         throw std::runtime_error("TMVA SOFIE Binary Op called to Generate without being initialized first");
      }
      std::stringstream out;
      out << SP << "\n//------ " << opName << " " << BinaryOperatorTrait<T, Op>::Name() << " --> "
          << ConvertShapeToString(fDimShapeY) << "\n";
      std::string typeName = TensorType<T>::Name();

      // we need to check at runtime whether we can broadcast (case the flag has bit 4 set)

      if (fBroadcastFlag & 4) {
         // need to check if the lengths are the same
         auto lengthA = ConvertDimShapeToLength(fDimShapeA);
         auto lengthB = ConvertDimShapeToLength(fDimShapeB);
         out << SP << "if (" << lengthA << " != " << lengthB << ") {\n";
         // check if A->B or B->A
         for (size_t i = 0; i < fDimShapeY.size(); i++) {
            if (fBroadcastFlag & 5 && fDimShapeY[i] == fDimShapeA[i] && fDimShapeA[i].dim > 1 &&
                fDimShapeB[i].isParam) {
               // B->A : B[i] needs to be 1
               out << SP << SP << "if (" << fDimShapeB[i] << " != 1)\n";
               out << SP << SP << SP << "throw std::runtime_error(\"SOFIE - Cannot broadcast B->A in operator "
                   << opName << "\");\n";
            }
            if (fBroadcastFlag & 6 && fDimShapeY[i] == fDimShapeB[i] && fDimShapeB[i].dim > 1 &&
                fDimShapeA[i].isParam) {
               // A->B : A[i] needs to be 1
               out << SP << SP << "if (" << fDimShapeA[i] << " != 1)\n";
               out << SP << SP << SP << "throw std::runtime_error(\"SOFIE - Cannot broadcast A->B in operator "
                   << opName << "\");\n";
            } else if (fDimShapeA[i].isParam && fDimShapeB[i].isParam) {
               // both dimensions are parametric and we broadcast to the maximum:
               // emit a check that the two are compatible
               out << SP << SP << "if (" << fDimShapeA[i] << " != " << fDimShapeB[i] << " && (" << fDimShapeA[i]
                   << " != 1 || " << fDimShapeB[i] << " != 1))\n";
               out << SP << SP << SP << "throw std::runtime_error(\"SOFIE - Cannot broadcast shapes in operator " << opName
                   << "\");\n";
            }
         }
         out << SP << "}\n";
      }

      // implement the operation with a loop over the output entries, using the
      // (possibly symbolic) row-major strides of the tensors to compute the indices

      auto stridesA = UTILITY::ComputeStrideFromShape(fDimShapeA);
      auto stridesB = UTILITY::ComputeStrideFromShape(fDimShapeB);
      auto stridesY = UTILITY::ComputeStrideFromShape(fDimShapeY);

      std::string compute_idx_A, compute_idx_B, compute_idx_Y;
      if (fDimShapeA.empty() ||
          std::all_of(fDimShapeA.begin(), fDimShapeA.end(), [](Dim d) { return d.dim == 1 || d.GetVal() == "1"; })) {
         compute_idx_A = "0";
      } else {
         for (size_t i = 0; i < fDimShapeA.size(); ++i) {
            if (fDimShapeA[i].dim == 1 || fDimShapeA[i].GetVal() == "1")
               continue;
            compute_idx_A += "idx_" + std::to_string(i + (fDimShapeY.size() - fDimShapeA.size()));
            if (stridesA[i].GetVal() != "1")
               compute_idx_A += " * " + stridesA[i].GetVal();
            compute_idx_A += " + ";
         }
         // remove the last 3 characters " + "
         for (int j = 0; j < 3; j++)
            compute_idx_A.pop_back();
      }
      if (fDimShapeB.empty() ||
          std::all_of(fDimShapeB.begin(), fDimShapeB.end(), [](Dim d) { return d.dim == 1 || d.GetVal() == "1"; })) {
         compute_idx_B = "0";
      } else {
         for (size_t i = 0; i < fDimShapeB.size(); ++i) {
            if (fDimShapeB[i].dim == 1 || fDimShapeB[i].GetVal() == "1")
               continue;
            compute_idx_B += "idx_" + std::to_string(i + (fDimShapeY.size() - fDimShapeB.size()));
            if (stridesB[i].GetVal() != "1")
               compute_idx_B += " * " + stridesB[i].GetVal();
            compute_idx_B += " + ";
         }
         // remove the last 3 characters " + "
         for (int j = 0; j < 3; j++)
            compute_idx_B.pop_back();
      }
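      // Illustration with hypothetical shapes: for Y = {2,3}, A = {3}, B = {2,3} the strings
      // built above are compute_idx_A = "idx_1" (A's single axis aligns with the last output
      // axis and has stride 1) and compute_idx_B = "idx_0 * 3 + idx_1" (row-major strides {3,1}).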
      int nloop = 0;
      if (fDimShapeY.empty() ||
          std::all_of(fDimShapeY.begin(), fDimShapeY.end(), [](Dim d) { return d.dim == 1 || d.GetVal() == "1"; })) {
         compute_idx_Y = "0";
      } else {
         for (size_t i = 0; i < fDimShapeY.size(); ++i) {
            if (fDimShapeY[i].dim != 1 && fDimShapeY[i].GetVal() != "1") {
               nloop++;
               for (int j = 0; j < nloop; j++) out << SP;
               out << "for (size_t idx_" << i << " = 0; idx_" << i << " < " << fDimShapeY[i]
                   << "; ++idx_" << i << "){\n";
               compute_idx_Y += "idx_" + std::to_string(i);
               if (stridesY[i].GetVal() != "1")
                  compute_idx_Y += " * " + stridesY[i].GetVal();
               compute_idx_Y += " + ";
            }
         }
         // remove the last 3 characters " + "
         for (int j = 0; j < 3; j++)
            compute_idx_Y.pop_back();
      }
      for (int j = 0; j < nloop + 1; j++) out << SP;
      out << "tensor_" << fNY << "[" << compute_idx_Y << "] = "
          << BinaryOperatorTrait<T, Op>::Op("tensor_" + fNA + "[" + compute_idx_A + "]",
                                            "tensor_" + fNB + "[" + compute_idx_B + "]")
          << ";\n";

      // close the generated loops
      for (int i = nloop; i > 0; i--) {
         for (int j = 0; j < i; j++) out << SP;
         out << "}\n";
      }
      return out.str();
   }
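
   // With the hypothetical shapes from the note above (A = {3}, B = {2,3}, Y = {2,3}) and
   // Op = Add, Generate() emits code equivalent to:
   //    for (size_t idx_0 = 0; idx_0 < 2; ++idx_0){
   //       for (size_t idx_1 = 0; idx_1 < 3; ++idx_1){
   //          tensor_Y[idx_0 * 3 + idx_1] = tensor_A[idx_1] + tensor_B[idx_0 * 3 + idx_1];
   //       }
   //    }
   // (tensor names here are placeholders for the cleaned tensor names of the model)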

   std::vector<std::string> GetStdLibs() override
   {
      // the generated code for Pow calls std::pow and thus needs <cmath>
      if (Op == EBasicBinaryOperator::Pow) {
         return {std::string("cmath")};
      } else {
         return {};
      }
   }
};

} // namespace SOFIE
} // namespace Experimental
} // namespace TMVA

#endif // TMVA_SOFIE_ROperator_BasicBinary
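
For context, a minimal sketch of how this operator class is typically exercised through the standard SOFIE workflow (file names are hypothetical; the ONNX parser instantiates a ROperator_BasicBinary whenever it encounters an Add, Sub, Mul, Div or Pow node):

// Usage sketch (assumes a file "model.onnx" containing a basic binary node such as Add)
#include "TMVA/RModel.hxx"
#include "TMVA/RModelParser_ONNX.hxx"

int main()
{
   using namespace TMVA::Experimental::SOFIE;
   RModelParser_ONNX parser;
   RModel model = parser.Parse("model.onnx"); // binary nodes become ROperator_BasicBinary instances
   model.Generate();                          // runs Initialize() and Generate() for each operator
   model.OutputGenerated("model.hxx");        // writes the generated inference code to disk
   return 0;
}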