Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
ROperator_Slice.hxx
Go to the documentation of this file.
1#ifndef TMVA_SOFIE_ROPERATOR_SLICE
2#define TMVA_SOFIE_ROPERATOR_SLICE
3
#include "TMVA/ROperator.hxx"
#include "TMVA/RModel.hxx"

#include <cassert>
#include <numeric>
#include <sstream>
#include <utility>
11
12namespace TMVA{
13namespace Experimental{
14namespace SOFIE{
15
16// slice operator
17
// ONNX Slice operator for SOFIE: extracts a strided sub-tensor of the input
// along the given axes, following ONNX Slice semantics (start/end/steps per axis).
// NOTE(review): this listing comes from a Doxygen page with folded lines; the
// class declaration line itself (Doxygen line 19, presumably
// "class ROperator_Slice final : public ROperator") is not visible here -
// confirm against the full source file.
18template <typename IType>
20{
21
22private:
23
24 // flags to indicate if start/end and steps are not defined at compile time
25 bool fIsStartUndef = false;
26 bool fIsEndUndef = false;
27 bool fIsStepUndef = false;
28 std::string fNData; // input data tensor name
29 std::string fNOutput; // output data name
30 std::vector<std::string> fNames; // tensor names for meta(axis) information
31 std::vector<Dim> fShapeInput; // input shape data
32 std::vector<Dim> fShapeOutput; // output shape data
33 // saved Start/End/Steps are corrected from initial ONNX for negative/default values
34 // and are available for each axis
35 std::vector<Dim> fStart; // starting values of slices for all axes
36 std::vector<Dim> fEnd; // End values of slices for all axes
37 std::vector<Dim> fSteps; // step values of slices for all axes
38 std::vector<Dim> fStartDims; // input starting values of slices
39 std::vector<Dim> fEndDims; // input End values of slices
40 std::vector<Dim> fStepDims; // input step values of slices
41 std::vector<IType> fAxes; // axes for input start/end/step values
42
43 std::vector<std::vector<IType>> fAttributes; // attributes for the version < 10 case (see ctor comments)
44
45
46public:
47
// NOTE(review): Doxygen line 48 is folded out of this listing - cannot tell
// from here what it declares; confirm against the full source file.
49
50 // ctor for versions >= 10
// Constructor for ONNX opset >= 10, where starts/ends/axes/steps arrive as
// extra input tensors rather than as attributes.
// @param nameData   name of the input data tensor
// @param names      names of the starts/ends/axes/steps input tensors
//                   (axes and steps may be absent, see loop below)
// @param nameOutput name of the output tensor
51 ROperator_Slice(std::string nameData, std::vector<std::string> names, std::string nameOutput)
52 : fNData(UTILITY::Clean_name(nameData)),
53 fNOutput(UTILITY::Clean_name(nameOutput))
54 {
55 fNames.resize(4);
56 // axes and steps can be optional
57 for (size_t i = 0; i < names.size(); ++i) {
58 fNames[i] = UTILITY::Clean_name(names[i]);
59 }
60
// NOTE(review): Doxygen lines 61-62 are folded out of this listing -
// presumably they register the operator's input/output tensor names
// (fInputTensorNames / fOutputTensorNames appear in this page's symbol
// index) - confirm against the full source file.
63 }
64 // ctor for versions < 10
65 ROperator_Slice(std::string nameData, std::vector<IType> starts, std::vector<IType> ends, std::vector<IType> axes, std::string nameOutput)
66 : fNData(UTILITY::Clean_name(nameData)),
67 fNOutput(UTILITY::Clean_name(nameOutput))
68 {
69 fAttributes.push_back(starts);
70 fAttributes.push_back(ends);
71 fAttributes.push_back(axes);
72 }
73
74
75
// Initialize the Slice operator from the model:
//  - collects start/end/axes/steps either from the extra input tensors
//    (opset >= 10 ctor) or from the stored attributes (opset < 10 ctor),
//  - normalizes negative values and clamps out-of-range ones where the axis
//    dimension is known at compile time,
//  - computes the output shape and registers the output tensor (as a constant
//    tensor when the input itself is an INT64 initialized tensor).
// NOTE(review): this listing comes from a Doxygen page with some source lines
// folded out (Doxygen lines 82, 101-106, 138, 315-316, 320-321 below); the
// notes marked "folded" are assumptions to confirm against the full source.
76 void Initialize(RModel& model) override {
77 if (model.CheckIfTensorAlreadyExist(fNData) == false){ //input must be a graph input, or already initialized intermediate tensor
78 throw std::runtime_error("TMVA Slice Op Input Tensor is not found in model");
79 }
80
81 std::vector<std::vector<Dim>> shapes;
// NOTE(review): Doxygen line 82 is folded here - presumably
// fShapeInput = model.GetDimTensorShape(fNData); (GetDimTensorShape appears
// in this page's symbol index) - confirm against the full source.
83 shapes.push_back(fShapeInput);
84
// itensors[0..3] hold the compile-time known starts/ends/axes/steps values
85 std::vector<std::vector<IType>> itensors(4);
86
87 if (fNames.size() > 0) { // size has to be equal to 4
88 // loop on the extra 2 or 3 or 4 inputs
89 for (size_t i = 0; i < 4; ++i) {
90 if (!fNames[i].empty()) {
91 if (model.IsInitializedTensor(fNames[i])) {
92 auto dptr = model.GetInitializedTensorData(fNames[i]);
93 auto tensor = static_cast<IType *>(dptr.get());
94 auto vec = model.GetTensorShape(fNames[i]);
95 assert(vec.size() == 1);
96 itensors[i] = std::vector<IType>(tensor, tensor + vec[0]);
97
98 } else if (model.IsShapeTensor(fNames[i])) {
99 // case is a shape tensor
// NOTE(review): the branch bodies (Doxygen lines 101, 103, 105) are folded
// out of this listing - presumably they copy model.GetShapeTensorValues(...)
// into fStartDims / fEndDims / fStepDims (GetShapeTensorValues appears in
// this page's symbol index) - confirm against the full source.
100 if (i == 0) {
102 } else if (i == 1) {
104 } else if (i == 3) {
106 }
107 } else {
108 // case is an intermediate tensor
// values only known at run time: create named placeholder Dims and flag the
// corresponding fIs*Undef so Generate() reads them from the tensor at run time
109 auto shape = model.GetTensorShape(fNames[i]);
110 size_t s = shape[0];
111 for (size_t k = 0; k < s; k++) {
112 if (i == 0) {
113 fStartDims.push_back( Dim{std::string("start_") + fNOutput + "_" + std::to_string(k)});
114 fIsStartUndef = true;
115 } else if (i == 1) {
116 fEndDims.push_back(Dim{std::string("end_") + fNOutput + "_" + std::to_string(k)});
117 fIsEndUndef = true;
118 } else if (i == 3) {
119 fStepDims.push_back(Dim{std::string("step_") + fNOutput + "_" + std::to_string(k)});
120 fIsStepUndef = true;
121 }
122 }
123 }
124 }
125 }
126 } else {
127 // old slice versions
128 assert(fAttributes.size() > 1);
129 for (size_t i = 0; i < fAttributes.size(); i++) {
130 itensors[i] = fAttributes[i];
131 }
132 }
133 size_t dim = fShapeInput.size();
134
135 // default values
136 fSteps = std::vector<Dim>(dim, Dim{1});
137 fStart = std::vector<Dim>(dim, Dim{0});
// NOTE(review): Doxygen line 138 is folded here - presumably the default
// initialization of fEnd (to the input axis dimensions) - confirm against
// the full source.
139
140 // default axes
141 if (itensors[2].empty()) {
142 fAxes.resize(dim);
143 std::iota(fAxes.begin(), fAxes.end(), 0);
144 } else {
145 fAxes = itensors[2];
146 for (size_t i = 0; i < fAxes.size(); i++) {
147 // negative axes - they count from the back
148 if (fAxes[i] < 0) fAxes[i] = dim + fAxes[i];
149 if (fAxes[i] < 0 || fAxes[i] >= static_cast<IType>(dim))
150 throw std::runtime_error("TMVA Slice Op : invalid axis value " + std::to_string(fAxes[i]) +
151 " for " + std::to_string(i));
152 }
153 }
154 // Loop on axis to get start/end/step values
155 for (size_t i = 0; i < fAxes.size(); i++) {
156 if (!itensors[0].empty() )
157 fStartDims.push_back(Dim{ static_cast<size_t>(itensors[0][i])});
158 if (fStartDims.empty())
159 throw std::runtime_error("TMVA Slice Op : Missing start input tensor");
160
161 if (!itensors[1].empty())
162 fEndDims.push_back(Dim{ static_cast<size_t>(itensors[1][i])});
163 else if (fEndDims.empty())
164 throw std::runtime_error("TMVA Slice Op : Missing end input tensor");
165
166 if (!itensors[3].empty()) {
167 fStepDims.push_back(Dim{ static_cast<size_t>(itensors[3][i])});
168 }
169 else if (fStepDims.size() < fAxes.size()) // this can happen since it is optional
170 fStepDims.push_back(Dim{size_t(1)});
171
// axis dimension known at compile time: resolve and clamp values here
172 if (!fShapeInput[fAxes[i]].isParam) {
173 size_t iAxisDim = fShapeInput[fAxes[i]].dim;
174 //correct values if too large or too small
175 IType istart = 0;
176 if (!fStartDims[i].isParam) {
177 istart = static_cast<IType>(fStartDims[i].dim);
178 if (istart < 0) istart = iAxisDim + istart;
179 }
180 IType iend = static_cast<IType>(iAxisDim);
181 if (!fEndDims[i].isParam) {
182 iend = static_cast<IType>(fEndDims[i].dim);
183 if (iend < 0) iend = iAxisDim + iend;
184 }
185 //steps
186 IType istep = 1;
187 if (!fStepDims[i].isParam) {
188 istep = static_cast<IType>(fStepDims[i].dim);
189 } else {
190 throw std::runtime_error("TMVA Slice Op : parametric step inputs are not supported");
191 }
192 // clamp start end values depending on steps
193 // start must be [0,N] for positive steps or [0,N-1] for negative
194 // end must be [0,N] for positive steps or [-1, N-1] for negative
195 if (istart < 0) istart = 0;
196 if (istep > 0) {
197 if (istart > static_cast<IType>(iAxisDim)) istart = static_cast<IType>(iAxisDim);
198 if (iend < 0) iend = 0;
199 if (iend > static_cast<IType>(iAxisDim)) iend = static_cast<IType>(iAxisDim);
200 } else if (istep < 0) {
201 if (istart > static_cast<IType>(iAxisDim)-1) istart = static_cast<IType>(iAxisDim) -1;
202 if (iend < -1) iend = -1;
203 if (iend > static_cast<IType>(iAxisDim)-1) iend = static_cast<IType>(iAxisDim) -1;
204 } else {
205 throw std::runtime_error("TMVA Slice Op : invalid step value " + std::to_string(istep) +
206 " for " + std::to_string(i));
207 }
208 // for parametric values clamping will be done at run time
209 if (fStartDims[i].isParam)
210 fStart[fAxes[i]] = fStartDims[i];
211 else
212 fStart[fAxes[i]] = Dim{size_t(istart)};
// NOTE(review): BUG? the next condition re-tests fStartDims[i].isParam, but
// the branch assigns fEnd from fEndDims - it very likely should test
// fEndDims[i].isParam (copy-paste from the start case above). Confirm and fix.
213 if (fStartDims[i].isParam)
214 fEnd[fAxes[i]] = fEndDims[i];
215 else
216 fEnd[fAxes[i]] = Dim{size_t(iend)};
217
218 fSteps[fAxes[i]] = Dim{size_t(istep)};
219 } else {
220 //std::cout << i << " Param dim for " << fAxes[i] << " " << fShapeInput[fAxes[i]] << std::endl;
221 // correct only negative values
222 if (!fStartDims[i].isParam) {
223 IType istart = static_cast<IType>(fStartDims[i].dim);
224 if (istart < 0) {
225 std::string sstart = std::string("(") + fShapeInput[fAxes[i]].param + "-" + std::to_string(-istart) +")";
226 fStart[fAxes[i]] = Dim{sstart,size_t(-1)};
227 } else {
228 fStart[fAxes[i]] = Dim{size_t(istart)};
229 }
230 } else {
231 fStart[fAxes[i]] = fStartDims[i];
232 }
233 if (!fEndDims[i].isParam) {
234 IType iend = static_cast<IType>(fEndDims[i].dim);
235 if (iend < 0) {
236 std::string send = std::string("(") + fShapeInput[fAxes[i]].param + "-" + std::to_string(-iend) +")";
237 fEnd[fAxes[i]] = Dim{send,size_t(-1)};
238 } else {
239 fEnd[fAxes[i]] = Dim{size_t(iend)};
240 }
241 } else {
242 fEnd[fAxes[i]] = fEndDims[i];
243 }
244
245 fSteps[fAxes[i]] = fStepDims[i];
246 }
247
248 }
249 // find output shape
250 fShapeOutput.resize(dim);
251 for (size_t i = 0; i < dim; i++) {
252 if (!fEnd[i].isParam && !fStart[i].isParam && !fSteps[i].isParam) {
253 int64_t istart = static_cast<int64_t>(fStart[i].dim);
254 int64_t iend = static_cast<int64_t>(fEnd[i].dim);
255 int64_t istep= static_cast<int64_t>(fSteps[i].dim);
256 int64_t s = (iend-istart)/istep;
257 fShapeOutput[i] = Dim{static_cast<size_t>(s)};
258 } else {
// parametric case: build a symbolic expression string for the output size
259 std::string s;
260 if (fStart[i].GetVal() != "0")
261 s = "(" + fEnd[i].GetVal() + "-" + fStart[i].GetVal() + ")";
262 else
263 s = fEnd[i].GetVal();
264 if (fSteps[i].GetVal() != "1") {
265 s.insert(0,"(");
266 s += ")/" + fSteps[i].GetVal() + ")";
267 }
268 fShapeOutput[i] = Dim{s,size_t(-1)};
269 // add also the shape parameters to RModel to declare them when
270 // allocating output tensor
271 if (fEnd[i].isParam && fEnd[i].dim != size_t(-1))
272 model.AddShapeParam(fEnd[i].param,fEnd[i].dim );
273 if (fStart[i].isParam && fStart[i].dim != size_t(-1))
274 model.AddShapeParam(fStart[i].param,fStart[i].dim );
275 if (fSteps[i].isParam && fSteps[i].dim != size_t(-1))
276 model.AddShapeParam(fSteps[i].param,fSteps[i].dim );
277
278 }
279 }
280 // case input is a constant tensor and of int64 type
281 if (model.IsInitializedTensor(fNData) && model.GetTensorType(fNData) == ETensorType::INT64) {
282 fIsOutputConstant = true;
283 auto inputData = static_cast<int64_t*>(model.GetInitializedTensorData(fNData).get());
284 size_t outputSize = ConvertShapeToLength(ConvertShapeToInt(fShapeOutput));
285 std::vector<int64_t> outputData(outputSize);
286 std::vector<size_t> inputStride = UTILITY::ComputeStrideFromShape(ConvertShapeToInt(fShapeInput));
// NOTE(review): the unconditional std::cout printouts below (and inside the
// lambda) look like leftover debug output - they should be removed or guarded
// by model.Verbose().
287 std::cout << "slice " << ConvertDimShapeToString(fShapeInput) << " output size " << outputSize << " " << ConvertDimShapeToString(fShapeOutput) << std::endl;
288 std::cout << " start - end -steps \n";
289 for (size_t ii = 0; ii< fStart.size(); ii++)
290 std::cout << fStart[ii] << " " << fEnd[ii] << " " << fSteps[ii] << std::endl;
291 // perform slice using a recursive function- need to use two lambda functions for this
292 auto sliceRecursive = [&](size_t iaxis, size_t & outIdx, size_t & inOffset) {
293 auto slice_impl = [&](size_t iax, size_t & outputIdx, size_t & inputOffset, auto & sliceRecImpl) {
294 std::cout << "SLice_impl " << fStart.size() << " " << fEnd.size() << " " << fSteps.size() << " " << iax << std::endl;
295 if (fStart[iax].isParam || fEnd[iax].isParam || fSteps[iax].isParam)
296 throw std::runtime_error("TMVA Slice Op : cannot have parametric values when input is constant");
297 // compute indices
298 std::vector<IType> indices;
299 for (IType i = (IType) fStart[iax].dim; (IType(fSteps[iax].dim) > 0) ? i < IType(fEnd[iax].dim) : i > IType(fEnd[iax].dim); i += IType(fSteps[iax].dim) )
300 indices.push_back(i);
301 if (iax == dim-1) { // last axis
302 std::cout << "SLice_impl last axis: " << indices.size() << " : ";
303 for (size_t i = 0; i < indices.size(); i++) {
304 std::cout << outputIdx << " , " << indices[i] << " " << inputOffset << " ; ";
305 outputData[outputIdx] = inputData[inputOffset + indices[i]];
306 outputIdx++;
307 }
308 std::cout << std::endl;
309 return;
310 } else {
311 std::cout << "SLice_impl else : " << indices.size() << " : ";
312 for (size_t i = 0; i < indices.size(); i++) {
313 std::cout << inputStride[iax] << " , " << indices[i] << " " << inputOffset << " ";
314 size_t offset = inputOffset + inputStride[iax]*indices[i];
// NOTE(review): Doxygen lines 315-316 are folded here - presumably the
// recursive call, e.g. sliceRecImpl(iax+1, outputIdx, offset, sliceRecImpl);
// - confirm against the full source.
316 }
317 std::cout << std::endl;
318 }
319 };
// NOTE(review): Doxygen line 320 is folded here - presumably the initial
// invocation slice_impl(iaxis, outIdx, inOffset, slice_impl); - confirm
// against the full source.
321 };
322 size_t idx = 0;
323 size_t offset = 0;
324 sliceRecursive(0, idx, offset);
325
326 model.AddConstantTensor<int64_t>(fNOutput, ConvertShapeToInt(fShapeOutput), outputData.data());
327 if (model.Verbose()) {
328 std::cout << "Slice: output is a constant tensor " << ConvertShapeToString(fShapeOutput) << " : "
329 << ConvertValuesToString(outputData) << std::endl;
330 }
331 }
332 else {
333 model.AddIntermediateTensor(fNOutput, model.GetTensorType(fNData), fShapeOutput);
334 if (model.Verbose()) {
335 std::cout << "Slice ---> " << fNOutput << " " << ConvertShapeToString(fShapeOutput) << std::endl;
336 }
337 }
338 }
339
340 std::string Generate(std::string OpName) override {
341 if (fIsOutputConstant) return ""; //no op for constant tensors
342
343 OpName = "op_" + OpName;
344 if (fShapeInput.empty() || fShapeOutput.empty()){
345 throw std::runtime_error("TMVA SOFIE Slice Op called to Generate without being initialized first");
346 }
347
348 std::stringstream out;
349 //std::string opName = "Slice";
350
351 out << SP << "///------- Slice operator\n" << std::endl;
352 // loop on the dimensions depending no the orders
353 size_t ndim = fShapeInput.size();
354 auto strides = UTILITY::ComputeStrideFromShape(fShapeInput);
355
356
357 out << SP << "{\n"; // define operator scope
358 for (size_t i = 0; i < fStepDims.size(); i++) {
359 if (fStepDims[i].isParam) {
360 if (fIsStepUndef)
361 out << SP << "size_t " << fStepDims[i] << " = tensor_" << fNames[3] << "[" << i << "];\n";
362 }
363 }
364 // special case for parametric values for start/end. Need to do clipping
365 for (size_t i = 0; i < fStartDims.size(); i++) {
366 if (fStartDims[i].isParam && fStartDims[i].param != fShapeInput[fAxes[i]].param) {
367 std::string s_start = "start_" + std::to_string(i);
368 if (fIsStartUndef) {
369 s_start = fStartDims[i].param;
370 out << SP << "size_t " << s_start << " = tensor_" << fNames[0] << "[" << i << "];\n";
371 } else {
372 out << SP << "size_t " << s_start << " = " << fStartDims[i] << ";\n";
373 fStart[fAxes[i]] = s_start; // need to use this value later when slicing
374 }
375 out << SP << "if (" << s_start << " < 0) " << s_start << " += " << fShapeInput[fAxes[i]] <<";\n";
376 out << SP << "if (" << s_start << " < 0) " << s_start << " = 0;\n";
377 if (!fStepDims[i].isParam) {
378 if (static_cast<IType>(fStepDims[i].dim) > 0 )
379 out << SP << "if (" << s_start << " > " << fShapeInput[fAxes[i]] << " ) " << s_start << " = " << fShapeInput[fAxes[i]] <<";\n";
380 else
381 out << SP << "if (" << s_start << " > " << fShapeInput[fAxes[i]] << " - 1" << " ) " << s_start << " = " << fShapeInput[fAxes[i]] << " - 1;\n";
382 }
383 }
384 // special case if step is negative and shape are equal and step is negative
385 else if (fStartDims[i].isParam && fStartDims[i].param == fShapeInput[fAxes[i]].param && !fStepDims[i].isParam && static_cast<IType>(fStepDims[i].dim) < 0 ) {
386 fStart[fAxes[i]] = Dim{ fStartDims[i].param + "-1" };
387 }
388 }
389 // now to for end
390 for (size_t i = 0; i < fEndDims.size(); i++) {
391 if (fEndDims[i].isParam && fEndDims[i].param != fShapeInput[fAxes[i]].param) {
392 std::string s_end = "end_" + std::to_string(i);
393 if (fIsEndUndef) {
394 s_end = fEndDims[i].param;
395 out << SP << "size_t " << s_end << " = tensor_" << fNames[1] << "[" << i << "];\n";
396 } else {
397 out << SP << "size_t " << s_end << " = " << fEndDims[i] << ";\n";
398 fEnd[fAxes[i]] = s_end; // need to use this value later when slicing
399 }
400 out << SP << "if (" << s_end << " < 0) " << s_end << " += " << fShapeInput[fAxes[i]] <<";\n";
401 if (!fStepDims[i].isParam) {
402 if (static_cast<IType>(fStepDims[i].dim) > 0 ) {
403 out << SP << "if (" << s_end << " < 0) " << s_end << " = 0;\n";
404 out << SP << "if (" << s_end << " > " << fShapeInput[fAxes[i]] << " ) " << s_end << " = " << fShapeInput[fAxes[i]] <<";\n";
405 } else {
406 out << SP << "if (" << s_end << " < -1) " << s_end << " = -1;\n";
407 out << SP << "if (" << s_end << " > " << fShapeInput[fAxes[i]] << " - 1" << " ) " << s_end << " = " << fShapeInput[fAxes[i]] << " - 1;\n";
408 }
409 }
410 }
411 // special case if step is negative and shape are equal and step is negative
412 else if (fEndDims[i].isParam && fEndDims[i].param == fShapeInput[fAxes[i]].param && !fStepDims[i].isParam && static_cast<IType>(fStepDims[i].dim) < 0 ) {
413 fEnd[fAxes[i]] = Dim{ fEndDims[i].param + "-1" };
414 }
415 }
416
417 out << SP << "size_t iOut = 0;\n";
418 std::string MSP = SP;
419 for (size_t idim = 0; idim < ndim; idim++) {
420 out << MSP << "for (size_t i" << idim << " = " << fStart[idim] << "; i" << idim << " < " << fEnd[idim]
421 << "; i" << idim << "+= " << fSteps[idim] << ") {\n";
422 MSP += SP;
423 if (idim < ndim-1) out << MSP << "size_t stride" << idim << " = " << strides[idim] << "*i" << idim << ";\n";
424 }
425 out << MSP << "size_t iInput = ";
426 for (size_t idim = 0; idim < ndim-1; idim++) out << " stride" << idim << " + ";
427 // here should be step size ?
428 out << "i" << ndim-1 << ";\n";
429 out << MSP << "tensor_" << fNOutput << "[iOut++] = tensor_" <<fNData << "[iInput];\n";
430 for (size_t idim = 0; idim < ndim; idim++) {
431 MSP = MSP.replace(0,SP.length(),"");
432 out << MSP << "}\n";
433 }
434 out << SP << "}\n"; // end operator scope
435
436 return out.str();
437 }
438
439};
440
441}//SOFIE
442}//Experimental
443}//TMVA
444
445
446#endif //TMVA_SOFIE_ROPERATOR_SLICE
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows the typed iteration through a TCollection.
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h offset
std::vector< size_t > GetTensorShape(const std::string &name) const
Definition RModel.cxx:29
std::vector< Dim > GetDimTensorShape(const std::string &name) const
Definition RModel.cxx:65
bool CheckIfTensorAlreadyExist(std::string tensor_name)
Definition RModel.cxx:122
bool IsShapeTensor(const std::string &name) const
check if a tensor is a shape tensor
Definition RModel.cxx:211
bool IsInitializedTensor(const std::string &name) const
Definition RModel.cxx:220
std::shared_ptr< void > GetInitializedTensorData(std::string tensor_name)
Definition RModel.cxx:312
const std::vector< Dim > & GetShapeTensorValues(const std::string &tensor_name) const
Definition RModel.cxx:215
std::vector< std::vector< IType > > fAttributes
ROperator_Slice(std::string nameData, std::vector< IType > starts, std::vector< IType > ends, std::vector< IType > axes, std::string nameOutput)
std::string Generate(std::string OpName) override
ROperator_Slice(std::string nameData, std::vector< std::string > names, std::string nameOutput)
std::vector< std::string_view > fInputTensorNames
Definition ROperator.hxx:47
std::vector< std::string_view > fOutputTensorNames
Definition ROperator.hxx:48
std::string Clean_name(std::string input_tensor_name)
std::vector< size_t > ComputeStrideFromShape(const std::vector< size_t > &shape)
compute stride of a tensor given its shape (assume layout is row-major)
std::string ConvertDimShapeToString(const std::vector< Dim > &shape)
std::size_t ConvertShapeToLength(const std::vector< size_t > &shape)
std::string ConvertValuesToString(size_t n, const T *data)
std::vector< size_t > ConvertShapeToInt(const std::vector< Dim > &shape)
Convert shape based on Dim to integer format.
std::string ConvertShapeToString(const std::vector< size_t > &shape)
create variable transformations