Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
ROperator_Gemm.hxx
Go to the documentation of this file.
1#ifndef TMVA_SOFIE_ROPERATOR_GEMM
2#define TMVA_SOFIE_ROPERATOR_GEMM
3
4
6#include "TMVA/ROperator.hxx"
7#include "TMVA/RModel.hxx"
8
9#include <sstream>
10#include <algorithm>
11#include <iterator>
12#include <iomanip>
13#include <limits>
14#include <cassert>
15
16namespace TMVA{
17namespace Experimental{
18namespace SOFIE{
19
20
21 template <typename T>
// NOTE(review): Doxygen extraction gap — original line 22 (the `class ROperator_Gemm ...`
// declaration, presumably deriving from ROperator) is missing here; confirm against the
// original header.
23 {
24
25 private:
26 bool fIsDynamic = false;   // true when input A or B is a dynamic / dim-input tensor (set in Initialize)
27 bool fBroadcastBias = false;   // true when bias C must be broadcast to Y's shape at run time
28
29 float fAttrAlpha = 1.0;   // Gemm `alpha` attribute: scales A*B
30 float fAttrBeta = 1.0;    // Gemm `beta` attribute: scales the bias C
// NOTE(review): extraction gap — original lines 31-32 (presumably the fAttrTransA /
// fAttrTransB and fActivation member declarations used throughout this file) are missing.
33
34 std::string fNA;        // cleaned name of input tensor A
35 std::string fNB;        // cleaned name of input tensor B
36 std::string fNC = "";   // cleaned name of optional bias tensor C (empty when absent)
37 std::string fNY;        // cleaned name of output tensor Y
38 std::string fType;      // element type as a string; only "float" is supported
// NOTE(review): extraction gap — original line 39 missing.
40 std::vector<Dim> fShapeA;        // (possibly dynamic) shape of A
41 std::vector<Dim> fShapeB;        // (possibly dynamic) shape of B
42 std::vector<size_t> fShapeC;     // integer shape of bias C (bias is assumed non-dynamic)
43 std::vector<Dim> fDimShapeC;     // Dim-based shape of bias C
44 std::vector<Dim> fShapeY;        // (possibly dynamic) shape of output Y
45 RModel * fModel = nullptr;       // back-pointer to the model, set in Initialize()
46
47 public:
48
// Constructor for a Gemm/MatMul without a bias tensor C.
// alpha/beta are the ONNX Gemm attributes; transA/transB request transposition of the
// corresponding input; names are cleaned via UTILITY::Clean_name before being stored.
50 ROperator_Gemm(float alpha, float beta, int_t transA, int_t transB, std::string nameA, std::string nameB, std::string nameY, EActivationType activation=EActivationType::UNDEFINED):
51 fAttrAlpha(alpha), fAttrBeta(beta), fAttrTransA(transA), fAttrTransB(transB), fNA(UTILITY::Clean_name(nameA)),
52 fNB(UTILITY::Clean_name(nameB)), fNY(UTILITY::Clean_name(nameY))
// NOTE(review): extraction gap — original line 54 missing (presumably the fActivation
// initializer; confirm against the original header).
53 {
55 fType = "float";
// Only float is supported for code generation; fail at compile time otherwise.
56 static_assert(std::is_same_v<T, float>,
57 "TMVA::SOFIE - Unsupported type parsing a Gemm operator");
// NOTE(review): extraction gap — original lines 58-59 missing (presumably registration of
// fInputTensorNames / fOutputTensorNames; confirm against the original header).
60 }
61
// Constructor for a Gemm with a bias tensor C (the full ONNX Gemm form
// Y = alpha * op(A) * op(B) + beta * C), optionally fused with an activation.
62 ROperator_Gemm(float alpha, float beta, int_t transA, int_t transB, std::string nameA, std::string nameB, std::string nameC, std::string nameY, EActivationType activation=EActivationType::UNDEFINED):
63 fAttrAlpha(alpha), fAttrBeta(beta), fAttrTransA(transA), fAttrTransB(transB), fNA(UTILITY::Clean_name(nameA)),
64 fNB(UTILITY::Clean_name(nameB)), fNC(UTILITY::Clean_name(nameC)), fNY(UTILITY::Clean_name(nameY)), fActivation(activation)
65 {
// NOTE(review): extraction gap — original line 66 missing.
67 fType = "float";
68
// NOTE(review): extraction gap — original lines 69-70 missing (presumably the same
// static_assert / tensor-name registration as the other constructor; confirm).
71 }
72
73 std::vector<ETensorType> TypeInference(std::vector<ETensorType> input) override {
74 ETensorType out = input[0];
75 return {out};
76 }
77
// Shape inference shared by the static (size_t) and dynamic (Dim) paths.
// Given 2 or 3 input shapes, returns the output shape of Y:
//  - with 3 inputs (bias present) Y takes the shape of C;
//  - otherwise Y = (leading broadcast dims..., M, N) from A (.., M, K) * B (.., K, N),
//    honouring the transA/transB attributes.
78 template <typename U>
79 std::vector<U> DoShapeInference(const std::vector<std::vector<U>> & input){
80 if (input.size() > 3) throw std::runtime_error("TMVA SOFIE Gemm Op Shape Inference only need 2 or 3 input tensor");
81 // accept tensor with input dimensions > 2
82 // example: A = (d1,d2,...,N1,N2) B = (d1,d2,...,N2,N3) --> Y = (d1,d2,..,N1,N3)
83 for (auto& i: input){
84 if (i.size() < 2){
85 throw std::runtime_error("TMVA SOFIE Gemm Op Shape Inference only accept input tensor with >=2 dimensions");
86 }
87 }
88
89 // when there are 3 inputs shape of Y is the one of C
90 if (input.size() == 3){
91 //shape of C is shape of Y
92 return input[2];
93 }
94 // ioffset cannot be less than 2
95 int ioffset = input[0].size()-2; // in case of tensors with dim > 2
96
// s_a/s_b are the trailing 2-D matrix shapes of A and B (the part actually multiplied).
97 std::vector<U> s_a(input[0].begin() + ioffset, input[0].begin() + ioffset + 2);
98 std::vector<U> s_b(input[1].begin() + ioffset, input[1].begin() + ioffset + 2);
99 // reverse in case of transpose
100 if (fAttrTransA){
101 std::reverse(s_a.begin(), s_a.end());
102 }
103 if (fAttrTransB){
104 std::reverse(s_b.begin(), s_b.end());
105 }
106 std::vector<U> s_y;
107 s_y.reserve(input[0].size());
108 if (input[0].size() > 2 && input[1].size() == input[0].size()) {
109 // in case of dim > 2 first dimensions are equal to the input ones not
110 // equal to 1 (e.g. (1,2,3) * (2,3,4) -> (2,2,4))
111 // here could probably use the Broadcasting function UTILITY::MultidirectionalBroadcastShape
112 for (size_t i = 0; i < input[0].size()-2; i++) {
113 Dim valueA = input[0][i];
114 Dim valueB = input[1][i];
// Numpy-style broadcasting of the leading (batch) dimensions: a "1" broadcasts
// against the other value; two distinct concrete values are an error.
115 if (valueA.GetVal() != valueB.GetVal()) {
116 if (valueB.GetVal() == "1")
117 s_y.push_back(input[0][i]);
118 else if (valueA.GetVal() == "1")
119 s_y.push_back(input[1][i]);
120 else if (!valueA.isParam && !valueB.isParam)
121 throw std::runtime_error("TMVA SOFIE Gemm Op - invalid input shapes " + valueA.GetVal() + " and "
122 + valueB.GetVal());
123 else if (valueA.isParam && valueB.isParam){
124 // check which parameter is first in RModel list
// Two symbolic dims: deterministically pick the parameter declared first in the
// model's dim-name list (requires fModel, which Initialize() has set).
125 auto & dimNames = fModel->GetDimShapeNames();
126 auto p1 = std::find(dimNames.begin(), dimNames.end(), valueA.param);
127 auto p2 = std::find(dimNames.begin(), dimNames.end(), valueB.param);
128 if (p1 < p2) s_y.push_back(input[0][i]);
129 else s_y.push_back(input[1][i]);
130 }
// Mixed symbolic/concrete: prefer the concrete (non-parametric) dimension.
131 else if (!valueA.isParam)
132 s_y.push_back(input[0][i]);
133 else if (!valueB.isParam)
134 s_y.push_back(input[1][i]);
135 else
136 throw std::runtime_error("TMVA SOFIE Gemm Op - invalid input shapes " + valueA.GetVal() + " and "
137 + valueB.GetVal());
138 }
139 else
140 s_y.push_back(input[0][i]);
141 }
142 }
143
// Append the matrix result dimensions: M from (possibly transposed) A, N from B.
144 s_y.push_back(s_a[0]);
145 s_y.push_back(s_b[1]);
146 return s_y;
147 }
148
// Static (integer) shape inference entry point.
// NOTE(review): Doxygen extraction gap — original line 151 is missing; presumably it
// fills `ret` via DoShapeInference on the integer shapes. Confirm against the original
// header: as shown here `ret` would be returned empty.
149 std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input) override {
150 std::vector<std::vector<size_t>> ret;
152 return ret;
153 }
// Dynamic (Dim-based) shape inference.
// NOTE(review): Doxygen extraction gap — original line 155 (the body, presumably
// `return DoShapeInference(input);`) is missing; confirm against the original header.
154 std::vector<Dim> DynamicShapeInference(const std::vector<std::vector<Dim>> & input){
156 }
157
158
159
// Resolve and validate the input tensor shapes against the model, compute the output
// shape of Y, decide whether run-time bias broadcasting is needed, and register Y
// with the model. Several statements are missing from this Doxygen extraction; each
// gap is flagged below and should be confirmed against the original header.
160 void Initialize(RModel& model) override {
161 //TODO: propagate A or B as specified by ONNX standard
162 fModel = &model;
163
164 if ((model.CheckIfTensorAlreadyExist(fNA) == false) || (model.CheckIfTensorAlreadyExist(fNB) == false) ){ //input must be a graph input, or already initialized intermediate tensor
165 throw std::runtime_error("TMVA SOFIE Gemm Op Input Tensor " + fNA + " or " + fNB + " is not found in model");
166 }
167 if (fNC != ""){
168 if (model.CheckIfTensorAlreadyExist(fNC) == false){ //input must be a graph input, or already initialized intermediate tensor
169 throw std::runtime_error("TMVA SOFIE Gemm Op Input Tensor " + fNC + " is not found in model");
170 }
171 }
// Fetch A's shape: Dim-based when dynamic, converted from integers otherwise.
172 if (model.IsDynamicTensor(fNA) || model.IsDimInputTensor(fNA) ) {
// NOTE(review): extraction gap — original line 173 missing (presumably
// fShapeA = model.GetDynamicTensorShape(fNA); confirm).
174 fIsDynamic = true;
175 } else {
176 auto shapeA_int = model.GetTensorShape(fNA);
// NOTE(review): extraction gap — original line 177 missing (presumably
// fShapeA = ConvertShapeToDim(shapeA_int); confirm).
178 }
179 // case A is of dim1 we prepend a 1 but we need to remove later
180 bool prependOne = false;
181 if (fShapeA.size() == 1) {
182 fShapeA.insert(fShapeA.begin(), Dim(1));
183 prependOne = true;
184 }
185
// Same for B.
186 if (model.IsDynamicTensor(fNB) || model.IsDimInputTensor(fNB)) {
// NOTE(review): extraction gap — original line 187 missing (B's dynamic shape fetch).
188 fIsDynamic = true;
189 }
190 else {
191 auto shapeB_int = model.GetTensorShape(fNB);
// NOTE(review): extraction gap — original line 192 missing (B's shape conversion).
193 }
194 // case B is dim1 we append a 1 but we need to remove later
195 bool appendOne = false;
196 if (fShapeB.size() == 1) {
197 fShapeB.insert(fShapeB.end(), Dim(1));
198 appendOne = true;
199 }
200 // assume if not shape is 2 that extra values are 1.
201 // implement also MatMul case where we stack matrices (see numpy.matmul)
202 if (fShapeA.size() != fShapeB.size()) {
203 // if different dimensions we prepend 1 values
204 if (fShapeA.size() < fShapeB.size()) {
205 fShapeA.insert(fShapeA.begin(), fShapeB.size()-fShapeA.size(), Dim(1));
206 } else if (fShapeB.size() < fShapeA.size()) {
207 fShapeB.insert(fShapeB.begin(), fShapeA.size()-fShapeB.size(), Dim(1));
208 }
209 }
210
// NOTE(review): extraction gap — original line 211 missing (presumably
// fShapeY = DynamicShapeInference({fShapeA, fShapeB}); confirm).
212 std::vector<size_t> shapeY = ConvertShapeToInt(fShapeY);
213
214 // bias is normally not dynamic (not support it for time being)
215 if (fNC != ""){
216 if (model.IsDynamicTensor(fNC))
// NOTE(review): extraction gap — original line 217 missing (presumably a throw for
// unsupported dynamic bias; confirm).
218 else {
219 fShapeC = model.GetTensorShape(fNC);
// NOTE(review): extraction gap — original line 220 missing (presumably
// fDimShapeC = ConvertShapeToDim(fShapeC); confirm).
221 }
222 // for dynamic outputs broadcasting is always needed
223 bool broadcast_needed = false;
224 if (fIsDynamic && shapeY.empty())
225 broadcast_needed = true;
226 else
227 // consider broadcasting also if they have different length
// NOTE(review): extraction gap — original line 228 missing (the length-comparison
// expression assigning broadcast_needed; confirm).
229
230
231 if (broadcast_needed) {
232 fBroadcastBias = true;
233 // check if broadcasting is compatible and note that prepend 1 to shapeC
// NOTE(review): extraction gap — original line 234 missing (the call producing `r`,
// presumably a unidirectional-broadcast check of C against Y; confirm).
235 // return flag must be equal to 1 since this is a unidirectional broadcast of C->Y
236 if (r.first > 1) {
237 throw std::runtime_error("TMVA SOFIE Gemm Op - bias tensor of shape " + ConvertDimShapeToString(fDimShapeC) + " cannot be uni-directional broadcasted to " + ConvertDimShapeToString(fShapeY));
238 }
// NOTE(review): extraction gap — original line 239 missing.
240 }
241 }
242
243 // remove appended or prepended value of 1 in Y
244 if (prependOne) {
245 if (fIsDynamic)
246 fShapeY.erase(fShapeY.begin());
247 else
248 shapeY.erase(shapeY.begin());
249 }
250 if (appendOne) {
251 if (fIsDynamic)
252 fShapeY.erase(fShapeY.end()-1);
253 else
254 shapeY.erase(shapeY.end()-1);
255 }
256
// Register the output tensor with the model (static or dynamic variant).
257 if (!fIsDynamic)
// NOTE(review): extraction gap — original line 258 missing (presumably
// model.AddIntermediateTensor(fNY, ...); confirm).
259 else
// NOTE(review): extraction gap — original line 260 missing (presumably
// model.AddDynamicTensor(fNY, ...); confirm).
261
262 if (model.Verbose()){
263 std::cout << "Gemm (or MatMul) " << " ---> " << fNY << " shape ";
264 if (fIsDynamic)
265 std::cout << ConvertDimShapeToString(fShapeY) << std::endl;
266 else
267 std::cout << ConvertShapeToString(shapeY) << std::endl;
268 }
269
// The generated code uses std::copy / std::fill style helpers.
270 model.AddNeededStdLib("algorithm");
271 }
272
// Emit the C++ inference code for this Gemm/MatMul node as a string:
// an optional run-time bias broadcast loop, the BLAS Gemm_Call (possibly stacked over
// leading batch dimensions for MatMul), and an optional fused ReLU. Several statements
// are missing from this Doxygen extraction; gaps are flagged inline.
273 std::string Generate(std::string opName) override {
274 opName = "op_" + opName;
275
// Initialize() must have run first to populate the shapes.
276 if (fShapeA.empty() || fShapeB.empty() || fShapeY.empty() || (fNC != "" && fShapeC.empty())) {
277 throw std::runtime_error("TMVA SOFIE Gemm Op called to Generate without being initialized first");
278 }
279 std::stringstream out;
280 out << "\n//--------- Gemm " << opName << " " << ConvertDimShapeToString(fShapeA) << " * " << ConvertDimShapeToString(fShapeB)
281 << " -> " << ConvertDimShapeToString(fShapeY) << "\n";
282 // need to consider case A and B have dim > 2 (for MatMul)
283 int64_t dimA = fShapeA.size();
284 int64_t dimB = fShapeB.size();
285 int64_t dimY = fShapeY.size();
286 int64_t dimC = fDimShapeC.size();
287 if (dimA != dimB || dimA != dimY || (fBroadcastBias && dimC != dimY)) {
288 std::cout << " shape A " << ConvertDimShapeToString(fShapeA)
289 << " shape B " << ConvertDimShapeToString(fShapeB)
290 << " shape C " << ConvertDimShapeToString(fDimShapeC)
291 << " shape Y " << ConvertDimShapeToString(fShapeY) << std::endl;
292 throw std::runtime_error("TMVA SOFIE Gemm(MatMul) has invalid shape for inputs or output");
293 }
// m, n, k as strings (they may be symbolic for dynamic shapes).
294 auto m = (fAttrTransA ? fShapeA[dimA-1].GetVal() : fShapeA[dimA-2].GetVal());
295 auto n = (fAttrTransB ? fShapeB[dimB-2].GetVal() : fShapeB[dimB-1].GetVal());
296 auto k = (fAttrTransA ? fShapeA[dimA-2].GetVal() : fShapeA[dimA-1].GetVal());
297 // size of A: if (transposeA) is m*k else k*m
298 // size of B n*k
299 std::vector<Dim> sY = {fShapeY[dimY-2], fShapeY[dimY-1]};
300 // extra dimensions in case of stacked MatMul
301 std::vector<Dim> sExtraY;
302 for (int64_t i = 0; i < dimY-2; i++) {
303 sExtraY.push_back(fShapeY[i]);
304 }
305 auto lengthGemm = ConvertDimShapeToLength(sY); // size of the Gemm operation
306 auto lengthExtra_Y = ConvertDimShapeToLength(sExtraY); // extra length in case input tensors are of dim>2 (MatMul)
307 std::string lengthExtra_C;
308 std::vector<Dim> sExtraC;
309 std::vector<Dim> sC;
310 bool haveExtraC = false;
// Split the bias shape into its trailing 2-D part (sC) and leading batch part (sExtraC).
311 if (dimC > 2) {
312 sC = {fDimShapeC[dimC-2], fDimShapeC[dimC-1]};
313 for (int64_t i = 0; i < dimC-2; i++) {
314 sExtraC.push_back(fDimShapeC[i]);
315 }
// NOTE(review): extraction gap — original line 316 missing (presumably
// lengthExtra_C = ConvertDimShapeToLength(sExtraC); confirm).
317 if (lengthExtra_C != "1") haveExtraC = true;
318 } else if (dimC > 0) {
319 for (int64_t i = 0; i < dimC; i++) {
320 sC.push_back(fDimShapeC[i]);
321 }
322 }
323
324 // case bias is present
325 if (!fNC.empty()){
326 // when the 2 last dims of bias and Y are not compatible we need to perform a run time broadcast
327 if (sC != sY) fBroadcastBias = true;
328 if (!fBroadcastBias) {
329 // add a check in case broadcasting was not needed or done outside of session
330 // C should have smaller dimension of Y
331 if (!fIsDynamic) {
332 if ((std::stoi(lengthGemm) != std::stoi(ConvertDimShapeToLength(sC))) ||
333 ( haveExtraC && std::stoi(lengthExtra_Y) != std::stoi(lengthExtra_C)))
334 throw std::runtime_error("TMVA SOFIE Gemm Op " + opName + " Bias tensor " + fNC + " has not correct size "
335 + ConvertShapeToString(fShapeC) + " output length " + lengthGemm);
336 } else {
337 // add a dynamic check (C should not be a dynamic tensor)
338 out << SP << "assert(" << lengthGemm << " == " << ConvertDimShapeToLength(sC) << ");\n";
339 if (haveExtraC) out << SP << "assert(" << lengthExtra_Y << " == " << lengthExtra_C << ");\n";
340 }
341 }
342 } else {
343 fBroadcastBias = false;
344 //in this case fAttrBeta needs to be equal to zero otherwise second time we run we will use
345 // the previous result
346 if (fAttrBeta != 0) {
347 throw std::runtime_error("TMVA SOFIE Gemm Op " + opName + " Bias tensor is not present but beta value in Gemm is not zero");
348 }
349 }
350
351 // include MatMul case where we stack the Gemm operations
352 // exclude case where we have only 1's in the additional dims
353 bool doStackMul = dimY > 2 && ( fIsDynamic || std::stoi(lengthExtra_Y) > 1);
354 // compute input offset for stack multiplications
355 std::string lengthExtra_A;
356 std::string lengthExtra_B;
357 std::string increment_A;
358 std::string increment_B;
359
360 if (doStackMul) {
361 std::vector<Dim> sA(fShapeA.begin(), fShapeA.begin()+dimA-2);
362 std::vector<Dim> sB(fShapeB.begin(), fShapeB.begin()+dimB-2);
363 std::vector<Dim> mA = {fShapeA[dimA-2], fShapeA[dimA-1]};
364 std::vector<Dim> mB = {fShapeB[dimB-2], fShapeB[dimB-1]};
// NOTE(review): extraction gap — original lines 365-366 missing (presumably
// lengthExtra_A / lengthExtra_B = ConvertDimShapeToLength(sA / sB); confirm).
367 // if A ( b, m, k) and B (b, k, n) these are the strides of A and B ( m*k for A and n*k for B )
// NOTE(review): extraction gap — original lines 368-369 missing (presumably
// increment_A / increment_B = ConvertDimShapeToLength(mA / mB); confirm).
370 }
371 bool extraA = (doStackMul && lengthExtra_A != "1");
372 bool extraB = (doStackMul && lengthExtra_B != "1");
// NOTE(review): extraction gap — original line 373 missing (presumably the `extraC`
// definition used below, e.g. doStackMul && haveExtraC; confirm).
374 auto SP2 = SP;
375 if (doStackMul) {
376 out << SP << "size_t " << opName << "_y_offset = 0;\n"; // needed if we stack the gemm operations
377 if (extraA)
378 out << SP << "size_t " << opName << "_A_offset = 0;\n";
379 if (extraB)
380 out << SP << "size_t " << opName << "_B_offset = 0;\n";
381 if (extraC)
382 out << SP << "size_t " << opName << "_C_offset = 0;\n";
383 out << SP << "for (size_t i = 0; i < " << lengthExtra_Y << "; i++){\n";
384 SP2 += SP;
385 }
386 // do the bias broadcasting at run time by
387 // initializing output Y vector with bias values
388 if (fBroadcastBias) {
389
// Bias is pre-copied into Y, so the Gemm call must accumulate (beta = 1).
390 fAttrBeta = 1.;
391 out << SP2 << "for (size_t j = 0; j < " << sY[0] << "; j++) { \n";
392 out << SP2 << SP << "size_t y_index = ";
393 if (doStackMul) // add offset in caseof stack multiplications (not sure if bias is present in these cases)
394 out << opName << "_y_offset + ";
395 if (sY[1].GetVal() != "1")
396 out << sY[1] << " * j;\n";
397 else
398 out << "j;\n";
399
400 std::string prefix = SP2 + SP + "TMVA::Experimental::SOFIE::";
401 std::string target = "tensor_" + fNY;
402 if (sC.size() != 2) {
403 throw std::runtime_error("TMVA SOFIE Gemm Op - invalid rank for bias tensor " + ConvertDimShapeToString(fDimShapeC) + ConvertDimShapeToString(sC));
// C shaped (1, N): copy the bias row into each row of Y.
404 } if (sC[0].GetVal() == "1" && sC[1].GetVal() == sY[1].GetVal()) {
405 out << prefix << "Copy(" << target << " + y_index, tensor_" << fNC << ", " << sY[1] << ");\n";
// C shaped (M, 1): fill each row of Y with the bias element for that row.
406 } else if (sC[1].GetVal() == "1" && sC[0].GetVal() == sY[0].GetVal()) {
407 out << prefix << "Fill(" << target << " + y_index, tensor_" << fNC << "[j], " << sY[1] << ");\n";
408 } else if (sC[0].GetVal() == "1" && sC[1].GetVal() == "1") {
409 // scalar case
410 out << prefix << "Fill(" << target << " + y_index, tensor_" << fNC << "[0], " << sY[1] << ");\n";
411 } else {
412 throw std::runtime_error("TMVA SOFIE Gemm Op - invalid shape for bias tensor " + ConvertDimShapeToString(fDimShapeC));
413 }
414
415 out << SP2 << "}\n";
416 }
417
418 if (fType == "float"){
419
// Gemm_Call computes Y = alpha * op(A) * op(B) + beta * C; note the swapped n/m/B/A
// argument order (column-major BLAS convention applied to row-major data).
420 out << SP2 << "TMVA::Experimental::SOFIE::Gemm_Call(" << "tensor_" << fNY;
421 if (doStackMul) out << " + " << opName << "_y_offset";
422 out << ", "
423 << (fAttrTransB ? "true, " : "false, ")
424 << (fAttrTransA ? "true, " : "false, ")
425 << n << ", " << m << ", " << k << ", ";
426 out << std::setprecision(std::numeric_limits<float>::max_digits10) << fAttrAlpha << ", tensor_" << fNB;
427 if (extraB) out << " + " << opName << "_B_offset";
428 out << ", tensor_" << fNA;
429 if (extraA) out << " + " << opName << "_A_offset";
430 out << ", " << std::setprecision(std::numeric_limits<float>::max_digits10) << fAttrBeta << ",";
431 // in the case of bias and no broadcasting needed - I need to add bias as an extra tensor in Gemm call
432 if (!fNC.empty() && !fBroadcastBias) {
433 out << "tensor_" << fNC;
434 if (extraC) {
435 out << " + " << opName << "_C_offset";
436 }
437 } else {
438 out << "nullptr";
439 }
440 out << ");\n";
441
442 }
443
// Advance the per-matrix offsets for the next iteration of the stacked-MatMul loop.
444 if (doStackMul) {
445 out << SP << SP << opName << "_y_offset += " << lengthGemm << ";\n";
446 if (lengthExtra_A != "1")
447 out << SP << SP << opName << "_A_offset += " << increment_A << ";\n";
448 if (lengthExtra_B != "1")
449 out << SP << SP << opName << "_B_offset += " << increment_B << ";\n";
450 if (extraC)
451 // increment_C is lengthGEmm
452 out << SP << SP << opName << "_C_offset += " << lengthGemm << ";\n";
453 out << SP << "}\n"; // end of loop on the stacked multiplication
454 }
455
456 // fuse with Relu
// NOTE(review): extraction gap — original line 457 missing (presumably
// `if (fActivation == EActivationType::RELU) {`; confirm).
458 out << SP << "//--- applying RELU to output\n";
459 std::string tnsr = "tensor_" + fNY;
// NOTE(review): extraction gap — original line 460 missing (presumably the `reluSize`
// definition, the total output length; confirm).
461 out << SP << "TMVA::Experimental::SOFIE::Relu(" << tnsr << ", " << tnsr << ", " << reluSize << ");\n";
462 }
463
464 return out.str();
465 }
466
467 std::vector<std::string> GetBlasRoutines() override { return {"Gemm", "Gemv"}; }
468
469 };
470
471
472}//SOFIE
473}//Experimental
474}//TMVA
475
476
477#endif //TMVA_SOFIE_ROPERATOR_GEMM
size_t size(const MatrixT &matrix)
retrieve the size of a square matrix
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows the typed iteration through a TCollection.
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void input
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h Atom_t Int_t ULong_t ULong_t unsigned char prop_list Atom_t Atom_t target
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t r
const_iterator begin() const
const_iterator end() const
void AddNeededStdLib(std::string libname)
std::vector< size_t > GetTensorShape(const std::string &name) const
Definition RModel.cxx:51
bool IsDynamicTensor(const std::string &name) const
Definition RModel.cxx:269
void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector< Dim > dim_shape)
Definition RModel.cxx:284
bool CheckIfTensorAlreadyExist(std::string tensor_name)
Definition RModel.cxx:144
void AddDynamicTensor(std::string tensor_name, ETensorType type, std::vector< Dim > shape)
Definition RModel.cxx:301
bool IsDimInputTensor(const std::string &name) const
Definition RModel.cxx:274
std::vector< Dim > GetDynamicTensorShape(const std::string &name) const
Definition RModel.cxx:98
ETensorType GetTensorType(std::string name) const
Definition RModel.cxx:112
const std::vector< std::string > & GetDimShapeNames() const
Definition RModel.hxx:205
ROperator_Gemm(float alpha, float beta, int_t transA, int_t transB, std::string nameA, std::string nameB, std::string nameC, std::string nameY, EActivationType activation=EActivationType::UNDEFINED)
std::vector< Dim > DynamicShapeInference(const std::vector< std::vector< Dim > > &input)
std::vector< ETensorType > TypeInference(std::vector< ETensorType > input) override
ROperator_Gemm(float alpha, float beta, int_t transA, int_t transB, std::string nameA, std::string nameB, std::string nameY, EActivationType activation=EActivationType::UNDEFINED)
std::vector< std::vector< size_t > > ShapeInference(std::vector< std::vector< size_t > > input) override
std::vector< U > DoShapeInference(const std::vector< std::vector< U > > &input)
std::string Generate(std::string opName) override
void Initialize(RModel &model) override
std::vector< std::string > GetBlasRoutines() override
std::vector< std::string_view > fInputTensorNames
Definition ROperator.hxx:50
const std::string SP
space used to correctly indent the generated C++ code
Definition ROperator.hxx:45
std::vector< std::string_view > fOutputTensorNames
Definition ROperator.hxx:51
const Int_t n
Definition legend1.C:16
std::vector< size_t > MultidirectionalBroadcastShape(std::vector< std::vector< size_t > >)
std::string ConvertDimShapeToString(const std::vector< Dim > &shape)
std::vector< Dim > ConvertShapeToDim(const std::vector< size_t > &shape)
Convert shape from integer format to dynamic one (based on Dim)
std::vector< size_t > ConvertShapeToInt(const std::vector< Dim > &shape)
Convert shape based on Dim to integer format.
std::string ConvertDimShapeToLength(const std::vector< Dim > &shape)
std::string ConvertShapeToString(const std::vector< size_t > &shape)
create variable transformations
TMarker m
Definition textangle.C:8