Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
SOFIE_common.cxx
Go to the documentation of this file.
2
#include <algorithm>
#include <cctype>
#include <charconv>
#include <cstring>
#include <set>
#include <sstream>
#include <stdexcept>
#include <unordered_map>
9
10namespace TMVA {
11namespace Experimental {
12namespace SOFIE {
13
14/// @brief Convert shape from integer format to dynamic one (based on Dim)
15/// @param shape
16/// @return shape based on Dim
17std::vector<Dim> ConvertShapeToDim(const std::vector<size_t> & shape){
18 std::vector<Dim> ret_shape(shape.size());
19 for (size_t i =0; i < shape.size(); i++){
20 ret_shape[i].dim = shape[i];
21 }
22 return ret_shape;
23}
24
25/// @brief Convert shape based on Dim to integer format
26/// @param shape
27/// @return shape based on integer. Return an empty shape in case shape is dynamic (has a parameter)
28std::vector<size_t> ConvertShapeToInt(const std::vector<Dim> & shape){
29 std::vector<size_t> ret_shape(shape.size());
30 for (size_t i =0; i < shape.size(); i++){
31 if (shape[i].isParam) {
32 // try converting to integer in case string is a number >=0
33 int val = -1;
34 try {
35 val = std::stoi(shape[i].param);
36 if (val >= 0) ret_shape[i] = static_cast<size_t>(val);
37 else {
38 ret_shape.clear();
39 break;
40 }
41 }
42 catch (const std::invalid_argument& ) {
43 ret_shape.clear();
44 break;
45 }
46 } else {
47 ret_shape[i] = shape[i].dim;
48 }
49 }
50 return ret_shape;
51}
52
53
/// Compute the total number of elements of a tensor with the given shape.
/// An empty shape represents a scalar, so the returned length is 1.
std::size_t ConvertShapeToLength(const std::vector<size_t> & shape){
   std::size_t length = 1;
   for (std::size_t i = 0; i < shape.size(); ++i)
      length *= shape[i];
   return length;
}
60
62 switch(type){
63 case ETensorType::FLOAT : {
64 return "float";
65 }
66 case ETensorType::INT8 : {
67 return "int8_t";
68 }
69 case ETensorType::INT16 : {
70 return "int16_t";
71 }
72 case ETensorType::INT32 : {
73 return "int32_t";
74 }
75 case ETensorType::INT64 : {
76 return "int64_t";
77 }
78 case ETensorType::UINT8 : {
79 return "uint8_t";
80 }
81 case ETensorType::UINT16 : {
82 return "uint16_t";
83 }
84 case ETensorType::UINT32 : {
85 return "uint32_t";
86 }
87 case ETensorType::UINT64 : {
88 return "uint64_t";
89 }
90 case ETensorType::DOUBLE : {
91 return "double";
92 }
93 case ETensorType::BOOL : {
94 return "uint8_t";
95 }
96 default:{
97 return "other_" + std::to_string( (int) type);
98 }
99 }
100}
101
102// invert function might now work correctly for booleans
103// prefer avoid using it if possible
105 if(type == "float32" || type == "float" || type == "Float"){
106 return ETensorType::FLOAT;
107 }
108 else if(type == "int64" || type == "int64_t"){
109 return ETensorType::INT64;
110 }
111 else if(type == "int32" || type == "int32_t"){
112 return ETensorType::INT32;
113 }
114 else if (type == "double" || type == "float64"){
115 return ETensorType::DOUBLE;
116 }
117 else if (type == "bool" || type == "uint8_t" ){
118 return ETensorType::BOOL;
119 }
120 else{
122 }
123}
124
/// Render an integer shape as a diagnostic string, e.g. {2,3} -> "{ 2 , 3 }".
std::string ConvertShapeToString(const std::vector<size_t> & shape) {
   std::stringstream result;
   result << "{ ";
   bool first = true;
   for (auto d : shape) {
      if (!first) result << " , ";
      result << d;
      first = false;
   }
   result << " }";
   return result.str();
}
135
136std::string ConvertDimShapeToString(const std::vector<Dim> & shape) {
137 std::stringstream out;
138 out << "{ ";
139 for (size_t i = 0; i < shape.size(); i++) {
140 out << shape[i];
141 if (i < shape.size()-1) out << " , ";
142 }
143 out << " }";
144 return out.str();
145}
146
147std::string ConvertDimShapeToLength(const std::vector<Dim> & shape) {
148 // convert generic shape to a string
149 // multiply all the integer specified dimensions of the shape
150 std::string length;
151 // case of empty vectors return 1
152 if (shape.empty()) return "1";
153 int64_t int_length = -1;
154 for (size_t i = 0; i < shape.size(); i++) {
155 if (shape[i].isParam) {
156 if (!length.empty()) length += " * ";
157 length += shape[i].param;
158 } else {
159 if (int_length == -1)
160 int_length = shape[i].dim;
161 else
162 int_length *= shape[i].dim;
163 }
164 }
165 // multiply the integer components to the parametric one
166 // if larger than 1 - otherwise returns -1
167 if (int_length >= 0) {
168 if (!length.empty() && int_length > 1) {
169 length += " * ";
170 length += std::to_string(int_length);
171 } else if (length.empty()) { // case is full known shape
172 length = std::to_string(int_length);
173 }
174 }
175 return length;
176}
177
178
179namespace{
180template<typename T>
181static inline void copy_vector_data(int_t no_of_copies, int_t input_size, T* input, T* target){ //only visible within this translation unit
182 std::memcpy(target, input, input_size * sizeof(T));
184
185 while (already_copied * 2 <= no_of_copies){
186 std::memcpy(target + already_copied * input_size, target, already_copied * input_size * sizeof(T));
187 already_copied *= 2;
188 }
189
191 std::memcpy(target + already_copied * input_size, target, (no_of_copies - already_copied) * input_size * sizeof(T));
192 }
193}
194}
195
/// Check whether the whole string is a valid (signed) integer.
/// Uses std::from_chars: non-throwing, allocation-free and locale-independent.
bool IsInteger(const std::string & s) {
   int parsed{};
   const char * first = s.data();
   const char * last = s.data() + s.size();
   auto result = std::from_chars(first, last, parsed);
   return result.ec == std::errc() && result.ptr == last;
}
201
202bool UTILITY::AreSameShape(const std::vector<size_t>& shapeA, const std::vector<size_t>& shapeB) {
203 if (shapeA.size() != shapeB.size()) {
204 return false;
205 }
206 for (size_t dim = 0; dim < shapeA.size(); dim++) {
207 if (shapeA[dim] != shapeB[dim]) {
208 return false;
209 }
210 }
211 return true;
212}
213bool UTILITY::AreSameShape(const std::vector<size_t>& shapeA, const std::vector<Dim>& shapeB) {
214 if (shapeA.size() != shapeB.size()) {
215 return false;
216 }
217 for (size_t dim = 0; dim < shapeA.size(); dim++) {
218 if (shapeB[dim].isParam) return false;
219 if (shapeA[dim] != shapeB[dim].dim) {
220 return false;
221 }
222 }
223 return true;
224}
225bool UTILITY::AreSameShape(const std::vector<Dim>& shapeA, const std::vector<Dim>& shapeB) {
226 if (shapeA.size() != shapeB.size()) {
227 return false;
228 }
229 for (size_t dim = 0; dim < shapeA.size(); dim++) {
230 if (shapeA[dim].GetVal() != shapeB[dim].GetVal()) {
231 return false;
232 }
233 }
234 return true;
235}
236
237std::vector<size_t> UTILITY::MultidirectionalBroadcastShape(std::vector<std::vector<size_t>> shape)
238{
239 if (shape.size() < 2) {
240 throw
241 std::runtime_error("TMVA::SOFIE - MultidirectionalBroadcastShape requires at least 2 input shapes.");
242 }
243 // Number of input shapes to broadcast
244 size_t n = shape.size();
245 // Size of the output shape
246 size_t targetSize = shape[0].size();
247 for (size_t i = 1; i < n; i++) {
248 targetSize = std::max(targetSize, shape[i].size());
249 }
250 // Check if they have the same size
251 bool sameSize = true;
252 for (size_t i = 0; i < n; i++) {
253 if (shape[i].size() != targetSize) {
254 sameSize = false;
255 break;
256 }
257 }
258 if (sameSize) {
259 // Check if they have the same shape
260 bool sameShape = true;
261 for (size_t i = 1; i < n; i++) {
262 for (size_t dim = 0; dim < shape[0].size(); dim++) {
263 if (shape[i][dim] != shape[0][dim]) {
264 sameShape = false;
265 break;
266 }
267 }
268 if (!sameShape) {
269 break;
270 }
271 }
272 if (sameShape) {
273 return shape[0];
274 } else {
275 // Set the target shape
276 std::vector<size_t> targetShape(targetSize, 1);
277 for (size_t i = 0; i < n; i++) {
278 for (size_t dim = 0; dim < targetSize; dim++) {
279 targetShape[dim] = std::max(targetShape[dim], shape[i][dim]);
280 }
281 }
282 // Check if the input shapes are broadcastable to targetShape
283 bool broadcastable = true;
284 for (size_t i = 0; i < n; i++) {
285 for (size_t dim = 0; dim < targetSize; dim++) {
286 if (shape[i][dim] != 1 && targetShape[dim] != 1 && shape[i][dim] != targetShape[dim]) {
287 broadcastable = false;
288 break;
289 }
290 if (!broadcastable) {
291 break;
292 }
293 }
294 }
295 // They have the same shape and they are broadcastable to targetShape
296 if (broadcastable) {
297 return targetShape;
298 } else {
299 std::stringstream ss;
300 ss << "TMVA::SOFIE - Error multidirectional broadcasting shapes ";
301 for (size_t i = 0; i < n; i++) {
302 ss << ConvertShapeToString(shape[i]);
303 if (n > 2 && i < n - 2) {
304 ss << ", ";
305 } else if ( n >=2 && i == n - 2) {
306 ss << " and ";
307 }
308 }
309 ss << " to the same shape.";
310 throw
311 std::runtime_error(ss.str());
312 }
313 } // end sameShape
314 } // end sameSize
315 // Prepend the ith shape with ones
316 for (size_t i = 0; i < n; i++) {
317 if (shape[i].size() < targetSize) {
318 std::vector<size_t> newShape(targetSize, 1);
319 size_t offset = targetSize - shape[i].size();
320 std::copy(shape[i].begin(), shape[i].end(), newShape.begin() + offset);
321 shape[i] = newShape;
322 }
323 }
324 // Set the target shape
325 std::vector<size_t> targetShape(targetSize, 1);
326 for (size_t i = 0; i < n; i++) {
327 for (size_t dim = 0; dim < targetSize; dim++) {
328 targetShape[dim] = std::max(targetShape[dim], shape[i][dim]);
329 }
330 }
331 // Check if the shapes are broadcastable to targetShape
332 bool broadcastable = true;
333 for (size_t i = 0; i < n; i++) {
334 for (size_t dim = 0; dim < targetSize; dim++) {
335 if (shape[i][dim] != targetShape[dim] && shape[i][dim] != 1 && targetShape[dim] != 1) {
336 broadcastable = false;
337 break;
338 }
339 }
340 if (!broadcastable) {
341 break;
342 }
343 }
344 if (broadcastable) {
345 return targetShape;
346 } else {
347 std::stringstream ss;
348 ss << "TMVA::SOFIE - Error multidirectional broadcasting shapes ";
349 for (size_t i = 0; i < n; i++) {
350 ss << ConvertShapeToString(shape[i]);
351 if (n > 2 && i < n - 2) {
352 ss << ", ";
353 } else if ( n >=2 && i == n - 2) {
354 ss << " and ";
355 }
356 }
357 ss << " to the same shape.";
358 throw
359 std::runtime_error(ss.str());
360 }
361}
362
// check multi-directional broadcasting of two shapes (inputs are passed by non-const ref. since we might prepend them with ones)
// return a pair of an integer flag and the new broadcasted shape
365// if flag = 0: shape are identical
366// flag = 1: return shape is equal to A, we broadcast B
367// flag = 2: return shape is equal to B we broadcast A
368// flag = 3: return shape is common of two we broadcast A and B to output
369std::pair<int, std::vector<size_t>> UTILITY::MultidirectionalBroadcastShape(std::vector<size_t> & shapeA, std::vector<size_t> & shapeB)
370{
371 size_t sizeA = shapeA.size();
372 size_t sizeB = shapeB.size();
373 // Check if A and B have the same shape
375 return std::make_pair(0, shapeA);
376 }
377 // Find the common shape of A and B
378 size_t size = std::max(sizeA, sizeB);
379 if (sizeA < size) {
380 // prepend 1's in A to make of same shape as B
381 std::vector<size_t> newShapeA(size, 1);
382 size_t offset = size - sizeA;
383 std::copy(shapeA.begin(), shapeA.end(), newShapeA.begin() + offset);
384 shapeA = std::move(newShapeA);
385 }
386 if (sizeB < size) {
387 std::vector<size_t> newShapeB(size, 1);
388 size_t offset = size - sizeB;
389 std::copy(shapeB.begin(), shapeB.end(), newShapeB.begin() + offset);
390 shapeB = std::move(newShapeB);
391 }
392 bool broadcastable = true;
393 for (size_t i = 0; i < size; i++) {
394 if (shapeA[i] != shapeB[i] && shapeA[i] != 1 && shapeB[i] != 1) {
395 broadcastable = false;
396 break;
397 }
398 }
399 int broadcastFlag = 0;
400 if (broadcastable) {
401 // The output shape is max(outShape, targetShape)
402 std::vector<size_t> targetShape(size, 1);
403 for (size_t i = 0; i < size; i++) {
404 targetShape[i] = std::max(shapeA[i], shapeB[i]);
405 if (shapeB[i] < targetShape[i]) broadcastFlag |= 1;
406 if (shapeA[i] < targetShape[i]) broadcastFlag |= 2;
407 }
408 return std::make_pair(broadcastFlag, targetShape);
409 } else {
410 throw
411 std::runtime_error("TMVA::SOFIE - Error multidirectional broadcasting tensors of shape "
413 + " to a common shape.");
414 }
415}
416// unidirectional broadcast- of shape A to target B
417std::vector<size_t> UTILITY::UnidirectionalBroadcastShape(std::vector<size_t> & shapeA, std::vector<size_t> & shapeB)
418{
420 if (ret.first > 1) {
421 throw
422 std::runtime_error("TMVA::SOFIE - Error unidirectional broadcasting tensors of shape "
424 + " in a common shape.");
425 }
426 return ret.second;
427}
428
429// for broadcasting Dim shapes
430// flag indicates also which vector needs to be broadcasted
431// flag & 1 == 1 : broadcast B -> A
432// flag & 2 == 2 : broadcast A -> B
433// flag & 4 == 4 a run time check is needed on shapes with values
434std::pair<int, std::vector<Dim>> UTILITY::MultidirectionalBroadcastShape(std::vector<Dim> & shapeA, std::vector<Dim> & shapeB) {
435 size_t sizeA = shapeA.size();
436 size_t sizeB = shapeB.size();
437 // Check if A and B have the same shape
439 return std::make_pair(0, shapeA);
440 }
441 // Find the common shape of A and B
442 size_t size = std::max(sizeA, sizeB);
443 if (sizeA < size) {
444 // prepend 1's in A to make of same shape as B
445 std::vector<Dim> newShapeA(size, Dim{1});
446 size_t offset = size - sizeA;
447 std::copy(shapeA.begin(), shapeA.end(), newShapeA.begin() + offset);
448 shapeA = std::move(newShapeA);
449 }
450 if (sizeB < size) {
451 std::vector<Dim> newShapeB(size, Dim{1});
452 size_t offset = size - sizeB;
453 std::copy(shapeB.begin(), shapeB.end(), newShapeB.begin() + offset);
454 shapeB = std::move(newShapeB);
455 }
456
457 int broadcastFlag = 0;
458 // The output shape is targetShape
459 std::vector<Dim> targetShape(size);
460 for (size_t i = 0; i < size; i++) {
461 // assume we broadcast to the parametric value
462 if (shapeA[i] == shapeB[i]) {
463 targetShape[i] = shapeA[i];
464 } else if (shapeA[i].isParam && shapeB[i].GetVal() == "1" ) {
465 // broadcast B to A (case A is parametric with )
466 targetShape[i] = shapeA[i];
467 broadcastFlag |= 1;
468 } else if (shapeA[i].GetVal() == "1" && shapeB[i].isParam) {
469 // broadcast A to B
470 targetShape[i] = shapeB[i];
471 broadcastFlag |= 2;
472 } else if (!shapeA[i].isParam && !shapeB[i].isParam) {
473 if (shapeB[i].dim == 1) {
474 targetShape[i] = shapeA[i];
475 broadcastFlag |= 1;
476 } else if (shapeA[i].dim == 1) {
477 targetShape[i] = shapeB[i];
478 broadcastFlag |= 2;
479 } else {
480 // non broadcastable case cannot have A and B two different defined shapes different than one
481 broadcastFlag = -1;
482 }
483 } else if (shapeA[i].isParam && shapeB[i].isParam) {
484 // full dynamic case - we will decided at run time
485 std::stringstream s;
486 s << "std::max(" << shapeA[i] << "," << shapeB[i] << ")";
487 // use -1 for dim to indicate is an expression
488 targetShape[i] = Dim { s.str() , static_cast<size_t>(-1)};
489 broadcastFlag |= 4;
490 } else if (shapeA[i].isParam && !shapeB[i].isParam) {
491 // A -> B need to check at run time if consistent
492 targetShape[i] = shapeB[i];
493 broadcastFlag |= 6;
494 } else if (!shapeA[i].isParam && shapeB[i].isParam) {
495 // B -> A need to check at run time if consistent
496 targetShape[i] = shapeA[i];
497 broadcastFlag |= 5;
498 } else {
499 // all cases should be covered
500 throw std::runtime_error("TMVA::SOFIE - Fatal error in MultiDirectionalBroadCastDimShape");
501 }
502 }
503 if (broadcastFlag == -1) {
504 throw std::runtime_error("TMVA::SOFIE - Error multidirectional broadcasting tensors of shape " +
506 " to a common shape.");
507 }
508
509 return std::make_pair(broadcastFlag, targetShape);
510}
511
512std::string UTILITY::Clean_name(std::string input_tensor_name){
513 std::string s (input_tensor_name);
514 std::replace( s.begin(), s.end(), '-', '_');
515 // replace all non-alpohanumeric character except for "_"
516 s.erase(std::remove_if(s.begin(), s.end(), []( char const& c ) -> bool { return !std::isalnum(c) && c != '_'; } ), s.end());
517 return s;
518}
519
520std::vector<size_t> UTILITY::ComputeStrideFromShape(const std::vector<size_t> & shape) {
521 // assume row major layout
522 const auto size = shape.size();
523 std::vector<size_t> strides(size,1);
524 for (std::size_t i = 1; i < size; i++) {
525 strides[size - 1 - i] = strides[size - i ] * shape[size - i];
526 }
527 return strides;
528}
529
/// Compute row-major strides for a possibly-dynamic (Dim) shape.
/// Integer dimensions are folded numerically; parametric dimensions produce
/// symbolic stride expressions (string products such as "N*4").
std::vector<Dim> UTILITY::ComputeStrideFromShape(const std::vector<Dim> & shape) {
   // assume row major layout
   const auto size = shape.size();
   std::vector<Dim> strides(size);
   if (size > 0) {
      // the innermost dimension always has stride 1
      strides[size-1] = Dim{1};
      for (std::size_t i = 1; i < size; i++) {
         if (!shape[size-i].isParam && !strides[size-i].isParam)
            // both factors are concrete integers: fold the product now
            strides[size - 1 - i] = Dim{strides[size-i].dim * shape[size-i].dim};
         else {
            // at least one factor is symbolic: simplify trivial "1 * x" cases,
            // otherwise emit the product as a string expression
            if (strides[size-i].GetVal() == "1")
               strides[size - 1 - i] = shape[size-i];
            else if (shape[size-i].GetVal() == "1")
               strides[size - 1 - i] = strides[size-i];
            else
               strides[size - 1 - i] = Dim{std::string(strides[size-i].GetVal() + "*" + shape[size-i].GetVal())};
         }
      }
   }
   return strides;
}
551
552// utilities functions for generating code
553
554// ------------------------------------------------------------------ //
555// Emit 'rank' nested for-loops: for(size_t idx_i=0; idx_i<dim_i; ) //
556// ------------------------------------------------------------------ //
557const std::string SP = " ";
558void EmitNestedLoops(std::stringstream &out, size_t loopRank, const std::vector<Dim> shape) {
559 for (size_t i = 0; i < loopRank; ++i) {
560 for (size_t s = 0; s < i + 2; ++s) out << SP;
561
562 out << "for (size_t idx_" << i << " = 0; idx_" << i
563 << " < " << shape[i] << "; ++idx_" << i << ") {\n";
564 }
565}
566void CloseNestedLoops(std::stringstream &out, size_t loopRank) {
567 for (int64_t i = loopRank - 1; i >= 0; --i) {
568 for (int64_t s = 0; s < i + 2; ++s) out << SP;
569 out << "}\n";
570 }
571}
572
573
574
/// A contiguous free region inside the planned memory arena
/// (used by OrganizeMemory's free list).
struct FreeBlock {
   std::size_t offset;   // byte offset of the region inside the arena
   std::size_t size;     // region size in bytes
   bool operator<(const FreeBlock& other) const {
      // order by offset for deterministic coalescing
      return offset < other.offset;
   }
};
583
/// An allocation (START) or deallocation (END) event in the tensor-lifetime
/// sweep performed by OrganizeMemory.
/// (the "struct MemoryEvent {" header line was lost in extraction, leaving the
/// members dangling at namespace scope; reconstructed here)
struct MemoryEvent {
   int t;    // time (i.e. operator index)
   int type; // 0 = END first, 1 = START
   int idx;  // tensor index
   bool operator<(const MemoryEvent& o) const {
      if (t != o.t) return t < o.t;
      return type < o.type; // END before START at the same time
   }
};
593
/// Greedy best-fit planner with coalescing free list.
/// Given tensors with byte sizes and [begin, end) lifetimes, assigns each a
/// byte offset inside one shared arena and returns the total arena size.
/// NOTE(review): assumes TensorLifeInfo exposes `begin`, `end` and `size`
/// fields as used below - confirm against its declaration.
/// @param tensorsInfo one entry per tensor (size and lifetime)
/// @return MemoryResult with the total arena bytes and per-tensor offsets
/// @throws std::runtime_error if any tensor has end <= begin
MemoryResult OrganizeMemory(const std::vector<TensorLifeInfo> & tensorsInfo )
{
   // Basic validation
   for (const auto &t : tensorsInfo) {
      if (!(t.end > t.begin)) {
         throw std::runtime_error("Each tensor must have end > begin.");
      }
   }

   // Build events: free before allocate at equal times.
   std::vector<MemoryEvent> events;
   events.reserve(tensorsInfo.size() * 2);
   for (int i = 0; i < (int)tensorsInfo.size(); ++i) {
      events.push_back({tensorsInfo[i].end, 0, i}); // END
      events.push_back({tensorsInfo[i].begin, 1, i}); // START
   }
   std::sort(events.begin(), events.end());

   // Byte offset assigned to each tensor (filled during the sweep).
   std::vector<size_t> tensorsOffset(tensorsInfo.size());

   // Free list ordered by offset (for O(log n) coalescing)
   // and faster insert/erase with respect to a vector
   std::set<FreeBlock> free_list;

   // Bookkeeping: size/offset map for frees.
   std::unordered_map<int, std::size_t> live_size;
   std::unordered_map<int, std::size_t> live_offset;

   // High-water mark of the arena; grows only when no free block fits.
   std::size_t total_bytes = 0;

   auto allocate_best_fit = [&](std::size_t need) -> std::size_t {
      // Find the *smallest* block whose size >= need (best-fit).
      // Since free_list is ordered by offset, we scan to find best by size.
      // (For very large sets you could maintain a multimap by size as well.)
      auto best = free_list.end();
      for (auto it = free_list.begin(); it != free_list.end(); ++it) {
         if (it->size >= need) {
            if (best == free_list.end() || it->size < best->size)
               best = it;
         }
      }
      if (best != free_list.end()) {
         std::size_t off = best->offset;
         if (best->size == need) {
            // exact fit: consume the whole block
            free_list.erase(best);
         } else {
            // partial fit: keep the tail of the block as a smaller free block
            FreeBlock updated{best->offset + need, best->size - need};
            free_list.erase(best);
            free_list.insert(updated);
         }
         return off;
      }
      // No free block large enough; grow the heap.
      std::size_t off = total_bytes;
      total_bytes += need;
      return off;
   };

   auto try_coalesce = [&](std::set<FreeBlock>::iterator it) {
      // Coalesce with previous (blocks are adjacent when offsets touch)
      if (it != free_list.begin()) {
         auto prev = std::prev(it);
         if (prev->offset + prev->size == it->offset) {
            FreeBlock merged{prev->offset, prev->size + it->size};
            free_list.erase(prev);
            it = free_list.erase(it);
            it = free_list.insert(merged).first;
         }
      }
      // Coalesce with next
      auto next = std::next(it);
      if (next != free_list.end() && it->offset + it->size == next->offset) {
         FreeBlock merged{it->offset, it->size + next->size};
         free_list.erase(next);
         it = free_list.erase(it);
         free_list.insert(merged);
      }
   };

   // Sweep through time.
   for (const auto &e : events) {
      if (e.type == 0) { // END: free
         auto it_sz = live_size.find(e.idx);
         auto it_off = live_offset.find(e.idx);
         if (it_sz != live_size.end() && it_off != live_offset.end()) {
            FreeBlock fb{it_off->second, it_sz->second};
            // Insert and coalesce with neighbors
            auto it = free_list.insert(fb).first;
            try_coalesce(it);
            live_size.erase(it_sz);
            live_offset.erase(it_off);
         }
      } else { // START: allocate
         auto &t = tensorsInfo[e.idx];
         std::size_t off = allocate_best_fit(t.size);
         tensorsOffset[e.idx] = off;
         live_size[e.idx] = t.size;
         live_offset[e.idx] = off;
      }
   }

   return MemoryResult{total_bytes, std::move(tensorsOffset)};
}
698
699} // namespace SOFIE
700} // namespace Experimental
701} // namespace TMVA
#define c(i)
Definition RSha256.hxx:101
#define e(i)
Definition RSha256.hxx:103
size_t size(const MatrixT &matrix)
retrieve the size of a square matrix
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows the typed iteration through a TCollection.
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void input
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h offset
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h Atom_t Int_t ULong_t ULong_t unsigned char prop_list Atom_t Atom_t target
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h length
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void value
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h Atom_t Int_t ULong_t ULong_t unsigned char prop_list Atom_t Atom_t Atom_t Time_t type
const_iterator begin() const
const_iterator end() const
const Int_t n
Definition legend1.C:16
bool AreSameShape(const std::vector< size_t > &, const std::vector< size_t > &)
std::vector< size_t > UnidirectionalBroadcastShape(std::vector< size_t > &, std::vector< size_t > &)
std::string Clean_name(std::string input_tensor_name)
std::vector< size_t > MultidirectionalBroadcastShape(std::vector< std::vector< size_t > >)
std::vector< size_t > ComputeStrideFromShape(const std::vector< size_t > &shape)
compute stride of a tensor given its shape (assume layout is row-major)
MemoryResult OrganizeMemory(const std::vector< TensorLifeInfo > &tensorsInfo)
Greedy best-fit planner with coalescing free list.
std::string ConvertDimShapeToString(const std::vector< Dim > &shape)
std::size_t ConvertShapeToLength(const std::vector< size_t > &shape)
std::vector< Dim > ConvertShapeToDim(const std::vector< size_t > &shape)
Convert shape from integer format to dynamic one (based on Dim)
std::vector< size_t > ConvertShapeToInt(const std::vector< Dim > &shape)
Convert shape based on Dim to integer format.
std::string ConvertTypeToString(ETensorType type)
ETensorType ConvertStringToType(std::string type)
std::string ConvertDimShapeToLength(const std::vector< Dim > &shape)
void EmitNestedLoops(std::stringstream &out, size_t loopRank, const std::vector< Dim > shape)
std::string ConvertShapeToString(const std::vector< size_t > &shape)
void CloseNestedLoops(std::stringstream &out, size_t loopRank)
bool IsInteger(const std::string &s)
create variable transformations
bool operator<(const FreeBlock &other) const
bool operator<(const MemoryEvent &o) const