Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
SOFIE_common.cxx
Go to the documentation of this file.
2
#include <algorithm>
#include <cctype>
#include <charconv>
#include <cstring>
#include <numeric>
#include <set>
#include <sstream>
#include <stdexcept>
#include <unordered_map>
9
10namespace TMVA {
11namespace Experimental {
12namespace SOFIE {
13
14/// @brief Convert shape from integer format to dynamic one (based on Dim)
15/// @param shape
16/// @return shape based on Dim
17std::vector<Dim> ConvertShapeToDim(const std::vector<size_t> & shape){
18 std::vector<Dim> ret_shape(shape.size());
19 for (size_t i =0; i < shape.size(); i++){
20 ret_shape[i].dim = shape[i];
21 }
22 return ret_shape;
23}
24
25/// @brief Convert shape based on Dim to integer format
26/// @param shape
27/// @return shape based on integer. Return an empty shape in case shape is dynamic (has a parameter)
28std::vector<size_t> ConvertShapeToInt(const std::vector<Dim> & shape){
29 std::vector<size_t> ret_shape(shape.size());
30 for (size_t i =0; i < shape.size(); i++){
31 if (shape[i].isParam) {
32 // try converting to integer in case string is a number >=0
33 int val = -1;
34 try {
35 val = std::stoi(shape[i].param);
36 if (val >= 0) ret_shape[i] = static_cast<size_t>(val);
37 else {
38 ret_shape.clear();
39 break;
40 }
41 }
42 catch (const std::invalid_argument& ) {
43 ret_shape.clear();
44 break;
45 }
46 } else {
47 ret_shape[i] = shape[i].dim;
48 }
49 }
50 return ret_shape;
51}
52
53
/// @brief Compute the total number of elements of a tensor from its shape
/// @param shape fixed integer shape
/// @return product of all dimensions; an empty shape represents a scalar, so length = 1
std::size_t ConvertShapeToLength(const std::vector<size_t> & shape){
   // accumulate with multiplication; the init value 1 also covers the scalar (empty) case
   return std::accumulate(shape.begin(), shape.end(), std::size_t{1},
                          [](std::size_t a, std::size_t b) { return a * b; });
}
60
62 switch(type){
63 case ETensorType::FLOAT : {
64 return "float";
65 }
66 case ETensorType::INT8 : {
67 return "int8_t";
68 }
69 case ETensorType::INT16 : {
70 return "int16_t";
71 }
72 case ETensorType::INT32 : {
73 return "int32_t";
74 }
75 case ETensorType::INT64 : {
76 return "int64_t";
77 }
78 case ETensorType::UINT8 : {
79 return "uint8_t";
80 }
81 case ETensorType::UINT16 : {
82 return "uint16_t";
83 }
84 case ETensorType::UINT32 : {
85 return "uint32_t";
86 }
87 case ETensorType::UINT64 : {
88 return "uint64_t";
89 }
90 case ETensorType::DOUBLE : {
91 return "double";
92 }
93 case ETensorType::BOOL : {
94 return "uint8_t";
95 }
96 default:{
97 return "other_" + std::to_string( (int) type);
98 }
99 }
100}
101
103 if(type == "float32" || type == "float" || type == "Float"){
104 return ETensorType::FLOAT;
105 }
106 else if(type == "int64" || type == "int64_t"){
107 return ETensorType::INT64;
108 }
109 else if (type == "double" || type == "float64"){
110 return ETensorType::DOUBLE;
111 }
112 else if (type == "bool" ){
113 return ETensorType::BOOL;
114 }
115 else{
117 }
118}
119
/// @brief Render an integer shape as a human-readable string, e.g. "{ 2 , 3 }"
/// @param shape fixed integer shape
/// @return formatted shape string (empty shape renders as "{  }")
std::string ConvertShapeToString(const std::vector<size_t> & shape) {
   std::string result = "{ ";
   bool first = true;
   for (auto d : shape) {
      if (!first) result += " , ";
      result += std::to_string(d);
      first = false;
   }
   result += " }";
   return result;
}
130
131std::string ConvertDimShapeToString(const std::vector<Dim> & shape) {
132 std::stringstream out;
133 out << "{ ";
134 for (size_t i = 0; i < shape.size(); i++) {
135 out << shape[i];
136 if (i < shape.size()-1) out << " , ";
137 }
138 out << " }";
139 return out.str();
140}
141
142std::string ConvertDimShapeToLength(const std::vector<Dim> & shape) {
143 // convert generic shape to a string
144 // multiply all the integer specified dimensions of the shape
145 std::string length;
146 // case of empty vectors return 1
147 if (shape.empty()) return "1";
148 size_t int_length = 0;
149 for (size_t i = 0; i < shape.size(); i++) {
150 if (shape[i].isParam) {
151 if (!length.empty()) length += " * ";
152 length += shape[i].param;
153 } else {
154 if (int_length == 0)
155 int_length = shape[i].dim;
156 else
157 int_length *= shape[i].dim;
158 }
159 }
160 // multiply the integer components to the parametric one
161 // if larger than 1
162 if (int_length > 0) {
163 if (!length.empty() && int_length > 1) {
164 length += " * ";
165 length += std::to_string(int_length);
166 } else if (length.empty()) { // case is full known shape
167 length = std::to_string(int_length);
168 }
169 }
170 return length;
171}
/// Overload for dynamic shapes: simple alias for ConvertDimShapeToString.
std::string ConvertShapeToString(const std::vector<Dim> & shape) {
   return ConvertDimShapeToString(shape);
}
/// Backward-compatible alias for ConvertDimShapeToLength.
std::string ConvertDynamicShapeToLength(const std::vector<Dim> & shape) {
   return ConvertDimShapeToLength(shape);
}
178
179
namespace{
/// Repeat the first input_size elements of input no_of_copies times into target.
/// Doubles the already-copied region at each step so only O(log(no_of_copies))
/// memcpy calls are needed. target must have room for no_of_copies * input_size
/// elements and must not overlap input.
/// NOTE(review): the source text had "int_t" and was missing the
/// "already_copied" declaration (lost in extraction); both are restored here.
template<typename T>
inline void copy_vector_data(int no_of_copies, int input_size, T* input, T* target){ //only visible within this translation unit
   std::memcpy(target, input, input_size * sizeof(T));
   int already_copied = 1;
   // keep doubling the copied prefix while the doubled size still fits
   while (already_copied * 2 <= no_of_copies){
      std::memcpy(target + already_copied * input_size, target, already_copied * input_size * sizeof(T));
      already_copied *= 2;
   }
   // copy the remaining tail (strictly smaller than the copied prefix)
   std::memcpy(target + already_copied * input_size, target, (no_of_copies - already_copied) * input_size * sizeof(T));
}
}
196
/// @brief Check whether the whole string parses as a (possibly negative) int
/// @param s candidate string
/// @return true only if std::from_chars consumes every character without error
bool IsInteger(const std::string & s) {
   int parsed = 0;
   const char * first = s.data();
   const char * last = first + s.size();
   const auto result = std::from_chars(first, last, parsed);
   return result.ec == std::errc() && result.ptr == last;
}
202
203bool UTILITY::AreSameShape(const std::vector<size_t>& shapeA, const std::vector<size_t>& shapeB) {
204 if (shapeA.size() != shapeB.size()) {
205 return false;
206 }
207 for (size_t dim = 0; dim < shapeA.size(); dim++) {
208 if (shapeA[dim] != shapeB[dim]) {
209 return false;
210 }
211 }
212 return true;
213}
214bool UTILITY::AreSameShape(const std::vector<size_t>& shapeA, const std::vector<Dim>& shapeB) {
215 if (shapeA.size() != shapeB.size()) {
216 return false;
217 }
218 for (size_t dim = 0; dim < shapeA.size(); dim++) {
219 if (shapeB[dim].isParam) return false;
220 if (shapeA[dim] != shapeB[dim].dim) {
221 return false;
222 }
223 }
224 return true;
225}
226bool UTILITY::AreSameShape(const std::vector<Dim>& shapeA, const std::vector<Dim>& shapeB) {
227 if (shapeA.size() != shapeB.size()) {
228 return false;
229 }
230 for (size_t dim = 0; dim < shapeA.size(); dim++) {
231 if (shapeA[dim].GetVal() != shapeB[dim].GetVal()) {
232 return false;
233 }
234 }
235 return true;
236}
237
/// Compute the common (multidirectional-broadcast) shape of a set of integer shapes.
/// Takes the shapes by value: shorter shapes are prepended with 1's locally.
/// @throws std::runtime_error if fewer than 2 shapes are given or if the shapes
/// cannot be broadcast to one common target shape.
std::vector<size_t> UTILITY::MultidirectionalBroadcastShape(std::vector<std::vector<size_t>> shape)
{
   if (shape.size() < 2) {
      throw
         std::runtime_error("TMVA::SOFIE - MultidirectionalBroadcastShape requires at least 2 input shapes.");
   }
   // Number of input shapes to broadcast
   size_t n = shape.size();
   // Size of the output shape = largest input rank
   size_t targetSize = shape[0].size();
   for (size_t i = 1; i < n; i++) {
      targetSize = std::max(targetSize, shape[i].size());
   }
   // Check if they all have the same rank
   bool sameSize = true;
   for (size_t i = 0; i < n; i++) {
      if (shape[i].size() != targetSize) {
         sameSize = false;
         break;
      }
   }
   // Fast path: all ranks equal. This branch always returns or throws.
   if (sameSize) {
      // Check if they have the same shape
      bool sameShape = true;
      for (size_t i = 1; i < n; i++) {
         for (size_t dim = 0; dim < shape[0].size(); dim++) {
            if (shape[i][dim] != shape[0][dim]) {
               sameShape = false;
               break;
            }
         }
         if (!sameShape) {
            break;
         }
      }
      if (sameShape) {
         return shape[0];
      } else {
         // Set the target shape: element-wise maximum over all inputs
         std::vector<size_t> targetShape(targetSize, 1);
         for (size_t i = 0; i < n; i++) {
            for (size_t dim = 0; dim < targetSize; dim++) {
               targetShape[dim] = std::max(targetShape[dim], shape[i][dim]);
            }
         }
         // Check if the input shapes are broadcastable to targetShape
         bool broadcastable = true;
         for (size_t i = 0; i < n; i++) {
            for (size_t dim = 0; dim < targetSize; dim++) {
               if (shape[i][dim] != 1 && targetShape[dim] != 1 && shape[i][dim] != targetShape[dim]) {
                  broadcastable = false;
                  break;
               }
               // NOTE(review): this check is unreachable (the break above already
               // exits) and the outer loop over i is never short-circuited --
               // harmless inefficiency, kept as-is
               if (!broadcastable) {
                  break;
               }
            }
         }
         // Same rank and broadcastable to targetShape
         if (broadcastable) {
            return targetShape;
         } else {
            std::stringstream ss;
            ss << "TMVA::SOFIE - Error multidirectional broadcasting shapes ";
            // join shapes with ", " and a final " and "
            for (size_t i = 0; i < n; i++) {
               ss << ConvertShapeToString(shape[i]);
               if (n > 2 && i < n - 2) {
                  ss << ", ";
               } else if ( n >=2 && i == n - 2) {
                  ss << " and ";
               }
            }
            ss << " to the same shape.";
            throw
               std::runtime_error(ss.str());
         }
      } // end sameShape
   } // end sameSize
   // From here on the ranks differ:
   // prepend the ith shape with ones up to targetSize
   for (size_t i = 0; i < n; i++) {
      if (shape[i].size() < targetSize) {
         std::vector<size_t> newShape(targetSize, 1);
         size_t offset = targetSize - shape[i].size();
         std::copy(shape[i].begin(), shape[i].end(), newShape.begin() + offset);
         shape[i] = newShape;
      }
   }
   // Set the target shape: element-wise maximum over all (padded) inputs
   std::vector<size_t> targetShape(targetSize, 1);
   for (size_t i = 0; i < n; i++) {
      for (size_t dim = 0; dim < targetSize; dim++) {
         targetShape[dim] = std::max(targetShape[dim], shape[i][dim]);
      }
   }
   // Check if the shapes are broadcastable to targetShape
   bool broadcastable = true;
   for (size_t i = 0; i < n; i++) {
      for (size_t dim = 0; dim < targetSize; dim++) {
         if (shape[i][dim] != targetShape[dim] && shape[i][dim] != 1 && targetShape[dim] != 1) {
            broadcastable = false;
            break;
         }
      }
      if (!broadcastable) {
         break;
      }
   }
   if (broadcastable) {
      return targetShape;
   } else {
      std::stringstream ss;
      ss << "TMVA::SOFIE - Error multidirectional broadcasting shapes ";
      // join shapes with ", " and a final " and "
      for (size_t i = 0; i < n; i++) {
         ss << ConvertShapeToString(shape[i]);
         if (n > 2 && i < n - 2) {
            ss << ", ";
         } else if ( n >=2 && i == n - 2) {
            ss << " and ";
         }
      }
      ss << " to the same shape.";
      throw
         std::runtime_error(ss.str());
   }
}
363
364// check multi-directional broadcasting of two shapes (need to pass inputs by non const ref. since we might prepends with one's
365// return a pair of integer flag and new broadcasted shape
366// if flag = 0: shape are identical
367// flag = 1: return shape is equal to A, we broadcast B
368// flag = 2: return shape is equal to B we broadcast A
369// flag = 3: return shape is common of two we broadcast A and B to output
370std::pair<int, std::vector<size_t>> UTILITY::MultidirectionalBroadcastShape(std::vector<size_t> & shapeA, std::vector<size_t> & shapeB)
371{
372 size_t sizeA = shapeA.size();
373 size_t sizeB = shapeB.size();
374 // Check if A and B have the same shape
376 return std::make_pair(0, shapeA);
377 }
378 // Find the common shape of A and B
379 size_t size = std::max(sizeA, sizeB);
380 if (sizeA < size) {
381 // prepend 1's in A to make of same shape as B
382 std::vector<size_t> newShapeA(size, 1);
383 size_t offset = size - sizeA;
384 std::copy(shapeA.begin(), shapeA.end(), newShapeA.begin() + offset);
385 shapeA = std::move(newShapeA);
386 }
387 if (sizeB < size) {
388 std::vector<size_t> newShapeB(size, 1);
389 size_t offset = size - sizeB;
390 std::copy(shapeB.begin(), shapeB.end(), newShapeB.begin() + offset);
391 shapeB = std::move(newShapeB);
392 }
393 bool broadcastable = true;
394 for (size_t i = 0; i < size; i++) {
395 if (shapeA[i] != shapeB[i] && shapeA[i] != 1 && shapeB[i] != 1) {
396 broadcastable = false;
397 break;
398 }
399 }
400 int broadcastFlag = 0;
401 if (broadcastable) {
402 // The output shape is max(outShape, targetShape)
403 std::vector<size_t> targetShape(size, 1);
404 for (size_t i = 0; i < size; i++) {
405 targetShape[i] = std::max(shapeA[i], shapeB[i]);
406 if (shapeB[i] < targetShape[i]) broadcastFlag |= 1;
407 if (shapeA[i] < targetShape[i]) broadcastFlag |= 2;
408 }
409 return std::make_pair(broadcastFlag, targetShape);
410 } else {
411 throw
412 std::runtime_error("TMVA::SOFIE - Error multidirectional broadcasting tensors of shape "
414 + " to a common shape.");
415 }
416}
417// unidirectional broadcast- of shape A to target B
418std::vector<size_t> UTILITY::UnidirectionalBroadcastShape(std::vector<size_t> & shapeA, std::vector<size_t> & shapeB)
419{
421 if (ret.first > 1) {
422 throw
423 std::runtime_error("TMVA::SOFIE - Error unidirectional broadcasting tensors of shape "
425 + " in a common shape.");
426 }
427 return ret.second;
428}
429
430// for broadcasting Dim shapes
431// flag indicates also which vector needs to be broadcasted
432// flag & 1 == 1 : broadcast B -> A
433// flag & 2 == 2 : broadcast A -> B
434// flag & 4 == 4 a run time check is needed on shapes with values
435std::pair<int, std::vector<Dim>> UTILITY::MultidirectionalBroadcastShape(std::vector<Dim> & shapeA, std::vector<Dim> & shapeB) {
436 size_t sizeA = shapeA.size();
437 size_t sizeB = shapeB.size();
438 // Check if A and B have the same shape
440 return std::make_pair(0, shapeA);
441 }
442 // Find the common shape of A and B
443 size_t size = std::max(sizeA, sizeB);
444 if (sizeA < size) {
445 // prepend 1's in A to make of same shape as B
446 std::vector<Dim> newShapeA(size, Dim{1});
447 size_t offset = size - sizeA;
448 std::copy(shapeA.begin(), shapeA.end(), newShapeA.begin() + offset);
449 shapeA = std::move(newShapeA);
450 }
451 if (sizeB < size) {
452 std::vector<Dim> newShapeB(size, Dim{1});
453 size_t offset = size - sizeB;
454 std::copy(shapeB.begin(), shapeB.end(), newShapeB.begin() + offset);
455 shapeB = std::move(newShapeB);
456 }
457
458 int broadcastFlag = 0;
459 // The output shape is targetShape
460 std::vector<Dim> targetShape(size);
461 for (size_t i = 0; i < size; i++) {
462 // assume we broadcast to the parametric value
463 if (shapeA[i] == shapeB[i]) {
464 targetShape[i] = shapeA[i];
465 } else if (shapeA[i].isParam && shapeB[i].GetVal() == "1" ) {
466 // broadcast B to A (case A is parametric with )
467 targetShape[i] = shapeA[i];
468 broadcastFlag |= 1;
469 } else if (shapeA[i].GetVal() == "1" && shapeB[i].isParam) {
470 // broadcast A to B
471 targetShape[i] = shapeB[i];
472 broadcastFlag |= 2;
473 } else if (!shapeA[i].isParam && !shapeB[i].isParam) {
474 if (shapeB[i].dim == 1) {
475 targetShape[i] = shapeA[i];
476 broadcastFlag |= 1;
477 } else if (shapeA[i].dim == 1) {
478 targetShape[i] = shapeB[i];
479 broadcastFlag |= 2;
480 } else {
481 // non broadcastable case cannot have A and B two different defined shapes different than one
482 broadcastFlag = -1;
483 }
484 } else if (shapeA[i].isParam && shapeB[i].isParam) {
485 // full dynamic case - we will decided at run time
486 std::stringstream s;
487 s << "std::max(" << shapeA[i] << "," << shapeB[i] << ")";
488 // use -1 for dim to indicate is an expression
489 targetShape[i] = Dim { s.str() , static_cast<size_t>(-1)};
490 broadcastFlag |= 4;
491 } else if (shapeA[i].isParam && !shapeB[i].isParam) {
492 // A -> B need to check at run time if consistent
493 targetShape[i] = shapeB[i];
494 broadcastFlag |= 6;
495 } else if (!shapeA[i].isParam && shapeB[i].isParam) {
496 // B -> A need to check at run time if consistent
497 targetShape[i] = shapeA[i];
498 broadcastFlag |= 5;
499 } else {
500 // all cases should be covered
501 throw std::runtime_error("TMVA::SOFIE - Fatal error in MultiDirectionalBroadCastDimShape");
502 }
503 }
504 if (broadcastFlag == -1) {
505 throw std::runtime_error("TMVA::SOFIE - Error multidirectional broadcasting tensors of shape " +
507 " to a common shape.");
508 }
509
510 return std::make_pair(broadcastFlag, targetShape);
511}
512
513std::string UTILITY::Clean_name(std::string input_tensor_name){
514 std::string s (input_tensor_name);
515 std::replace( s.begin(), s.end(), '-', '_');
516 // replace all non-alpohanumeric character except for "_"
517 s.erase(std::remove_if(s.begin(), s.end(), []( char const& c ) -> bool { return !std::isalnum(c) && c != '_'; } ), s.end());
518 return s;
519}
520
521std::vector<size_t> UTILITY::ComputeStrideFromShape(const std::vector<size_t> & shape) {
522 // assume row major layout
523 const auto size = shape.size();
524 std::vector<size_t> strides(size,1);
525 for (std::size_t i = 1; i < size; i++) {
526 strides[size - 1 - i] = strides[size - i ] * shape[size - i];
527 }
528 return strides;
529}
530
/// @brief Compute the row-major strides of a tensor from its dynamic shape
/// @param shape dynamic (Dim-based) shape
/// @return strides as Dims; parametric dimensions produce symbolic "a*b"
/// product expressions instead of integer values
std::vector<Dim> UTILITY::ComputeStrideFromShape(const std::vector<Dim> & shape) {
   // assume row major layout
   const auto size = shape.size();
   std::vector<Dim> strides(size);
   if (size > 0) {
      // innermost dimension always has stride 1
      strides[size-1] = Dim{1};
      // walk outward: stride[k-1] = stride[k] * shape[k]
      for (std::size_t i = 1; i < size; i++) {
         if (!shape[size-i].isParam && !strides[size-i].isParam)
            // both factors are known integers: fold into a plain value
            strides[size - 1 - i] = Dim{strides[size-i].dim * shape[size-i].dim};
         else {
            // at least one factor is symbolic; skip neutral "1" factors
            if (strides[size-i].GetVal() == "1")
               strides[size - 1 - i] = shape[size-i];
            else if (shape[size-i].GetVal() == "1")
               strides[size - 1 - i] = strides[size-i];
            else
               // build the symbolic product expression
               strides[size - 1 - i] = Dim{std::string(strides[size-i].GetVal() + "*" + shape[size-i].GetVal())};
         }
      }
   }
   return strides;
}
552
/// A contiguous free region of the planned memory arena used by OrganizeMemory.
struct FreeBlock {
   std::size_t offset; // byte offset of the region within the arena
   std::size_t size;   // region size in bytes
   bool operator<(const FreeBlock& other) const {
      // order by offset for deterministic coalescing
      return offset < other.offset;
   }
};
561
/// Event used when sweeping tensor lifetimes in time order; at equal times END
/// events sort before START events so a freed block can be reused by a tensor
/// that starts at the same step.
/// NOTE(review): the "struct MemoryEvent {" line was lost in extraction and is
/// restored here.
struct MemoryEvent {
   int t; // time (i.e. operator index)
   int type; // 0 = END first, 1 = START
   int idx; // tensor index
   bool operator<(const MemoryEvent& o) const {
      if (t != o.t) return t < o.t;
      return type < o.type; // END before START at the same time
   }
};
571
/// Greedy best-fit planner with coalescing free list.
/// Given each tensor's lifetime [begin, end) and byte size, assigns every
/// tensor a byte offset inside one shared arena, reusing and merging freed
/// regions, and returns the offsets together with the total arena size.
/// @throws std::runtime_error if any tensor has end <= begin.
MemoryResult OrganizeMemory(const std::vector<TensorLifeInfo> & tensorsInfo )
{
   // Basic validation
   for (const auto &t : tensorsInfo) {
      if (!(t.end > t.begin)) {
         throw std::runtime_error("Each tensor must have end > begin.");
      }
   }

   // Build events: free before allocate at equal times.
   std::vector<MemoryEvent> events;
   events.reserve(tensorsInfo.size() * 2);
   for (int i = 0; i < (int)tensorsInfo.size(); ++i) {
      events.push_back({tensorsInfo[i].end, 0, i}); // END
      events.push_back({tensorsInfo[i].begin, 1, i}); // START
   }
   std::sort(events.begin(), events.end());

   // Resulting arena offset of each tensor, indexed like tensorsInfo
   std::vector<size_t> tensorsOffset(tensorsInfo.size());

   // Free list ordered by offset (for O(log n) coalescing)
   // and faster insert/erase with respect to a vector
   std::set<FreeBlock> free_list;

   // Bookkeeping: size/offset map for frees.
   std::unordered_map<int, std::size_t> live_size;
   std::unordered_map<int, std::size_t> live_offset;

   // Current arena size; grows only when no free block fits
   std::size_t total_bytes = 0;

   auto allocate_best_fit = [&](std::size_t need) -> std::size_t {
      // Find the *smallest* block whose size >= need (best-fit).
      // Since free_list is ordered by offset, we scan to find best by size.
      // (For very large sets you could maintain a multimap by size as well.)
      auto best = free_list.end();
      for (auto it = free_list.begin(); it != free_list.end(); ++it) {
         if (it->size >= need) {
            if (best == free_list.end() || it->size < best->size)
               best = it;
         }
      }
      if (best != free_list.end()) {
         std::size_t off = best->offset;
         if (best->size == need) {
            // exact fit: consume the whole block
            free_list.erase(best);
         } else {
            // split: keep the tail of the block on the free list
            FreeBlock updated{best->offset + need, best->size - need};
            free_list.erase(best);
            free_list.insert(updated);
         }
         return off;
      }
      // No free block large enough; grow the heap.
      std::size_t off = total_bytes;
      total_bytes += need;
      return off;
   };

   auto try_coalesce = [&](std::set<FreeBlock>::iterator it) {
      // Coalesce with previous block when the two are byte-adjacent
      if (it != free_list.begin()) {
         auto prev = std::prev(it);
         if (prev->offset + prev->size == it->offset) {
            FreeBlock merged{prev->offset, prev->size + it->size};
            free_list.erase(prev);
            it = free_list.erase(it);
            it = free_list.insert(merged).first;
         }
      }
      // Coalesce with next block when the two are byte-adjacent
      auto next = std::next(it);
      if (next != free_list.end() && it->offset + it->size == next->offset) {
         FreeBlock merged{it->offset, it->size + next->size};
         free_list.erase(next);
         it = free_list.erase(it);
         free_list.insert(merged);
      }
   };

   // Sweep through time.
   for (const auto &e : events) {
      if (e.type == 0) { // END: free
         auto it_sz = live_size.find(e.idx);
         auto it_off = live_offset.find(e.idx);
         // guard: an END before its START (should not happen) is ignored
         if (it_sz != live_size.end() && it_off != live_offset.end()) {
            FreeBlock fb{it_off->second, it_sz->second};
            // Insert and coalesce with neighbors
            auto it = free_list.insert(fb).first;
            try_coalesce(it);
            live_size.erase(it_sz);
            live_offset.erase(it_off);
         }
      } else { // START: allocate
         auto &t = tensorsInfo[e.idx];
         std::size_t off = allocate_best_fit(t.size);
         tensorsOffset[e.idx] = off;
         live_size[e.idx] = t.size;
         live_offset[e.idx] = off;
      }
   }

   return MemoryResult{total_bytes, std::move(tensorsOffset)};
}
676
677} // namespace SOFIE
678} // namespace Experimental
679} // namespace TMVA
#define c(i)
Definition RSha256.hxx:101
#define e(i)
Definition RSha256.hxx:103
size_t size(const MatrixT &matrix)
retrieve the size of a square matrix
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows the typed iteration through a TCollection.
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void input
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h offset
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h Atom_t Int_t ULong_t ULong_t unsigned char prop_list Atom_t Atom_t target
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h length
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void value
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h Atom_t Int_t ULong_t ULong_t unsigned char prop_list Atom_t Atom_t Atom_t Time_t type
const_iterator begin() const
const_iterator end() const
const Int_t n
Definition legend1.C:16
bool AreSameShape(const std::vector< size_t > &, const std::vector< size_t > &)
std::vector< size_t > UnidirectionalBroadcastShape(std::vector< size_t > &, std::vector< size_t > &)
std::string Clean_name(std::string input_tensor_name)
std::vector< size_t > MultidirectionalBroadcastShape(std::vector< std::vector< size_t > >)
std::vector< size_t > ComputeStrideFromShape(const std::vector< size_t > &shape)
compute stride of a tensor given its shape (assume layout is row-major)
MemoryResult OrganizeMemory(const std::vector< TensorLifeInfo > &tensorsInfo)
Greedy best-fit planner with coalescing free list.
std::string ConvertDimShapeToString(const std::vector< Dim > &shape)
std::size_t ConvertShapeToLength(const std::vector< size_t > &shape)
std::string ConvertDynamicShapeToLength(const std::vector< Dim > &shape)
std::vector< Dim > ConvertShapeToDim(const std::vector< size_t > &shape)
Convert shape from integer format to dynamic one (based on Dim)
std::vector< size_t > ConvertShapeToInt(const std::vector< Dim > &shape)
Convert shape based on Dim to integer format.
std::string ConvertTypeToString(ETensorType type)
ETensorType ConvertStringToType(std::string type)
std::string ConvertDimShapeToLength(const std::vector< Dim > &shape)
std::string ConvertShapeToString(const std::vector< size_t > &shape)
bool IsInteger(const std::string &s)
create variable transformations
bool operator<(const FreeBlock &other) const
bool operator<(const MemoryEvent &o) const