Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
SOFIE_common.cxx
Go to the documentation of this file.
2
3#include <cctype>
4#include <sstream>
5#include <stdexcept>
6#include <charconv>
7#include <unordered_map>
8#include <set>
9
10namespace TMVA {
11namespace Experimental {
12namespace SOFIE {
13
14/// @brief Convert shape from integer format to dynamic one (based on Dim)
15/// @param shape
16/// @return shape based on Dim
17std::vector<Dim> ConvertShapeToDim(const std::vector<size_t> & shape){
18 std::vector<Dim> ret_shape(shape.size());
19 for (size_t i =0; i < shape.size(); i++){
20 ret_shape[i].dim = shape[i];
21 }
22 return ret_shape;
23}
24
25/// @brief Convert shape based on Dim to integer format
26/// @param shape
27/// @return shape based on integer. Return an empty shape in case shape is dynamic (has a parameter)
28std::vector<size_t> ConvertShapeToInt(const std::vector<Dim> & shape){
29 std::vector<size_t> ret_shape(shape.size());
30 for (size_t i =0; i < shape.size(); i++){
31 if (shape[i].isParam) {
32 // try converting to integer in case string is a number >=0
33 int val = -1;
34 try {
35 val = std::stoi(shape[i].param);
36 if (val >= 0) ret_shape[i] = static_cast<size_t>(val);
37 else {
38 ret_shape.clear();
39 break;
40 }
41 }
42 catch (const std::invalid_argument& ) {
43 ret_shape.clear();
44 break;
45 }
46 } else {
47 ret_shape[i] = shape[i].dim;
48 }
49 }
50 return ret_shape;
51}
52
53
std::size_t ConvertShapeToLength(const std::vector<size_t> & shape){
   // The total length is the product of all dimensions.
   // An empty shape represents a scalar, whose length is 1.
   std::size_t length = 1;
   for (std::size_t i = 0; i < shape.size(); ++i) {
      length *= shape[i];
   }
   return length;
}
60
62 switch(type){
63 case ETensorType::FLOAT : {
64 return "float";
65 }
66 case ETensorType::INT8 : {
67 return "int8_t";
68 }
69 case ETensorType::INT16 : {
70 return "int16_t";
71 }
72 case ETensorType::INT32 : {
73 return "int32_t";
74 }
75 case ETensorType::INT64 : {
76 return "int64_t";
77 }
78 case ETensorType::UINT8 : {
79 return "uint8_t";
80 }
81 case ETensorType::UINT16 : {
82 return "uint16_t";
83 }
84 case ETensorType::UINT32 : {
85 return "uint32_t";
86 }
87 case ETensorType::UINT64 : {
88 return "uint64_t";
89 }
90 case ETensorType::DOUBLE : {
91 return "double";
92 }
93 case ETensorType::BOOL : {
94 return "uint8_t";
95 }
96 default:{
97 return "other_" + std::to_string( (int) type);
98 }
99 }
100}
101
   // Map a type-name string (as written e.g. in ONNX model files) to the
   // corresponding ETensorType enumerator.
   // NOTE(review): the function signature line was lost during extraction;
   // the file's declaration index gives it as
   // ETensorType ConvertStringToType(std::string type).
   if(type == "float32" || type == "float" || type == "Float"){
      return ETensorType::FLOAT;
   }
   else if(type == "int64" || type == "int64_t"){
      return ETensorType::INT64;
   }
   else if (type == "double" || type == "float64"){
      return ETensorType::DOUBLE;
   }
   else if (type == "bool" ){
      return ETensorType::BOOL;
   }
   else{
      // NOTE(review): the statement in this fallback branch was lost during
      // extraction — presumably it returns an "undefined" enumerator or
      // throws; confirm against the original source.
   }
}
119
std::string ConvertShapeToString(const std::vector<size_t> & shape) {
   // Render an integer shape as "{ d1 , d2 , ... }".
   std::stringstream ss;
   ss << "{ ";
   for (size_t j = 0; j < shape.size(); ++j) {
      if (j > 0) ss << " , ";
      ss << shape[j];
   }
   ss << " }";
   return ss.str();
}
130
131std::string ConvertDimShapeToString(const std::vector<Dim> & shape) {
132 std::stringstream out;
133 out << "{ ";
134 for (size_t i = 0; i < shape.size(); i++) {
135 out << shape[i];
136 if (i < shape.size()-1) out << " , ";
137 }
138 out << " }";
139 return out.str();
140}
141
142std::string ConvertDimShapeToLength(const std::vector<Dim> & shape) {
143 // convert generic shape to a string
144 // multiply all the integer specified dimensions of the shape
145 std::string length;
146 // case of empty vectors return 1
147 if (shape.empty()) return "1";
148 int64_t int_length = -1;
149 for (size_t i = 0; i < shape.size(); i++) {
150 if (shape[i].isParam) {
151 if (!length.empty()) length += " * ";
152 length += shape[i].param;
153 } else {
154 if (int_length == -1)
155 int_length = shape[i].dim;
156 else
157 int_length *= shape[i].dim;
158 }
159 }
160 // multiply the integer components to the parametric one
161 // if larger than 1 - otherwise returns -1
162 if (int_length >= 0) {
163 if (!length.empty() && int_length > 1) {
164 length += " * ";
165 length += std::to_string(int_length);
166 } else if (length.empty()) { // case is full known shape
167 length = std::to_string(int_length);
168 }
169 }
170 return length;
171}
172
173
174namespace{
175template<typename T>
176static inline void copy_vector_data(int_t no_of_copies, int_t input_size, T* input, T* target){ //only visible within this translation unit
177 std::memcpy(target, input, input_size * sizeof(T));
179
180 while (already_copied * 2 <= no_of_copies){
181 std::memcpy(target + already_copied * input_size, target, already_copied * input_size * sizeof(T));
182 already_copied *= 2;
183 }
184
186 std::memcpy(target + already_copied * input_size, target, (no_of_copies - already_copied) * input_size * sizeof(T));
187 }
188}
189}
190
bool IsInteger(const std::string & s) {
   // Parse with std::from_chars: it succeeds without allocation and without
   // locale effects, and we additionally require that the whole string was
   // consumed (no leading whitespace or trailing characters allowed).
   int parsed{};
   const char * first = s.data();
   const char * last = s.data() + s.size();
   auto result = std::from_chars(first, last, parsed);
   return result.ec == std::errc() && result.ptr == last;
}
196
197bool UTILITY::AreSameShape(const std::vector<size_t>& shapeA, const std::vector<size_t>& shapeB) {
198 if (shapeA.size() != shapeB.size()) {
199 return false;
200 }
201 for (size_t dim = 0; dim < shapeA.size(); dim++) {
202 if (shapeA[dim] != shapeB[dim]) {
203 return false;
204 }
205 }
206 return true;
207}
208bool UTILITY::AreSameShape(const std::vector<size_t>& shapeA, const std::vector<Dim>& shapeB) {
209 if (shapeA.size() != shapeB.size()) {
210 return false;
211 }
212 for (size_t dim = 0; dim < shapeA.size(); dim++) {
213 if (shapeB[dim].isParam) return false;
214 if (shapeA[dim] != shapeB[dim].dim) {
215 return false;
216 }
217 }
218 return true;
219}
220bool UTILITY::AreSameShape(const std::vector<Dim>& shapeA, const std::vector<Dim>& shapeB) {
221 if (shapeA.size() != shapeB.size()) {
222 return false;
223 }
224 for (size_t dim = 0; dim < shapeA.size(); dim++) {
225 if (shapeA[dim].GetVal() != shapeB[dim].GetVal()) {
226 return false;
227 }
228 }
229 return true;
230}
231
232std::vector<size_t> UTILITY::MultidirectionalBroadcastShape(std::vector<std::vector<size_t>> shape)
233{
234 if (shape.size() < 2) {
235 throw
236 std::runtime_error("TMVA::SOFIE - MultidirectionalBroadcastShape requires at least 2 input shapes.");
237 }
238 // Number of input shapes to broadcast
239 size_t n = shape.size();
240 // Size of the output shape
241 size_t targetSize = shape[0].size();
242 for (size_t i = 1; i < n; i++) {
243 targetSize = std::max(targetSize, shape[i].size());
244 }
245 // Check if they have the same size
246 bool sameSize = true;
247 for (size_t i = 0; i < n; i++) {
248 if (shape[i].size() != targetSize) {
249 sameSize = false;
250 break;
251 }
252 }
253 if (sameSize) {
254 // Check if they have the same shape
255 bool sameShape = true;
256 for (size_t i = 1; i < n; i++) {
257 for (size_t dim = 0; dim < shape[0].size(); dim++) {
258 if (shape[i][dim] != shape[0][dim]) {
259 sameShape = false;
260 break;
261 }
262 }
263 if (!sameShape) {
264 break;
265 }
266 }
267 if (sameShape) {
268 return shape[0];
269 } else {
270 // Set the target shape
271 std::vector<size_t> targetShape(targetSize, 1);
272 for (size_t i = 0; i < n; i++) {
273 for (size_t dim = 0; dim < targetSize; dim++) {
274 targetShape[dim] = std::max(targetShape[dim], shape[i][dim]);
275 }
276 }
277 // Check if the input shapes are broadcastable to targetShape
278 bool broadcastable = true;
279 for (size_t i = 0; i < n; i++) {
280 for (size_t dim = 0; dim < targetSize; dim++) {
281 if (shape[i][dim] != 1 && targetShape[dim] != 1 && shape[i][dim] != targetShape[dim]) {
282 broadcastable = false;
283 break;
284 }
285 if (!broadcastable) {
286 break;
287 }
288 }
289 }
290 // They have the same shape and they are broadcastable to targetShape
291 if (broadcastable) {
292 return targetShape;
293 } else {
294 std::stringstream ss;
295 ss << "TMVA::SOFIE - Error multidirectional broadcasting shapes ";
296 for (size_t i = 0; i < n; i++) {
297 ss << ConvertShapeToString(shape[i]);
298 if (n > 2 && i < n - 2) {
299 ss << ", ";
300 } else if ( n >=2 && i == n - 2) {
301 ss << " and ";
302 }
303 }
304 ss << " to the same shape.";
305 throw
306 std::runtime_error(ss.str());
307 }
308 } // end sameShape
309 } // end sameSize
310 // Prepend the ith shape with ones
311 for (size_t i = 0; i < n; i++) {
312 if (shape[i].size() < targetSize) {
313 std::vector<size_t> newShape(targetSize, 1);
314 size_t offset = targetSize - shape[i].size();
315 std::copy(shape[i].begin(), shape[i].end(), newShape.begin() + offset);
316 shape[i] = newShape;
317 }
318 }
319 // Set the target shape
320 std::vector<size_t> targetShape(targetSize, 1);
321 for (size_t i = 0; i < n; i++) {
322 for (size_t dim = 0; dim < targetSize; dim++) {
323 targetShape[dim] = std::max(targetShape[dim], shape[i][dim]);
324 }
325 }
326 // Check if the shapes are broadcastable to targetShape
327 bool broadcastable = true;
328 for (size_t i = 0; i < n; i++) {
329 for (size_t dim = 0; dim < targetSize; dim++) {
330 if (shape[i][dim] != targetShape[dim] && shape[i][dim] != 1 && targetShape[dim] != 1) {
331 broadcastable = false;
332 break;
333 }
334 }
335 if (!broadcastable) {
336 break;
337 }
338 }
339 if (broadcastable) {
340 return targetShape;
341 } else {
342 std::stringstream ss;
343 ss << "TMVA::SOFIE - Error multidirectional broadcasting shapes ";
344 for (size_t i = 0; i < n; i++) {
345 ss << ConvertShapeToString(shape[i]);
346 if (n > 2 && i < n - 2) {
347 ss << ", ";
348 } else if ( n >=2 && i == n - 2) {
349 ss << " and ";
350 }
351 }
352 ss << " to the same shape.";
353 throw
354 std::runtime_error(ss.str());
355 }
356}
357
// check multi-directional broadcasting of two shapes (need to pass inputs by non const ref. since we might prepends with one's
// return a pair of integer flag and new broadcasted shape
// if flag = 0: shape are identical
// flag = 1: return shape is equal to A, we broadcast B
// flag = 2: return shape is equal to B we broadcast A
// flag = 3: return shape is common of two we broadcast A and B to output
std::pair<int, std::vector<size_t>> UTILITY::MultidirectionalBroadcastShape(std::vector<size_t> & shapeA, std::vector<size_t> & shapeB)
{
   size_t sizeA = shapeA.size();
   size_t sizeB = shapeB.size();
   // Check if A and B have the same shape
   // NOTE(review): the `if` condition guarding this early return was lost
   // during extraction (presumably a same-shape test on shapeA/shapeB);
   // confirm against the original source.
      return std::make_pair(0, shapeA);
   }
   // Find the common shape of A and B
   size_t size = std::max(sizeA, sizeB);
   if (sizeA < size) {
      // prepend 1's in A to make of same shape as B
      std::vector<size_t> newShapeA(size, 1);
      size_t offset = size - sizeA;
      std::copy(shapeA.begin(), shapeA.end(), newShapeA.begin() + offset);
      shapeA = std::move(newShapeA);
   }
   if (sizeB < size) {
      // same right-alignment for B when it is the shorter shape
      std::vector<size_t> newShapeB(size, 1);
      size_t offset = size - sizeB;
      std::copy(shapeB.begin(), shapeB.end(), newShapeB.begin() + offset);
      shapeB = std::move(newShapeB);
   }
   // Broadcastable iff, dimension by dimension, the values agree or one is 1
   bool broadcastable = true;
   for (size_t i = 0; i < size; i++) {
      if (shapeA[i] != shapeB[i] && shapeA[i] != 1 && shapeB[i] != 1) {
         broadcastable = false;
         break;
      }
   }
   int broadcastFlag = 0;
   if (broadcastable) {
      // The output shape is max(outShape, targetShape)
      std::vector<size_t> targetShape(size, 1);
      for (size_t i = 0; i < size; i++) {
         targetShape[i] = std::max(shapeA[i], shapeB[i]);
         // record which input needs broadcasting: bit 0 -> B, bit 1 -> A
         if (shapeB[i] < targetShape[i]) broadcastFlag |= 1;
         if (shapeA[i] < targetShape[i]) broadcastFlag |= 2;
      }
      return std::make_pair(broadcastFlag, targetShape);
   } else {
      throw
         std::runtime_error("TMVA::SOFIE - Error multidirectional broadcasting tensors of shape "
            // NOTE(review): the line inserting the two shape strings into this
            // message was lost during extraction — confirm against the original.
            + " to a common shape.");
   }
}
// unidirectional broadcast- of shape A to target B
std::vector<size_t> UTILITY::UnidirectionalBroadcastShape(std::vector<size_t> & shapeA, std::vector<size_t> & shapeB)
{
   // NOTE(review): the statement computing `ret` was lost during extraction —
   // from the uses below it is presumably the pair returned by
   // MultidirectionalBroadcastShape(shapeA, shapeB); confirm against the
   // original source.
   if (ret.first > 1) {
      // flag > 1 means A itself would need broadcasting, which is not a
      // valid *unidirectional* broadcast of A into B
      throw
         std::runtime_error("TMVA::SOFIE - Error unidirectional broadcasting tensors of shape "
            // NOTE(review): the line inserting the shape strings into this
            // message was lost during extraction.
            + " in a common shape.");
   }
   return ret.second;
}
423
// for broadcasting Dim shapes
// flag indicates also which vector needs to be broadcasted
// flag & 1 == 1 : broadcast B -> A
// flag & 2 == 2 : broadcast A -> B
// flag & 4 == 4 a run time check is needed on shapes with values
std::pair<int, std::vector<Dim>> UTILITY::MultidirectionalBroadcastShape(std::vector<Dim> & shapeA, std::vector<Dim> & shapeB) {
   size_t sizeA = shapeA.size();
   size_t sizeB = shapeB.size();
   // Check if A and B have the same shape
   // NOTE(review): the `if` condition guarding this early return was lost
   // during extraction (presumably a same-shape test on shapeA/shapeB);
   // confirm against the original source.
      return std::make_pair(0, shapeA);
   }
   // Find the common shape of A and B
   size_t size = std::max(sizeA, sizeB);
   if (sizeA < size) {
      // prepend 1's in A to make of same shape as B
      std::vector<Dim> newShapeA(size, Dim{1});
      size_t offset = size - sizeA;
      std::copy(shapeA.begin(), shapeA.end(), newShapeA.begin() + offset);
      shapeA = std::move(newShapeA);
   }
   if (sizeB < size) {
      // same right-alignment for B when it is the shorter shape
      std::vector<Dim> newShapeB(size, Dim{1});
      size_t offset = size - sizeB;
      std::copy(shapeB.begin(), shapeB.end(), newShapeB.begin() + offset);
      shapeB = std::move(newShapeB);
   }

   int broadcastFlag = 0;
   // The output shape is targetShape
   std::vector<Dim> targetShape(size);
   for (size_t i = 0; i < size; i++) {
      // assume we broadcast to the parametric value
      if (shapeA[i] == shapeB[i]) {
         // identical dims (fixed or parametric): keep as is
         targetShape[i] = shapeA[i];
      } else if (shapeA[i].isParam && shapeB[i].GetVal() == "1" ) {
         // broadcast B to A (case A is parametric with )
         targetShape[i] = shapeA[i];
         broadcastFlag |= 1;
      } else if (shapeA[i].GetVal() == "1" && shapeB[i].isParam) {
         // broadcast A to B
         targetShape[i] = shapeB[i];
         broadcastFlag |= 2;
      } else if (!shapeA[i].isParam && !shapeB[i].isParam) {
         // both fixed: the non-1 dimension wins
         if (shapeB[i].dim == 1) {
            targetShape[i] = shapeA[i];
            broadcastFlag |= 1;
         } else if (shapeA[i].dim == 1) {
            targetShape[i] = shapeB[i];
            broadcastFlag |= 2;
         } else {
            // non broadcastable case cannot have A and B two different defined shapes different than one
            broadcastFlag = -1;
         }
      } else if (shapeA[i].isParam && shapeB[i].isParam) {
         // full dynamic case - we will decided at run time
         std::stringstream s;
         s << "std::max(" << shapeA[i] << "," << shapeB[i] << ")";
         // use -1 for dim to indicate is an expression
         targetShape[i] = Dim { s.str() , static_cast<size_t>(-1)};
         broadcastFlag |= 4;
      } else if (shapeA[i].isParam && !shapeB[i].isParam) {
         // A -> B need to check at run time if consistent
         targetShape[i] = shapeB[i];
         broadcastFlag |= 6;
      } else if (!shapeA[i].isParam && shapeB[i].isParam) {
         // B -> A need to check at run time if consistent
         targetShape[i] = shapeA[i];
         broadcastFlag |= 5;
      } else {
         // all cases should be covered
         throw std::runtime_error("TMVA::SOFIE - Fatal error in MultiDirectionalBroadCastDimShape");
      }
   }
   if (broadcastFlag == -1) {
      throw std::runtime_error("TMVA::SOFIE - Error multidirectional broadcasting tensors of shape " +
         // NOTE(review): the line inserting the shape strings into this
         // message was lost during extraction — confirm against the original.
         " to a common shape.");
   }

   return std::make_pair(broadcastFlag, targetShape);
}
506
507std::string UTILITY::Clean_name(std::string input_tensor_name){
508 std::string s (input_tensor_name);
509 std::replace( s.begin(), s.end(), '-', '_');
510 // replace all non-alpohanumeric character except for "_"
511 s.erase(std::remove_if(s.begin(), s.end(), []( char const& c ) -> bool { return !std::isalnum(c) && c != '_'; } ), s.end());
512 return s;
513}
514
515std::vector<size_t> UTILITY::ComputeStrideFromShape(const std::vector<size_t> & shape) {
516 // assume row major layout
517 const auto size = shape.size();
518 std::vector<size_t> strides(size,1);
519 for (std::size_t i = 1; i < size; i++) {
520 strides[size - 1 - i] = strides[size - i ] * shape[size - i];
521 }
522 return strides;
523}
524
525std::vector<Dim> UTILITY::ComputeStrideFromShape(const std::vector<Dim> & shape) {
526 // assume row major layout
527 const auto size = shape.size();
528 std::vector<Dim> strides(size);
529 if (size > 0) {
530 strides[size-1] = Dim{1};
531 for (std::size_t i = 1; i < size; i++) {
532 if (!shape[size-i].isParam && !strides[size-i].isParam)
533 strides[size - 1 - i] = Dim{strides[size-i].dim * shape[size-i].dim};
534 else {
535 if (strides[size-i].GetVal() == "1")
536 strides[size - 1 - i] = shape[size-i];
537 else if (shape[size-i].GetVal() == "1")
538 strides[size - 1 - i] = strides[size-i];
539 else
540 strides[size - 1 - i] = Dim{std::string(strides[size-i].GetVal() + "*" + shape[size-i].GetVal())};
541 }
542 }
543 }
544 return strides;
545}
546
/// A contiguous free region inside the planned memory arena
/// (element of OrganizeMemory's free list).
struct FreeBlock {
   std::size_t offset; ///< byte offset of the block inside the arena
   std::size_t size;   ///< block size in bytes
   /// Strict ordering by offset, giving deterministic coalescing of the free list.
   bool operator<(const FreeBlock& rhs) const { return offset < rhs.offset; }
};
555
/// Event marking the start or end of a tensor's lifetime, used by the
/// OrganizeMemory sweep. Ordered by time, with END events sorted before
/// START events at the same time so freed blocks can be reused immediately.
/// NOTE(review): the `struct MemoryEvent {` declaration line was lost during
/// extraction; it is restored here from the usage in OrganizeMemory below.
struct MemoryEvent {
   int t; // time (i.e. operator index)
   int type; // 0 = END first, 1 = START
   int idx; // tensor index
   bool operator<(const MemoryEvent& o) const {
      if (t != o.t) return t < o.t;
      return type < o.type; // END before START at the same time
   }
};
565
/// Greedy best-fit planner with coalescing free list.
/// Assigns a byte offset inside a single arena to every tensor so that
/// tensors with overlapping lifetimes never overlap in memory.
/// @param tensorsInfo one entry per tensor giving its byte size and its
///        [begin, end) lifetime expressed as operator (time) indices
/// @return a MemoryResult holding the total arena size in bytes and the
///         offset assigned to each tensor (same indexing as tensorsInfo)
/// @throws std::runtime_error if any tensor has end <= begin
MemoryResult OrganizeMemory(const std::vector<TensorLifeInfo> & tensorsInfo )
{
   // Basic validation
   for (const auto &t : tensorsInfo) {
      if (!(t.end > t.begin)) {
         throw std::runtime_error("Each tensor must have end > begin.");
      }
   }

   // Build events: free before allocate at equal times.
   // (MemoryEvent::operator< orders END (type 0) before START (type 1))
   std::vector<MemoryEvent> events;
   events.reserve(tensorsInfo.size() * 2);
   for (int i = 0; i < (int)tensorsInfo.size(); ++i) {
      events.push_back({tensorsInfo[i].end, 0, i}); // END
      events.push_back({tensorsInfo[i].begin, 1, i}); // START
   }
   std::sort(events.begin(), events.end());

   // Result: byte offset assigned to each tensor
   std::vector<size_t> tensorsOffset(tensorsInfo.size());

   // Free list ordered by offset (for O(log n) coalescing)
   // and faster insert/erase with respect to a vector
   std::set<FreeBlock> free_list;

   // Bookkeeping: size/offset map for frees.
   // (keyed by tensor index while the tensor is alive)
   std::unordered_map<int, std::size_t> live_size;
   std::unordered_map<int, std::size_t> live_offset;

   // Current arena size; grows only when no free block fits.
   std::size_t total_bytes = 0;

   auto allocate_best_fit = [&](std::size_t need) -> std::size_t {
      // Find the *smallest* block whose size >= need (best-fit).
      // Since free_list is ordered by offset, we scan to find best by size.
      // (For very large sets you could maintain a multimap by size as well.)
      auto best = free_list.end();
      for (auto it = free_list.begin(); it != free_list.end(); ++it) {
         if (it->size >= need) {
            if (best == free_list.end() || it->size < best->size)
               best = it;
         }
      }
      if (best != free_list.end()) {
         std::size_t off = best->offset;
         if (best->size == need) {
            // exact fit: consume the whole block
            free_list.erase(best);
         } else {
            // split: keep the tail of the block in the free list
            FreeBlock updated{best->offset + need, best->size - need};
            free_list.erase(best);
            free_list.insert(updated);
         }
         return off;
      }
      // No free block large enough; grow the heap.
      std::size_t off = total_bytes;
      total_bytes += need;
      return off;
   };

   auto try_coalesce = [&](std::set<FreeBlock>::iterator it) {
      // Coalesce with previous
      if (it != free_list.begin()) {
         auto prev = std::prev(it);
         if (prev->offset + prev->size == it->offset) {
            FreeBlock merged{prev->offset, prev->size + it->size};
            free_list.erase(prev);
            it = free_list.erase(it);
            // keep the iterator to the merged block so it can also be
            // coalesced with its successor below
            it = free_list.insert(merged).first;
         }
      }
      // Coalesce with next
      auto next = std::next(it);
      if (next != free_list.end() && it->offset + it->size == next->offset) {
         FreeBlock merged{it->offset, it->size + next->size};
         free_list.erase(next);
         it = free_list.erase(it);
         free_list.insert(merged);
      }
   };

   // Sweep through time.
   for (const auto &e : events) {
      if (e.type == 0) { // END: free
         auto it_sz = live_size.find(e.idx);
         auto it_off = live_offset.find(e.idx);
         if (it_sz != live_size.end() && it_off != live_offset.end()) {
            FreeBlock fb{it_off->second, it_sz->second};
            // Insert and coalesce with neighbors
            auto it = free_list.insert(fb).first;
            try_coalesce(it);
            live_size.erase(it_sz);
            live_offset.erase(it_off);
         }
      } else { // START: allocate
         auto &t = tensorsInfo[e.idx];
         std::size_t off = allocate_best_fit(t.size);
         tensorsOffset[e.idx] = off;
         live_size[e.idx] = t.size;
         live_offset[e.idx] = off;
      }
   }

   return MemoryResult{total_bytes, std::move(tensorsOffset)};
}
670
671} // namespace SOFIE
672} // namespace Experimental
673} // namespace TMVA
#define c(i)
Definition RSha256.hxx:101
#define e(i)
Definition RSha256.hxx:103
size_t size(const MatrixT &matrix)
retrieve the size of a square matrix
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows the typed iteration through a TCollection.
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void input
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h offset
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h Atom_t Int_t ULong_t ULong_t unsigned char prop_list Atom_t Atom_t target
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h length
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void value
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h Atom_t Int_t ULong_t ULong_t unsigned char prop_list Atom_t Atom_t Atom_t Time_t type
const_iterator begin() const
const_iterator end() const
const Int_t n
Definition legend1.C:16
bool AreSameShape(const std::vector< size_t > &, const std::vector< size_t > &)
std::vector< size_t > UnidirectionalBroadcastShape(std::vector< size_t > &, std::vector< size_t > &)
std::string Clean_name(std::string input_tensor_name)
std::vector< size_t > MultidirectionalBroadcastShape(std::vector< std::vector< size_t > >)
std::vector< size_t > ComputeStrideFromShape(const std::vector< size_t > &shape)
compute stride of a tensor given its shape (assume layout is row-major)
MemoryResult OrganizeMemory(const std::vector< TensorLifeInfo > &tensorsInfo)
Greedy best-fit planner with coalescing free list.
std::string ConvertDimShapeToString(const std::vector< Dim > &shape)
std::size_t ConvertShapeToLength(const std::vector< size_t > &shape)
std::vector< Dim > ConvertShapeToDim(const std::vector< size_t > &shape)
Convert shape from integer format to dynamic one (based on Dim)
std::vector< size_t > ConvertShapeToInt(const std::vector< Dim > &shape)
Convert shape based on Dim to integer format.
std::string ConvertTypeToString(ETensorType type)
ETensorType ConvertStringToType(std::string type)
std::string ConvertDimShapeToLength(const std::vector< Dim > &shape)
std::string ConvertShapeToString(const std::vector< size_t > &shape)
bool IsInteger(const std::string &s)
create variable transformations
bool operator<(const FreeBlock &other) const
bool operator<(const MemoryEvent &o) const