SOFIE_common.hxx
#ifndef TMVA_SOFIE_SOFIE_COMMON
#define TMVA_SOFIE_SOFIE_COMMON

#include "TMVA/RTensor.hxx"

#include "ROOT/RSpan.hxx"

#include <stdexcept>
#include <type_traits>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>
#include <memory>
#include <regex>
#include <sstream>
#include <iostream>

namespace TMVA{
namespace Experimental{
namespace SOFIE{

//typedef RTensor tensor_t;

enum class ETensorType{
   UNDEFINED = 0, FLOAT = 1, UINT8 = 2, INT8 = 3, UINT16 = 4, INT16 = 5, INT32 = 6, INT64 = 7, STRING = 8, BOOL = 9, // order sensitive
   FLOAT16 = 10, DOUBLE = 11, UINT32 = 12, UINT64 = 13, COMPLEX64 = 14, COMPLEX128 = 15, BFLOAT16 = 16
};

typedef std::int64_t int_t;

std::string ConvertTypeToString(ETensorType type);
ETensorType ConvertStringToType(std::string type);

struct Dim{
   bool isParam = false;
   size_t dim = 0;
   std::string param;

   // default constructor (for I/O)
   Dim() {}

   // constructor for a parametric dimension with the option to pass a default dim value
   Dim(const std::string & p, size_t d = 0) : isParam(true), dim(d), param(p) {}

   // constructor for a non-parametric dimension
   Dim(size_t d) : dim(d) {}

   std::string GetVal() const {
      return (isParam) ? param : std::to_string(dim);
   }
};
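
// Illustrative usage (a sketch, not part of the original header): a Dim either
// wraps a fixed size or a named (parametric) dimension such as a batch size.
//
//    Dim fixed(64);            // fixed.GetVal() == "64"
//    Dim batch("bsize", 1);    // batch.GetVal() == "bsize"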

struct InputTensorInfo{
   ETensorType type;
   std::vector<Dim> shape;
};

struct TensorInfo{
   ETensorType type;
   std::vector<size_t> shape;
};

struct DynamicTensorInfo{
   ETensorType type;
   std::vector<Dim> shape;
};

/// Convert shape from integer format to dynamic one (based on Dim)
std::vector<Dim> ConvertShapeToDim(std::vector<size_t> shape);

/// Convert shape based on Dim to integer format
std::vector<size_t> ConvertShapeToInt(std::vector<Dim> shape);

std::size_t ConvertShapeToLength(std::vector<size_t> shape);

std::string ConvertShapeToString(std::vector<size_t> shape);
std::string ConvertDynamicShapeToString(std::vector<Dim> shape);
// std::string ConvertShapeToString(std::vector<Dim> shape) {
//    return ConvertDynamicShapeToString(shape);
// }

std::string ConvertDynamicShapeToLength(std::vector<Dim> shape);
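
// Illustrative usage of the shape helpers (a sketch; the exact string format
// is an assumption):
//
//    std::vector<size_t> s{2, 3, 4};
//    ConvertShapeToLength(s);   // 24, the product of all dimensions
//    ConvertShapeToString(s);   // printable form, e.g. "{ 2 , 3 , 4 }"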

class InitializedTensor {
public:
   InitializedTensor() = default;
   InitializedTensor(ETensorType type, std::span<std::size_t> shape, std::shared_ptr<void> data)
      : fType{type}, fShape{shape.begin(), shape.end()}, fData{data}
   {
   }

   ETensorType const &type() const { return fType; }
   std::vector<std::size_t> const &shape() const { return fShape; }
   std::shared_ptr<void> const &sharedptr() const { return fData; }

   template <class T = void>
   T const *data() const
   {
      return static_cast<T const *>(fData.get());
   }

   void CastSharedToPersistent()
   {
      // We only calculate fSize here, because it is only used for IO to know
      // the size of the persistent data.
      fSize = 1;
      for (std::size_t item : fShape) {
         fSize *= static_cast<int>(item);
      }
      switch (fType) {
      case ETensorType::FLOAT: fSize *= sizeof(float); break;
      case ETensorType::DOUBLE: fSize *= sizeof(double); break;
      case ETensorType::INT32: fSize *= sizeof(int32_t); break;
      case ETensorType::INT64: fSize *= sizeof(int64_t); break;
      case ETensorType::BOOL: fSize *= sizeof(bool); break;
      default:
         throw std::runtime_error("TMVA::SOFIE doesn't yet support serialising data-type " +
                                  ConvertTypeToString(fType));
      }
      fPersistentData = static_cast<char *>(fData.get());
   }

   void CastPersistentToShared()
   {
      // If there is no persistent data, do nothing
      if (fSize == 0 || fPersistentData == nullptr) {
         return;
      }

      // Nothing to be done if the pointed-to data is the same
      if (fPersistentData == static_cast<char *>(fData.get())) {
         return;
      }

      // Initialize the shared_ptr
      fData = std::shared_ptr<void>{malloc(fSize), free};
      std::memcpy(fData.get(), fPersistentData, fSize);

      // Make sure the data read from disk doesn't leak and delete the
      // persistent data
      delete[] fPersistentData;
      fPersistentData = nullptr;
      fSize = 0;
   }

private:
   ETensorType fType;               ///< Encodes the type of the data
   std::vector<std::size_t> fShape; ///< The shape of the data in terms of elements in each dimension
   std::shared_ptr<void> fData;     ///<! Transient shared data
   int fSize = 0;                   ///< The size of the persistent data in bytes (not number of elements!)
   char *fPersistentData = nullptr; ///<[fSize] Persistent version of the data
};
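
// Illustrative construction (a sketch; the shared_ptr deleter must match the
// allocation, here malloc/free as used by CastPersistentToShared above):
//
//    std::vector<std::size_t> shp{2, 3};
//    std::shared_ptr<void> buf(malloc(6 * sizeof(float)), free);
//    InitializedTensor t(ETensorType::FLOAT, shp, buf);
//    const float *p = t.data<float>();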

template <typename T>
ETensorType GetTemplatedType(T obj){
   if (std::is_same<T, float>::value) return ETensorType::FLOAT;
   if (std::is_same<T, uint8_t>::value) return ETensorType::UINT8;
   if (std::is_same<T, int8_t>::value) return ETensorType::INT8;
   if (std::is_same<T, uint16_t>::value) return ETensorType::UINT16;
   if (std::is_same<T, int16_t>::value) return ETensorType::INT16;
   if (std::is_same<T, int32_t>::value) return ETensorType::INT32;
   if (std::is_same<T, int64_t>::value) return ETensorType::INT64;
   if (std::is_same<T, std::string>::value) return ETensorType::STRING;
   if (std::is_same<T, bool>::value) return ETensorType::BOOL;
   //float16 unimplemented
   if (std::is_same<T, double>::value) return ETensorType::DOUBLE;
   if (std::is_same<T, uint32_t>::value) return ETensorType::UINT32;
   if (std::is_same<T, uint64_t>::value) return ETensorType::UINT64;
   //complex 64, complex 128, bfloat 16 unimplemented
   return ETensorType::UNDEFINED; // fall back for unsupported types instead of falling off the end
}
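
// Illustrative usage (a sketch): the argument's value is unused, only its
// static type matters.
//
//    GetTemplatedType(float{});    // ETensorType::FLOAT
//    GetTemplatedType(int64_t{});  // ETensorType::INT64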

namespace UTILITY{
// Check if two shapes are equal
bool AreSameShape(const std::vector<size_t>&, const std::vector<size_t>&);
bool AreSameShape(const std::vector<size_t>&, const std::vector<Dim>&);
bool AreSameShape(const std::vector<Dim>&, const std::vector<Dim>&);


// Compute the common shape to which a list of shapes broadcasts (multidirectional broadcasting)
std::vector<size_t> MultidirectionalBroadcastShape(std::vector<std::vector<size_t>>);

// Compute the common shape to which two shapes broadcast (unidirectional broadcasting)
std::vector<size_t> UnidirectionalBroadcastShape(std::vector<size_t>, std::vector<size_t>);
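
// Illustrative semantics (a sketch following the numpy/ONNX broadcasting
// rules cited further below; assuming the second shape is broadcast towards
// the first in the unidirectional case):
//
//    MultidirectionalBroadcastShape({{3, 1}, {1, 4}});   // -> {3, 4}
//    UnidirectionalBroadcastShape({2, 3, 4}, {3, 4});    // -> {2, 3, 4}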

std::string Clean_name(std::string input_tensor_name);

template<typename T>
T* BroadcastConvBias(const T* data, const size_t channel, const std::vector<size_t>& targetShape) {
   size_t size = targetShape.size();
   if (targetShape[1] != channel) {
      std::stringstream ss;
      ss << "TMVA::SOFIE - Error broadcasting Conv Bias of shape {";
      ss << std::to_string(channel);
      ss << "} to ";
      ss << ConvertShapeToString(targetShape);
      throw std::runtime_error(ss.str());
   }

   size_t targetLength = ConvertShapeToLength(targetShape);
   T* newData = new T[targetLength];

   if (targetLength == channel) {
      std::copy(data, data + channel, newData);
      return newData;
   }

   // cStride = OutDepth * outHeight * outWidth
   size_t cStride = 1;
   for (size_t i = 2; i < size; i++)
      cStride *= targetShape[i];
   // Broadcast each element of the bias to a vector of size cStride and concatenate them
   // into a vector of size channel * cStride
   for (size_t i = 0; i < channel; i++) {
      std::fill(newData + i * cStride, newData + (i + 1) * cStride, data[i]);
   }
   // Broadcast newData[0...channel * cStride) to newData[0...batch * channel * cStride)
   size_t batch = targetShape[0];
   size_t bStride = channel * cStride;
   for (size_t i = 1; i < batch; i++) {
      std::copy(newData, newData + bStride, newData + i * bStride);
   }
   return newData;
}
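
// Illustrative usage (a sketch): broadcast a per-channel bias {b0, b1} to an
// output of shape {N=1, C=2, H=2, W=2}. The caller owns the returned buffer.
//
//    float bias[2] = {1.f, 2.f};
//    float *full = UTILITY::BroadcastConvBias(bias, 2, {1, 2, 2, 2});
//    // full = {1,1,1,1, 2,2,2,2}
//    delete[] full;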

// Broadcast a tensor from shape to targetShape according to numpy broadcasting rules
// See more at https://numpy.org/doc/stable/user/basics.broadcasting.html
// and https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md .
// Both shapes must have the same rank; each dimension of size 1 in shape is
// expanded to the corresponding dimension of targetShape.
template<typename T>
T* BroadcastTensor(const T* data, const std::vector<size_t>& shape, const std::vector<size_t>& targetShape) {
   // Size of the shapes
   size_t size = shape.size();
   // Current length of the broadcasted tensor
   size_t curLength = ConvertShapeToLength(shape);
   size_t targetLength = ConvertShapeToLength(targetShape);
   T* broadcastedData = new T[targetLength];
   std::copy(data, data + curLength, broadcastedData);
   // Product of the previous dimensions of targetShape
   size_t arrayNum = 1;
   // New broadcasted data
   std::vector<T> newData(targetLength);

   for (size_t idx = 0; idx < size; idx++) {
      size_t dim = shape[idx];
      size_t targetDim = targetShape[idx];
      if (dim == 1 && targetDim > 1) {
         // Set the new length of the data
         size_t newLength = curLength * targetDim;
         // View the data as a list of arrayNum arrays of size arrayLength
         size_t arrayLength = curLength / arrayNum;
         // Broadcast each array targetDim times
         if (arrayLength > 1) {
            // If each array has at least two elements
            for (size_t arrayIdx = 0; arrayIdx < arrayNum; arrayIdx++) {
               for (size_t targetIdx = 0; targetIdx < targetDim; targetIdx++) {
                  size_t offset = arrayIdx * arrayLength * targetDim + targetIdx * arrayLength;
                  std::copy(broadcastedData + arrayIdx * arrayLength,
                            broadcastedData + (arrayIdx + 1) * arrayLength,
                            newData.begin() + offset);
               }
            }
         } else {
            // If each array has a single element
            for (size_t arrayIdx = 0; arrayIdx < arrayNum; arrayIdx++) {
               std::fill(newData.begin() + arrayIdx * targetDim,
                         newData.begin() + (arrayIdx + 1) * targetDim, broadcastedData[arrayIdx]);
            }
         }
         // Update current length
         curLength = newLength;
         // Update broadcasted data
         std::copy(newData.begin(), newData.begin() + newLength, broadcastedData);
      }
      // Update the number of arrays
      arrayNum *= targetDim;
   }
   return broadcastedData;
}
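
// Illustrative usage (a sketch): broadcast a {2, 1} column to {2, 3}.
//
//    float col[2] = {1.f, 2.f};
//    float *out = UTILITY::BroadcastTensor(col, {2, 1}, {2, 3});
//    // out = {1,1,1, 2,2,2}; caller owns the buffer: delete[] out;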

// Unidirectional broadcasting of shape to targetShape
template<typename T>
T* UnidirectionalBroadcast(const T* data, const std::vector<size_t>& shape, const std::vector<size_t>& targetShape) {
   // Prepend the shape with ones until it has the same rank as targetShape
   if (shape.size() < targetShape.size()) {
      size_t targetSize = targetShape.size();
      std::vector<size_t> newShape(targetSize, 1);
      size_t offset = targetSize - shape.size();
      std::copy(shape.begin(), shape.end(), newShape.begin() + offset);
      return BroadcastTensor<T>(data, newShape, targetShape);
   }
   return BroadcastTensor<T>(data, shape, targetShape);
}

/// compute stride of a tensor given its shape (assumes row-major layout)
std::vector<size_t> ComputeStrideFromShape(const std::vector<size_t> & shape);
std::vector<Dim> ComputeStrideFromShape(const std::vector<Dim> & shape);

/// function to check if a >= 0 and a < b using a single comparison:
/// casting to unsigned maps negative values above b, so one test suffices
inline bool is_a_ge_zero_and_a_lt_b(int a, int b) {
   return static_cast<unsigned>(a) < static_cast<unsigned>(b);
}
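
// Illustrative behaviour (a sketch): a negative a wraps to a large unsigned
// value, so the single comparison covers both bounds.
//
//    is_a_ge_zero_and_a_lt_b(-1, 5); // false: (unsigned)-1 is huge
//    is_a_ge_zero_and_a_lt_b( 3, 5); // true
//    is_a_ge_zero_and_a_lt_b( 5, 5); // false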


/// im2col : efficient function to re-arrange input data of convolution to a matrix
/// that can be used by BLAS
/// Use trick to loop on each element of filtered region first and follow input data layout
/// By doing this reads and writes are of consecutive data in memory and one gains in efficiency
/// The resulting matrix will be already transposed and can be used directly in BLAS
/// since output will be a matrix : (channels*kernel_h*kernel_w , output_h*output_w)
/// Example: with an input matrix
///    a1 a2 a3
///    b1 b2 b3    and a 2x2 kernel (k1,k2,k3,k4) and padding 1 :
///    c1 c2 c3
/// output will be a matrix (4 x 16)
/// the routine will follow the output order :
/// first all elements which will be operated by k1, then k2, then k3, then k4
/// -> ( 0  0  0  0  0 a1 a2 a3  0 b1 b2 b3  0 c1 c2 c3 )   all elements for k1
///    ( 0  0  0  0 a1 a2 a3  0 b1 b2 b3  0 c1 c2 c3  0 )   for k2
///    ( 0 a1 a2 a3  0 b1 b2 b3  0 c1 c2 c3  0  0  0  0 )   for k3
///    ( a1 a2 a3  0 b1 b2 b3  0 c1 c2 c3  0  0  0  0  0 )  for k4
///

template <typename T>
void Im2col(const T *data_im, const int channels, const int height, const int width, const int kernel_h,
            const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w,
            const int dilation_h, const int dilation_w, T *data_col)
{
   const int output_h = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int channel_size = height * width;
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
         for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
            int input_row = -pad_h + kernel_row * dilation_h;
            for (int output_rows = output_h; output_rows; output_rows--) {
               if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                  for (int output_cols = output_w; output_cols; output_cols--) {
                     *(data_col++) = 0;
                  }
               } else {
                  int input_col = -pad_w + kernel_col * dilation_w;
                  for (int output_col = output_w; output_col; output_col--) {
                     if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                        *(data_col++) = data_im[input_row * width + input_col];
                     } else {
                        *(data_col++) = 0;
                     }
                     input_col += stride_w;
                  }
               }
               input_row += stride_h;
            }
         }
      }
   }
}
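
// Illustrative usage (a sketch, matching the 3x3 example in the comment above):
// channels=1, height=width=3, kernel 2x2, pad 1, stride 1, dilation 1 gives
// output_h = output_w = 4, so data_col must hold (1*2*2) x (4*4) = 64 values.
//
//    float img[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};  // a1..c3
//    std::vector<float> col(4 * 16);
//    UTILITY::Im2col(img, 1, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, col.data());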

/// 3d implementation
template <typename T>
void Im2col_3d(const T *data_im, const int channels,
               const int depth, const int height, const int width,
               const int kernel_d, const int kernel_h, const int kernel_w,
               const int pad_d, const int pad_h, const int pad_w,
               const int stride_d, const int stride_h, const int stride_w,
               const int dilation_d, const int dilation_h, const int dilation_w, T *data_col)
{
   const int output_h = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int output_d = (depth + 2 * pad_d - (dilation_d * (kernel_d - 1) + 1)) / stride_d + 1;
   const int channel_size = height * width * depth;
   // assume data are c x d x h x w
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_depth = 0; kernel_depth < kernel_d; kernel_depth++) {
         for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
            for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
               int input_dep = -pad_d + kernel_depth * dilation_d;
               for (int output_dep = output_d; output_dep; output_dep--) {
                  if (!is_a_ge_zero_and_a_lt_b(input_dep, depth)) {
                     for (int output_rows = output_h; output_rows; output_rows--) {
                        for (int output_cols = output_w; output_cols; output_cols--) {
                           *(data_col++) = 0;
                        }
                     }
                  } else {
                     int input_row = -pad_h + kernel_row * dilation_h;
                     for (int output_rows = output_h; output_rows; output_rows--) {
                        if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                           for (int output_cols = output_w; output_cols; output_cols--) {
                              *(data_col++) = 0;
                           }
                        } else {
                           int input_col = -pad_w + kernel_col * dilation_w;
                           for (int output_col = output_w; output_col; output_col--) {
                              if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                                 *(data_col++) = data_im[input_dep * width * height + input_row * width + input_col];
                              } else {
                                 *(data_col++) = 0;
                              }
                              input_col += stride_w;
                           }
                        }
                        input_row += stride_h;
                     }
                  }
                  input_dep += stride_d;
               }
            }
         }
      }
   }
}

template <typename Dtype>
void col2im(const Dtype* data_col, const int channels,
            const int height, const int width, const int kernel_h, const int kernel_w,
            const int pad_h, const int pad_w,
            const int stride_h, const int stride_w,
            const int dilation_h, const int dilation_w,
            Dtype* data_im) {
   // col2im accumulates into data_im, so the output must start out as zero
   std::fill(data_im, data_im + height * width * channels, Dtype(0));
   //caffe_set(height * width * channels, Dtype(0), data_im);
   //const Dtype * data_col_0 = data_col;
   const int output_h = (height + 2 * pad_h -
                         (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w -
                         (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int channel_size = height * width;
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
         for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
            int input_row = -pad_h + kernel_row * dilation_h;
            for (int output_rows = output_h; output_rows; output_rows--) {
               if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                  data_col += output_w;
               } else {
                  int input_col = -pad_w + kernel_col * dilation_w;
                  for (int output_col = output_w; output_col; output_col--) {
                     if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                        //assert(input_row*width+input_col < height * width * channels);
                        //assert(data_col - data_col_0 < output_h*output_w*channels);
                        data_im[input_row * width + input_col] += *data_col;
                     }
                     data_col++;
                     input_col += stride_w;
                  }
               }
               input_row += stride_h;
            }
         }
      }
   }
}
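
// Illustrative usage (a sketch): the inverse of the Im2col sketch above,
// accumulating the 4 x 16 column matrix back into a 3x3 image (col2im zeroes
// data_im itself before accumulating).
//
//    std::vector<float> img(9);
//    UTILITY::col2im(col.data(), 1, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, img.data());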


} // end namespace UTILITY

namespace BLAS{
extern "C" void sgemm_(const char * transa, const char * transb, const int * m, const int * n, const int * k,
                       const float * alpha, const float * A, const int * lda, const float * B, const int * ldb,
                       const float * beta, float * C, const int * ldc);
}//BLAS
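
// Illustrative call (a sketch): Fortran BLAS is column-major, so C = A * B for
// row-major matrices can be computed as the column-major product B^T * A^T,
// i.e. by swapping the operand order while keeping transa = transb = 'n'.
//
//    char nt = 'n';
//    int m = 2, n = 3, k = 4;              // A: 2x4, B: 4x3, C: 2x3, all row-major
//    float A[8], B[12], C[6];              // fill A and B before the call
//    float alpha = 1.f, beta = 0.f;
//    BLAS::sgemm_(&nt, &nt, &n, &m, &k, &alpha, B, &n, A, &k, &beta, C, &n);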


struct GNN_Data {
   RTensor<float> node_data;   // the node feature data, tensor with shape (num_nodes, num_node_features)
   RTensor<float> edge_data;   // the edge feature data, tensor with shape (num_edges, num_edge_features)
   RTensor<float> global_data; // the global features, tensor with shape (1, num_global_features)
   RTensor<int> edge_index;    // the edge index (receivers and senders for each edge), tensor with shape (2, num_edges);
                               // edge_index[0,:] are the receivers and edge_index[1,:] are the senders

   // need to have a default constructor since RTensor does not have one
   GNN_Data() : node_data(RTensor<float>({})), edge_data(RTensor<float>({})), global_data(RTensor<float>({})), edge_index(RTensor<int>({})) {}

};

template<typename T>
TMVA::Experimental::RTensor<T> Concatenate(TMVA::Experimental::RTensor<T> & t1, TMVA::Experimental::RTensor<T> & t2, int axis = 0)
{
   // concatenate tensors along axis. Shapes must be the same except in the dimension of the concatenated axis
   if (t1.GetMemoryLayout() != t2.GetMemoryLayout())
      throw std::runtime_error("TMVA RTensor Concatenate - tensors have different memory layout");
   auto & shape1 = t1.GetShape();
   auto & shape2 = t2.GetShape();
   if (t1.GetSize()/shape1[axis] != t2.GetSize()/shape2[axis]) {
      std::cout << "axis " << axis << " sizes " << t1.GetSize() << " " << t2.GetSize() << " ";
      std::cout << "shape 1 : " << ConvertShapeToString(t1.GetShape());
      std::cout << " shape 2 : " << ConvertShapeToString(t2.GetShape()) << std::endl;
      throw std::runtime_error("TMVA RTensor Concatenate - tensors have incompatible shapes");
   }
   std::vector<size_t> outShape = shape1;
   outShape[axis] = shape1[axis] + shape2[axis];
   TMVA::Experimental::RTensor<T> tout(outShape, t1.GetMemoryLayout());
   if (t1.GetMemoryLayout() == TMVA::Experimental::MemoryLayout::ColumnMajor) {
      throw std::runtime_error("TMVA RTensor Concatenate is not yet supported for column major tensors");
   }

   auto & stride1 = t1.GetStrides();
   auto & stride2 = t2.GetStrides();
   auto & outStride = tout.GetStrides();

   size_t s1 = (axis > 0) ? stride1[axis-1] : t1.GetSize();  // block size to copy from first tensor
   size_t s2 = (axis > 0) ? stride2[axis-1] : t2.GetSize();  // block size to copy from second tensor
   size_t sout = (axis > 0) ? outStride[axis-1] : tout.GetSize();
   size_t nb = t1.GetSize()/s1;
   for (size_t i = 0; i < nb; i++) {
      std::copy(t1.GetData() + i*s1, t1.GetData() + (i+1)*s1, tout.GetData() + i * sout );
      std::copy(t2.GetData() + i*s2, t2.GetData() + (i+1)*s2, tout.GetData() + i * sout + s1 );
   }

   return tout;
}
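
// Illustrative usage (a sketch): concatenating a 2x3 and a 4x3 row-major
// tensor along axis 0 yields a 6x3 tensor.
//
//    TMVA::Experimental::RTensor<float> t1({2, 3}), t2({4, 3});
//    auto t3 = Concatenate(t1, t2, 0);   // t3.GetShape() == {6, 3}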


inline GNN_Data Concatenate(GNN_Data & data1, GNN_Data & data2, int axis = 0) {
   GNN_Data out;
   out.node_data = Concatenate(data1.node_data, data2.node_data, axis);
   out.edge_data = Concatenate(data1.edge_data, data2.edge_data, axis);
   out.global_data = Concatenate<float>(data1.global_data, data2.global_data, axis-1);
   // assume senders/receivers of data1 and data2 are the same
   out.edge_index = data1.edge_index.Copy();
   return out;
}

inline GNN_Data Copy(const GNN_Data & data) {
   GNN_Data out;
   out.node_data = RTensor<float>(data.node_data.GetShape());
   out.edge_data = RTensor<float>(data.edge_data.GetShape());
   out.global_data = RTensor<float>(data.global_data.GetShape());
   out.edge_index = RTensor<int>(data.edge_index.GetShape());
   std::copy(data.node_data.GetData(), data.node_data.GetData() + data.node_data.GetSize(), out.node_data.GetData());
   std::copy(data.edge_data.GetData(), data.edge_data.GetData() + data.edge_data.GetSize(), out.edge_data.GetData());
   std::copy(data.global_data.GetData(), data.global_data.GetData() + data.global_data.GetSize(), out.global_data.GetData());
   std::copy(data.edge_index.GetData(), data.edge_index.GetData() + data.edge_index.GetSize(), out.edge_index.GetData());
   return out;
}

}//SOFIE
}//Experimental
}//TMVA

#endif //TMVA_SOFIE_SOFIE_COMMON