SOFIE_common.hxx
#ifndef TMVA_SOFIE_SOFIE_COMMON
#define TMVA_SOFIE_SOFIE_COMMON

#include "TMVA/RTensor.hxx"

#include <stdexcept>
#include <type_traits>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>
#include <memory>
#include <regex>
#include <sstream>
#include <iostream>

namespace TMVA{
namespace Experimental{
namespace SOFIE{

//typedef RTensor tensor_t;
enum class ETensorType{
   UNDEFINED = 0, FLOAT = 1, UINT8 = 2, INT8 = 3, UINT16 = 4, INT16 = 5, INT32 = 6, INT64 = 7, STRING = 8, BOOL = 9, //order sensitive
   FLOAT16 = 10, DOUBLE = 11, UINT32 = 12, UINT64 = 13, COMPLEX64 = 14, COMPLEX128 = 15, BFLOAT16 = 16
};

typedef std::int64_t int_t;

std::string ConvertTypeToString(ETensorType type);
ETensorType ConvertStringToType(std::string type);

struct Dim{
   bool isParam = false;
   size_t dim = 0;
   std::string param;

   // default constructor (for I/O)
   Dim() {}

   // constructor for a parametric dimension with the option to pass a default dim value
   Dim(const std::string & p, size_t d = 0) : isParam(true), dim(d), param(p) {}

   // constructor for a non-parametric dimension
   Dim(size_t d) : dim(d) {}

   std::string GetVal() const {
      return (isParam) ? param : std::to_string(dim);
   }
};
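// Illustrative sketch (not part of the original header): a Dim can describe either
// a fixed dimension or a named, runtime-parametric one.
//    Dim batch("bsize");                       // parametric: GetVal() == "bsize"
//    Dim features(10);                         // fixed:      GetVal() == "10"
//    std::vector<Dim> shape{batch, features};  // a dynamic shape (bsize, 10)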

struct InputTensorInfo{
   ETensorType type;
   std::vector<Dim> shape;
};

struct TensorInfo{
   ETensorType type;
   std::vector<size_t> shape;
};

struct DynamicTensorInfo{
   ETensorType type;
   std::vector<Dim> shape;
};

std::vector<Dim> ConvertShapeToDim(std::vector<size_t> shape);

std::vector<size_t> ConvertShapeToInt(std::vector<Dim> shape);

std::size_t ConvertShapeToLength(std::vector<size_t> shape);

std::string ConvertShapeToString(std::vector<size_t> shape);
std::string ConvertDynamicShapeToString(std::vector<Dim> shape);
// std::string ConvertShapeToString(std::vector<Dim> shape) {
//    return ConvertDynamicShapeToString(shape);
// }

std::string ConvertDynamicShapeToLength(std::vector<Dim> shape);
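// Illustrative expectations for the helpers above (a sketch based on their names
// and their uses in this file, not a normative specification of the output format):
//    ConvertShapeToLength({2, 3, 4});                  // 24 = number of elements
//    ConvertShapeToString({2, 3, 4});                  // a printable form, e.g. "{ 2 , 3 , 4 }"
//    ConvertShapeToDim({2, 3});                        // {Dim(2), Dim(3)}
//    ConvertDynamicShapeToLength({Dim("b"), Dim(3)});  // a length expression, e.g. "b * 3"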

struct InitializedTensor {
   ETensorType fType;
   std::vector<std::size_t> fShape;
   std::shared_ptr<void> fData;     //! Transient
   int fSize=1;
   char* fPersistentData=nullptr;   //[fSize] Persistent

   void CastSharedToPersistent() {
      for(auto item:fShape){
         fSize*=(int)item;
      }
      switch(fType){
         case ETensorType::FLOAT: fSize*=sizeof(float); break;
         case ETensorType::DOUBLE: fSize*=sizeof(double); break;
         case ETensorType::INT32: fSize*=sizeof(int32_t); break;
         case ETensorType::INT64: fSize*=sizeof(int64_t); break;
         case ETensorType::BOOL: fSize*=sizeof(bool); break;
         default:
            throw std::runtime_error("TMVA::SOFIE doesn't yet support serialising data-type " + ConvertTypeToString(fType));
      }
      fPersistentData=(char*)fData.get();
   }
   void CastPersistentToShared() {
      switch (fType) {
         case ETensorType::FLOAT: {
            std::shared_ptr<void> tData(malloc(fSize * sizeof(float)), free);
            std::memcpy(tData.get(), fPersistentData, fSize * sizeof(float));
            fData = tData;
            break;
         }
         case ETensorType::DOUBLE: {
            std::shared_ptr<void> tData(malloc(fSize * sizeof(double)), free);
            std::memcpy(tData.get(), fPersistentData, fSize * sizeof(double));
            fData = tData;
            break;
         }
         case ETensorType::INT32: {
            std::shared_ptr<void> tData(malloc(fSize * sizeof(int32_t)), free);
            std::memcpy(tData.get(), fPersistentData, fSize * sizeof(int32_t));
            fData = tData;
            break;
         }
         case ETensorType::INT64: {
            std::shared_ptr<void> tData(malloc(fSize * sizeof(int64_t)), free);
            std::memcpy(tData.get(), fPersistentData, fSize * sizeof(int64_t));
            fData = tData;
            break;
         }
         case ETensorType::BOOL: {
            std::shared_ptr<void> tData(malloc(fSize * sizeof(bool)), free);
            std::memcpy(tData.get(), fPersistentData, fSize * sizeof(bool));
            fData = tData;
            break;
         }
         default: {
            throw std::runtime_error("TMVA::SOFIE doesn't yet support serialising data-type " +
                                     ConvertTypeToString(fType));
         }
      }
   }
};

template <typename T>
ETensorType GetTemplatedType(T /*obj*/ ){
   if (std::is_same<T, float>::value) return ETensorType::FLOAT;
   if (std::is_same<T, uint8_t>::value) return ETensorType::UINT8;
   if (std::is_same<T, int8_t>::value) return ETensorType::INT8;
   if (std::is_same<T, uint16_t>::value) return ETensorType::UINT16;
   if (std::is_same<T, int16_t>::value) return ETensorType::INT16;
   if (std::is_same<T, int32_t>::value) return ETensorType::INT32;
   if (std::is_same<T, int64_t>::value) return ETensorType::INT64;
   if (std::is_same<T, std::string>::value) return ETensorType::STRING;
   if (std::is_same<T, bool>::value) return ETensorType::BOOL;
   //float16 unimplemented
   if (std::is_same<T, double>::value) return ETensorType::DOUBLE;
   if (std::is_same<T, uint32_t>::value) return ETensorType::UINT32;
   if (std::is_same<T, uint64_t>::value) return ETensorType::UINT64;
   //complex 64, 128, bfloat 16 unimplemented
   return ETensorType::UNDEFINED; // avoid falling off the end for unmapped types
}
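// Illustrative use (the argument's value is ignored; only its static type selects
// the enum entry):
//    GetTemplatedType(float{});    // ETensorType::FLOAT
//    GetTemplatedType(int64_t{});  // ETensorType::INT64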

namespace UTILITY{
// Check if two shapes are equal
bool AreSameShape(const std::vector<size_t>&, const std::vector<size_t>&);
bool AreSameShape(const std::vector<size_t>&, const std::vector<Dim>&);
bool AreSameShape(const std::vector<Dim>&, const std::vector<Dim>&);

// Multidirectional broadcast a list of tensors to the same shape
std::vector<size_t> MultidirectionalBroadcastShape(std::vector<std::vector<size_t>>);

// Unidirectional broadcast two shapes to the same shape
std::vector<size_t> UnidirectionalBroadcastShape(std::vector<size_t>, std::vector<size_t>);

std::string Clean_name(std::string input_tensor_name);

template<typename T>
T* BroadcastConvBias(const T* data, const size_t channel, const std::vector<size_t>& targetShape) {
   size_t size = targetShape.size();
   if (targetShape[1] != channel) {
      std::stringstream ss;
      ss << "TMVA::SOFIE - Error broadcasting Conv Bias of shape {";
      ss << std::to_string(channel);
      ss << "} to ";
      ss << ConvertShapeToString(targetShape);
      throw std::runtime_error(ss.str());
   }

   size_t targetLength = ConvertShapeToLength(targetShape);
   T* newData = new T[targetLength];

   if (targetLength == channel) {
      std::copy(data, data + channel, newData);
      return newData;
   }

   // cStride = outDepth * outHeight * outWidth
   size_t cStride = 1;
   for (size_t i = 2; i < size; i++)
      cStride *= targetShape[i];
   // Broadcast each element of the bias to a vector of size cStride and concatenate them
   // into a vector of size channel * cStride
   for (size_t i = 0; i < channel; i++) {
      std::fill(newData + i * cStride, newData + (i + 1) * cStride, data[i]);
   }
   // Broadcast newData[0...channel * cStride) to newData[0...batch * channel * cStride)
   size_t batch = targetShape[0];
   size_t bStride = channel * cStride;
   for (size_t i = 1; i < batch; i++) {
      std::copy(newData, newData + bStride, newData + i * bStride);
   }
   return newData;
}
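// Usage sketch (hypothetical values): broadcast a per-channel bias of C = 2 values
// to a full activation tensor of shape (N=1, C=2, H=2, W=2); the caller owns and
// must delete[] the returned buffer.
//    float bias[2] = {0.5f, -1.0f};
//    float* full = BroadcastConvBias(bias, 2, std::vector<size_t>{1, 2, 2, 2});
//    // full = {0.5, 0.5, 0.5, 0.5, -1, -1, -1, -1}; delete[] full;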

// Broadcast a tensor from shape to targetShape according to numpy broadcasting rules
// See more at https://numpy.org/doc/stable/user/basics.broadcasting.html
// and https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md .
template<typename T>
T* BroadcastTensor(const T* data, const std::vector<size_t>& shape, const std::vector<size_t>& targetShape) {
   // Size of the shapes (both shapes must have the same rank)
   size_t size = shape.size();
   // Current length of the broadcasted tensor
   size_t curLength = ConvertShapeToLength(shape);
   size_t targetLength = ConvertShapeToLength(targetShape);
   // Allocate the target-length buffer and seed it with the original data
   T* broadcastedData = new T[targetLength];
   std::copy(data, data + curLength, broadcastedData);
   // Product of the previous dimensions of targetShape
   size_t arrayNum = 1;
   // Scratch buffer for the data broadcast along the current axis
   std::vector<T> newData(targetLength);

   for (size_t idx = 0; idx < size; idx++) {
      size_t dim = shape[idx];
      size_t targetDim = targetShape[idx];
      if (dim == 1 && targetDim > 1) {
         // Set the new length of the data
         size_t newLength = curLength * targetDim;
         // View the data as a list of arrayNum arrays of size arrayLength
         size_t arrayLength = curLength / arrayNum;
         // Broadcast each array targetDim times
         if (arrayLength > 1) {
            // If each array has at least two elements
            for (size_t arrayIdx = 0; arrayIdx < arrayNum; arrayIdx++) {
               for (size_t targetIdx = 0; targetIdx < targetDim; targetIdx++) {
                  size_t offset = arrayIdx * arrayLength * targetDim + targetIdx * arrayLength;
                  std::copy(broadcastedData + arrayIdx * arrayLength,
                            broadcastedData + (arrayIdx + 1) * arrayLength,
                            newData.begin() + offset);
               }
            }
         } else {
            // If each array has a single element
            for (size_t arrayIdx = 0; arrayIdx < arrayNum; arrayIdx++) {
               std::fill(newData.begin() + arrayIdx * targetDim,
                         newData.begin() + (arrayIdx + 1) * targetDim, broadcastedData[arrayIdx]);
            }
         }
         // Update current length
         curLength = newLength;
         // Update broadcasted data
         std::copy(newData.begin(), newData.begin() + newLength, broadcastedData);
      }
      // Update the number of arrays
      arrayNum *= targetDim;
   }
   return broadcastedData;
}
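// Worked example (illustrative): broadcasting shape (2,1) to (2,3). At the second
// axis each length-1 "array" is replicated three times, so {1,2} becomes
// {1,1,1,2,2,2}. The caller must delete[] the returned buffer.
//    float data[2] = {1.f, 2.f};
//    float* out = BroadcastTensor<float>(data, {2, 1}, {2, 3});
//    // out = {1,1,1,2,2,2}; delete[] out;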

// Unidirectional broadcasting shape to targetShape
template<typename T>
T* UnidirectionalBroadcast(const T* data, const std::vector<size_t>& shape, const std::vector<size_t>& targetShape) {
   // Prepend shape with ones
   if (shape.size() < targetShape.size()) {
      size_t targetSize = targetShape.size();
      std::vector<size_t> newShape(targetSize, 1);
      size_t offset = targetSize - shape.size();
      std::copy(shape.begin(), shape.end(), newShape.begin() + offset);
      return BroadcastTensor<T>(data, newShape, targetShape);
   }
   return BroadcastTensor<T>(data, shape, targetShape);
}
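// Illustrative: a shape (3) tensor is padded to (1,3) before broadcasting against
// a (2,3) target, matching the numpy rules referenced above.
//    float v[3] = {1.f, 2.f, 3.f};
//    float* out = UnidirectionalBroadcast<float>(v, {3}, {2, 3});
//    // out = {1,2,3,1,2,3}; delete[] out;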

/// compute stride of a tensor given its shape (assume layout is row-major)
std::vector<size_t> ComputeStrideFromShape(const std::vector<size_t> & shape);
std::vector<Dim> ComputeStrideFromShape(const std::vector<Dim> & shape);

/// function to check if a >= 0 and a < b using a single comparison:
/// casting to unsigned turns any negative a into a value larger than any valid b
inline bool is_a_ge_zero_and_a_lt_b(int a, int b) {
   return static_cast<unsigned>(a) < static_cast<unsigned>(b);
}
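// Worked cases (illustrative): a negative a wraps to a very large unsigned value,
// so the single unsigned comparison rejects it just like a value >= b.
//    is_a_ge_zero_and_a_lt_b(-1, 5);  // false : a < 0
//    is_a_ge_zero_and_a_lt_b( 3, 5);  // true  : 0 <= a < b
//    is_a_ge_zero_and_a_lt_b( 7, 5);  // false : a >= b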


/// im2col : efficient function to re-arrange input data of convolution to a matrix
/// that can be used by BLAS
/// Use trick to loop on each element of filtered region first and follow input data layout
/// By doing this reads and writes are of consecutive data in memory and one gains in efficiency
/// The resulting matrix will be already transposed and can be used directly in BLAS
/// since output will be a matrix : (channels*kernel_h*kernel_w , output_h*output_w)
/// Example: with an input matrix
///    a1 a2 a3
///    b1 b2 b3    and a 2x2 kernel (k1,k2,k3,k4) and padding 1 :
///    c1 c2 c3
/// output will be a matrix (4 x 16)
/// the routine will follow output order :
/// first all elements which will be operated by k1, then k2, k3 and k4
/// -> ( 0  0  0  0  0 a1 a2 a3  0 b1 b2 b3  0 c1 c2 c3 )   all elements for k1
///    ( 0  0  0  0 a1 a2 a3  0 b1 b2 b3  0 c1 c2 c3  0 )   for k2
///    ( 0 a1 a2 a3  0 b1 b2 b3  0 c1 c2 c3  0  0  0  0 )   for k3
///    ( a1 a2 a3  0 b1 b2 b3  0 c1 c2 c3  0  0  0  0  0 )  for k4
///

template <typename T>
void Im2col(const T *data_im, const int channels, const int height, const int width, const int kernel_h,
            const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w,
            const int dilation_h, const int dilation_w, T *data_col)
{
   const int output_h = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int channel_size = height * width;
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
         for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
            int input_row = -pad_h + kernel_row * dilation_h;
            for (int output_rows = output_h; output_rows; output_rows--) {
               if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                  for (int output_cols = output_w; output_cols; output_cols--) {
                     *(data_col++) = 0;
                  }
               } else {
                  int input_col = -pad_w + kernel_col * dilation_w;
                  for (int output_col = output_w; output_col; output_col--) {
                     if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                        *(data_col++) = data_im[input_row * width + input_col];
                     } else {
                        *(data_col++) = 0;
                     }
                     input_col += stride_w;
                  }
               }
               input_row += stride_h;
            }
         }
      }
   }
}
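// Minimal usage sketch (hypothetical buffers): for the 1-channel 3x3 input of the
// example above, with a 2x2 kernel, padding 1, stride 1 and dilation 1, the output
// spatial size is 4x4, so data_col must hold (1*2*2) * (4*4) = 64 values.
//    std::vector<float> image(9), col(64);
//    Im2col(image.data(), /*channels*/1, /*h*/3, /*w*/3, /*kernel*/2, 2,
//           /*pad*/1, 1, /*stride*/1, 1, /*dilation*/1, 1, col.data());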

/// 3d implementation
template <typename T>
void Im2col_3d(const T *data_im, const int channels,
               const int depth, const int height, const int width,
               const int kernel_d, const int kernel_h, const int kernel_w,
               const int pad_d, const int pad_h, const int pad_w,
               const int stride_d, const int stride_h, const int stride_w,
               const int dilation_d, const int dilation_h, const int dilation_w, T *data_col)
{
   const int output_h = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int output_d = (depth + 2 * pad_d - (dilation_d * (kernel_d - 1) + 1)) / stride_d + 1;
   const int channel_size = height * width * depth;
   // assume data are c x d x h x w
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_depth = 0; kernel_depth < kernel_d; kernel_depth++) {
         for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
            for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
               int input_dep = -pad_d + kernel_depth * dilation_d;
               for (int output_dep = output_d; output_dep; output_dep--) {
                  if (!is_a_ge_zero_and_a_lt_b(input_dep, depth)) {
                     for (int output_rows = output_h; output_rows; output_rows--) {
                        for (int output_cols = output_w; output_cols; output_cols--) {
                           *(data_col++) = 0;
                        }
                     }
                  } else {
                     int input_row = -pad_h + kernel_row * dilation_h;
                     for (int output_rows = output_h; output_rows; output_rows--) {
                        if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                           for (int output_cols = output_w; output_cols; output_cols--) {
                              *(data_col++) = 0;
                           }
                        } else {
                           int input_col = -pad_w + kernel_col * dilation_w;
                           for (int output_col = output_w; output_col; output_col--) {
                              if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                                 *(data_col++) = data_im[input_dep * width * height + input_row * width + input_col];
                              } else {
                                 *(data_col++) = 0;
                              }
                              input_col += stride_w;
                           }
                        }
                        input_row += stride_h;
                     }
                  }
                  input_dep += stride_d;
               }
            }
         }
      }
   }
}

template <typename Dtype>
void col2im(const Dtype* data_col, const int channels,
            const int height, const int width, const int kernel_h, const int kernel_w,
            const int pad_h, const int pad_w,
            const int stride_h, const int stride_w,
            const int dilation_h, const int dilation_w,
            Dtype* data_im) {
   // the output data_im accumulates overlapping contributions, so it must start from zero
   std::fill(data_im, data_im + height * width * channels, 0.);
   const int output_h = (height + 2 * pad_h -
                         (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w -
                         (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int channel_size = height * width;
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
         for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
            int input_row = -pad_h + kernel_row * dilation_h;
            for (int output_rows = output_h; output_rows; output_rows--) {
               if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                  data_col += output_w;
               } else {
                  int input_col = -pad_w + kernel_col * dilation_w;
                  for (int output_col = output_w; output_col; output_col--) {
                     if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                        data_im[input_row * width + input_col] += *data_col;
                     }
                     data_col++;
                     input_col += stride_w;
                  }
               }
               input_row += stride_h;
            }
         }
      }
   }
}
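// Usage sketch (hypothetical buffers): col2im is the scatter inverse of Im2col,
// summing overlapping patch contributions back into the image (as needed e.g. for
// transposed convolutions). With the same geometry as the Im2col example:
//    std::vector<float> col(64), image(9);
//    col2im(col.data(), /*channels*/1, /*h*/3, /*w*/3, /*kernel*/2, 2,
//           /*pad*/1, 1, /*stride*/1, 1, /*dilation*/1, 1, image.data());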

} // end namespace UTILITY

namespace BLAS{
extern "C" void sgemm_(const char * transa, const char * transb, const int * m, const int * n, const int * k,
                       const float * alpha, const float * A, const int * lda, const float * B, const int * ldb,
                       const float * beta, float * C, const int * ldc);
}//BLAS
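// Illustrative call (assuming a Fortran BLAS is linked; sizes and arrays are
// hypothetical): BLAS expects column-major operands, so a row-major C = A*B is
// typically computed as the column-major product B^T * A^T by swapping the operands.
//    float alpha = 1.f, beta = 0.f;
//    int m = 2, n = 3, k = 4;  // row-major A(m,k), B(k,n), C(m,n) as float arrays
//    BLAS::sgemm_("N", "N", &n, &m, &k, &alpha, B, &n, A, &k, &beta, C, &n);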

struct GNN_Data {
   RTensor<float> node_data;    // the node feature data, tensor with shape (num_nodes, num_node_features)
   RTensor<float> edge_data;    // the edge feature data, tensor with shape (num_edges, num_edge_features)
   RTensor<float> global_data;  // the global features, tensor with shape (1, num_global_features)
   RTensor<int> edge_index;     // the edge index (receivers and senders for each edge), tensor with shape (2, num_edges)
                                // edge_index[0,:] are the receivers and edge_index[1,:] are the senders

   // need to provide a default constructor since RTensor does not have one
   GNN_Data(): node_data(RTensor<float>({})), edge_data(RTensor<float>({})), global_data(RTensor<float>({})), edge_index(RTensor<int>({})) {}
};

template<typename T>
TMVA::Experimental::RTensor<T> Concatenate(TMVA::Experimental::RTensor<T> & t1, TMVA::Experimental::RTensor<T> & t2, int axis = 0)
{
   // concatenate tensor along axis. Shape must be the same except in the dimension of the concatenated axis
   if (t1.GetMemoryLayout() != t2.GetMemoryLayout())
      throw std::runtime_error("TMVA RTensor Concatenate - tensors have different memory layout");
   auto & shape1 = t1.GetShape();
   auto & shape2 = t2.GetShape();
   if (t1.GetSize()/shape1[axis] != t2.GetSize()/shape2[axis]) {
      std::cout << "axis " << axis << " sizes " << t1.GetSize() << " " << t2.GetSize() << " ";
      std::cout << "shape 1 : " << ConvertShapeToString(t1.GetShape());
      std::cout << " shape 2 : " << ConvertShapeToString(t2.GetShape()) << std::endl;
      throw std::runtime_error("TMVA RTensor Concatenate - tensors have incompatible shapes");
   }
   std::vector<size_t> outShape = shape1;
   outShape[axis] = shape1[axis] + shape2[axis];
   TMVA::Experimental::RTensor<T> tout(outShape, t1.GetMemoryLayout());
   if (t1.GetMemoryLayout() == TMVA::Experimental::MemoryLayout::ColumnMajor) {
      throw std::runtime_error("TMVA RTensor Concatenate is not yet supported for column major tensors");
   }

   auto & stride1 = t1.GetStrides();
   auto & stride2 = t2.GetStrides();
   auto & outStride = tout.GetStrides();

   size_t s1 = (axis > 0) ? stride1[axis-1] : t1.GetSize();   // block size to copy from first tensor
   size_t s2 = (axis > 0) ? stride2[axis-1] : t2.GetSize();   // block size to copy from second tensor
   size_t sout = (axis > 0) ? outStride[axis-1] : tout.GetSize();
   size_t nb = t1.GetSize()/s1;
   for (size_t i = 0; i < nb; i++) {
      std::copy(t1.GetData() + i*s1, t1.GetData() + (i+1)*s1, tout.GetData() + i * sout );
      std::copy(t2.GetData() + i*s2, t2.GetData() + (i+1)*s2, tout.GetData() + i * sout + s1 );
   }

   return tout;
}
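// Usage sketch (illustrative): concatenating two row-major tensors along axis 1.
//    TMVA::Experimental::RTensor<float> a({2, 3}), b({2, 5});
//    auto c = Concatenate(a, b, /*axis*/1);   // c has shape (2, 8)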

inline GNN_Data Concatenate(GNN_Data & data1, GNN_Data & data2, int axis = 0) {
   GNN_Data out;
   out.node_data = Concatenate(data1.node_data, data2.node_data, axis);
   out.edge_data = Concatenate(data1.edge_data, data2.edge_data, axis);
   out.global_data = Concatenate<float>(data1.global_data, data2.global_data, axis-1);
   // assume sender/receivers of data1 and data2 are the same
   out.edge_index = data1.edge_index.Copy();
   return out;
}

inline GNN_Data Copy(const GNN_Data & data) {
   GNN_Data out;
   out.node_data = RTensor<float>(data.node_data.GetShape());
   out.edge_data = RTensor<float>(data.edge_data.GetShape());
   out.global_data = RTensor<float>(data.global_data.GetShape());
   out.edge_index = RTensor<int>(data.edge_index.GetShape());
   std::copy(data.node_data.GetData(), data.node_data.GetData() + data.node_data.GetSize(), out.node_data.GetData());
   std::copy(data.edge_data.GetData(), data.edge_data.GetData() + data.edge_data.GetSize(), out.edge_data.GetData());
   std::copy(data.global_data.GetData(), data.global_data.GetData() + data.global_data.GetSize(), out.global_data.GetData());
   std::copy(data.edge_index.GetData(), data.edge_index.GetData() + data.edge_index.GetSize(), out.edge_index.GetData());
   return out;
}

}//SOFIE
}//Experimental
}//TMVA

#endif // TMVA_SOFIE_SOFIE_COMMON