SOFIE_common.hxx
#ifndef TMVA_SOFIE_SOFIE_COMMON
#define TMVA_SOFIE_SOFIE_COMMON

#include "TMVA/RTensor.hxx"

#include <stdexcept>
#include <type_traits>
#include <cstdint>
#include <cstdlib>   // malloc/free used below
#include <cstring>
#include <string>
#include <vector>
#include <memory>
#include <regex>
#include <sstream>
#include <iostream>

namespace TMVA{
namespace Experimental{
namespace SOFIE{

//typedef RTensor tensor_t;

enum class ETensorType{
   UNDEFINED = 0, FLOAT = 1, UNINT8 = 2, INT8 = 3, UINT16 = 4, INT16 = 5, INT32 = 6, INT64 = 7, STRING = 8, BOOL = 9, //order sensitive
   FLOAT16 = 10, DOUBLE = 11, UINT32 = 12, UINT64 = 13, COMPLEX64 = 14, COMPLEX28 = 15, BFLOAT16 = 16
};

typedef std::int64_t int_t;

std::string ConvertTypeToString(ETensorType type);
ETensorType ConvertStringToType(std::string type);

struct Dim{
   bool isParam = false;
   size_t dim;
   std::string param;
};

std::vector<Dim> ConvertShapeToDim(std::vector<size_t> shape);

struct InputTensorInfo{
   ETensorType type;
   std::vector<Dim> shape;
};

struct TensorInfo{
   ETensorType type;
   std::vector<size_t> shape;
};

std::size_t ConvertShapeToLength(std::vector<size_t> shape);

std::string ConvertShapeToString(std::vector<size_t> shape);

struct InitializedTensor{
   ETensorType fType;
   std::vector<std::size_t> fShape;
   std::shared_ptr<void> fData;     //! Transient
   int fSize=1;
   char* fPersistentData=nullptr;   //[fSize] Persistent

   void CastSharedToPersistent(){
      for (auto item : fShape) {
         fSize *= (int)item;
      }
      switch (fType) {
         case ETensorType::FLOAT:  fSize *= sizeof(float);   break;
         case ETensorType::DOUBLE: fSize *= sizeof(double);  break;
         case ETensorType::INT32:  fSize *= sizeof(int32_t); break;
         case ETensorType::INT64:  fSize *= sizeof(int64_t); break;
         default:
            throw std::runtime_error("TMVA::SOFIE does not yet support serialising data-type " + ConvertTypeToString(fType));
      }
      fPersistentData = (char*)fData.get();
   }

   void CastPersistentToShared(){
      switch (fType) {
         case ETensorType::FLOAT: {
            // fSize is already in bytes (set in CastSharedToPersistent)
            std::shared_ptr<void> tData(malloc(fSize), free);
            std::memcpy(tData.get(), fPersistentData, fSize);
            fData = tData;
            break;
         }
         case ETensorType::DOUBLE: {
            std::shared_ptr<void> tData(malloc(fSize), free);
            std::memcpy(tData.get(), fPersistentData, fSize);
            fData = tData;
            break;
         }
         case ETensorType::INT32: {
            std::shared_ptr<void> tData(malloc(fSize), free);
            std::memcpy(tData.get(), fPersistentData, fSize);
            fData = tData;
            break;
         }
         case ETensorType::INT64: {
            std::shared_ptr<void> tData(malloc(fSize), free);
            std::memcpy(tData.get(), fPersistentData, fSize);
            fData = tData;
            break;
         }
         default: {
            throw std::runtime_error("TMVA::SOFIE does not yet support serialising data-type " +
                                     ConvertTypeToString(fType));
         }
      }
   }
};
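
// Illustrative round-trip sketch (not part of the original header; the function
// name is hypothetical): how the two Cast methods cooperate with ROOT I/O.
// Before writing, the transient buffer is exposed through fPersistentData;
// after reading, the persistent bytes are copied into a fresh shared buffer.
inline void InitializedTensorRoundTripExample() {
   InitializedTensor t;
   t.fType  = ETensorType::FLOAT;
   t.fShape = {2, 2};
   t.fData  = std::shared_ptr<void>(malloc(4 * sizeof(float)), free);
   t.CastSharedToPersistent();   // fSize becomes 16 bytes; fPersistentData aliases fData
   t.CastPersistentToShared();   // fData now owns a fresh copy of those 16 bytes
}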

template <typename T>
ETensorType GetTemplatedType(T /*obj*/ ){
   if (std::is_same<T, float>::value) return ETensorType::FLOAT;
   if (std::is_same<T, uint8_t>::value) return ETensorType::UNINT8;
   if (std::is_same<T, int8_t>::value) return ETensorType::INT8;
   if (std::is_same<T, uint16_t>::value) return ETensorType::UINT16;
   if (std::is_same<T, int16_t>::value) return ETensorType::INT16;
   if (std::is_same<T, int32_t>::value) return ETensorType::INT32;
   if (std::is_same<T, int64_t>::value) return ETensorType::INT64;
   if (std::is_same<T, std::string>::value) return ETensorType::STRING;
   if (std::is_same<T, bool>::value) return ETensorType::BOOL;
   //float16 unimplemented
   if (std::is_same<T, double>::value) return ETensorType::DOUBLE;
   if (std::is_same<T, uint32_t>::value) return ETensorType::UINT32;
   if (std::is_same<T, uint64_t>::value) return ETensorType::UINT64;
   //complex 64, 28, bfloat 16 unimplemented
   return ETensorType::UNDEFINED; // avoid falling off the end for unsupported types
}

namespace UTILITY{
// Check if two shapes are equal
bool AreSameShape(const std::vector<size_t>&, const std::vector<size_t>&);

// Multidirectional broadcast a list of tensors to the same shape
std::vector<size_t> MultidirectionalBroadcastShape(std::vector<std::vector<size_t>>);

// Unidirectional broadcast two shapes to the same shape
std::vector<size_t> UnidirectionalBroadcastShape(std::vector<size_t>, std::vector<size_t>);

std::string Clean_name(std::string input_tensor_name);

template<typename T>
T* BroadcastConvBias(const T* data, const size_t channel, const std::vector<size_t>& targetShape) {
   size_t size = targetShape.size();
   if (targetShape[1] != channel) {
      std::stringstream ss;
      ss << "TMVA::SOFIE - Error broadcasting Conv Bias of shape {";
      ss << std::to_string(channel);
      ss << "} to ";
      ss << ConvertShapeToString(targetShape);
      throw std::runtime_error(ss.str());
   }

   size_t targetLength = ConvertShapeToLength(targetShape);
   T* newData = new T[targetLength];

   if (targetLength == channel) {
      std::copy(data, data + channel, newData);
      return newData;
   }

   // cStride = outDepth * outHeight * outWidth
   size_t cStride = 1;
   for (size_t i = 2; i < size; i++)
      cStride *= targetShape[i];
   // Broadcast each element of the bias to a vector of size cStride and concatenate them
   // into a vector of size channel * cStride
   for (size_t i = 0; i < channel; i++) {
      std::fill(newData + i * cStride, newData + (i + 1) * cStride, data[i]);
   }
   // Broadcast newData[0...channel * cStride) to newData[0...batch * channel * cStride)
   size_t batch = targetShape[0];
   size_t bStride = channel * cStride;
   for (size_t i = 1; i < batch; i++) {
      std::copy(newData, newData + bStride, newData + i * bStride);
   }
   return newData;
}
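
// Illustrative usage sketch (not part of the original header; the function name
// is hypothetical): broadcasting a 2-channel bias into an NCHW output of shape
// {1, 2, 2, 2}. Each bias value is repeated over its channel's 2x2 spatial
// plane; the caller owns the returned buffer.
inline void BroadcastConvBiasExample() {
   float bias[2] = {0.5f, -1.0f};
   std::vector<size_t> targetShape = {1, 2, 2, 2};
   float* out = BroadcastConvBias(bias, 2, targetShape);
   // out = {0.5, 0.5, 0.5, 0.5, -1.0, -1.0, -1.0, -1.0}
   delete[] out;
}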

// Broadcast a tensor from shape to targetShape according to numpy broadcasting rules
// See more at https://numpy.org/doc/stable/user/basics.broadcasting.html
// and https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md .
template<typename T>
T* BroadcastTensor(const T* data, const std::vector<size_t>& shape, const std::vector<size_t>& targetShape) {
   // Size of the shapes (shape and targetShape must have the same rank)
   size_t size = shape.size();
   // Current length of the broadcasted tensor
   size_t curLength = ConvertShapeToLength(shape);
   size_t targetLength = ConvertShapeToLength(targetShape);
   // broadcastedData is grown in place, one axis at a time
   T* broadcastedData = new T[targetLength];
   std::copy(data, data + curLength, broadcastedData);
   // Product of the previous dimensions of targetShape
   size_t arrayNum = 1;
   // Scratch buffer holding the data broadcast along the current axis
   std::vector<T> newData(targetLength);

   for (size_t idx = 0; idx < size; idx++) {
      size_t dim = shape[idx];
      size_t targetDim = targetShape[idx];
      if (dim == 1 && targetDim > 1) {
         // Set the new length of the data
         size_t newLength = curLength * targetDim;
         // View the data as a list of arrayNum arrays of size arrayLength
         size_t arrayLength = curLength / arrayNum;
         // Broadcast each array targetDim times
         if (arrayLength > 1) {
            // If each array has at least two elements
            for (size_t arrayIdx = 0; arrayIdx < arrayNum; arrayIdx++) {
               for (size_t targetIdx = 0; targetIdx < targetDim; targetIdx++) {
                  size_t offset = arrayIdx * arrayLength * targetDim + targetIdx * arrayLength;
                  std::copy(broadcastedData + arrayIdx * arrayLength,
                            broadcastedData + (arrayIdx + 1) * arrayLength,
                            newData.begin() + offset);
               }
            }
         } else {
            // If each array has a single element
            for (size_t arrayIdx = 0; arrayIdx < arrayNum; arrayIdx++) {
               std::fill(newData.begin() + arrayIdx * targetDim,
                         newData.begin() + (arrayIdx + 1) * targetDim, broadcastedData[arrayIdx]);
            }
         }
         // Update current length
         curLength = newLength;
         // Update broadcasted data
         std::copy(newData.begin(), newData.begin() + newLength, broadcastedData);
      }
      // Update the number of arrays
      arrayNum *= targetDim;
   }
   return broadcastedData;
}
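
// Illustrative usage sketch (not part of the original header; the function name
// is hypothetical): broadcasting a {3, 1} column vector to {3, 2} repeats each
// element along the second axis.
inline void BroadcastTensorExample() {
   float col[3] = {1.0f, 2.0f, 3.0f};
   float* out = BroadcastTensor(col, {3, 1}, {3, 2});
   // out = {1, 1, 2, 2, 3, 3}
   delete[] out;
}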

// Unidirectional broadcasting of shape to targetShape
template<typename T>
T* UnidirectionalBroadcast(const T* data, const std::vector<size_t>& shape, const std::vector<size_t>& targetShape) {
   // Prepend shape with ones
   if (shape.size() < targetShape.size()) {
      size_t targetSize = targetShape.size();
      std::vector<size_t> newShape(targetSize, 1);
      size_t offset = targetSize - shape.size();
      std::copy(shape.begin(), shape.end(), newShape.begin() + offset);
      return BroadcastTensor<T>(data, newShape, targetShape);
   }
   return BroadcastTensor<T>(data, shape, targetShape);
}

/// compute the strides of a tensor given its shape (assumes row-major layout)
std::vector<size_t> ComputeStrideFromShape(const std::vector<size_t> & shape);
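
// For example (row-major): the stride of axis i is the product of the
// dimensions after i, so shape {2, 3, 4} gives strides {12, 4, 1}.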

/// check whether a >= 0 and a < b using a single comparison:
/// casting to unsigned maps negative values of a above the valid range
inline bool is_a_ge_zero_and_a_lt_b(int a, int b) {
   return static_cast<unsigned>(a) < static_cast<unsigned>(b);
}
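
// For example, is_a_ge_zero_and_a_lt_b(-1, 3) is false because
// static_cast<unsigned>(-1) wraps to UINT_MAX, which is not < 3u.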


/// im2col : efficient function to re-arrange input data of convolution to a matrix
/// that can be used by BLAS
/// Use trick to loop on each element of filtered region first and follow input data layout
/// By doing this reads and writes are of consecutive data in memory and one gains in efficiency
/// The resulting matrix will be already transposed and can be used directly in BLAS
/// since output will be a matrix : (channels*kernel_h*kernel_w , output_h*output_w)
/// Example: with an input matrix
///    a1 a2 a3
///    b1 b2 b3    and a 2x2 kernel (k1,k2,k3,k4) and padding 1 :
///    c1 c2 c3
/// output will be a matrix (4 x 16)
/// the routine will follow the output order :
/// first all elements which will be operated by k1, then k2, k3, k4
/// -> ( 0 0 0 0 0 a1 a2 a3 0 b1 b2 b3 0 c1 c2 c3 )  all elements for k1
///    ( 0 0 0 0 a1 a2 a3 0 b1 b2 b3 0 c1 c2 c3 0 )  for k2
///    ( 0 a1 a2 a3 0 b1 b2 b3 0 c1 c2 c3 0 0 0 0 )  for k3
///    ( a1 a2 a3 0 b1 b2 b3 0 c1 c2 c3 0 0 0 0 0 )  for k4
///
template <typename T>
void Im2col(const T *data_im, const int channels, const int height, const int width, const int kernel_h,
            const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w,
            const int dilation_h, const int dilation_w, T *data_col)
{
   const int output_h = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int channel_size = height * width;
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
         for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
            int input_row = -pad_h + kernel_row * dilation_h;
            for (int output_rows = output_h; output_rows; output_rows--) {
               if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                  for (int output_cols = output_w; output_cols; output_cols--) {
                     *(data_col++) = 0;
                  }
               } else {
                  int input_col = -pad_w + kernel_col * dilation_w;
                  for (int output_col = output_w; output_col; output_col--) {
                     if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                        *(data_col++) = data_im[input_row * width + input_col];
                     } else {
                        *(data_col++) = 0;
                     }
                     input_col += stride_w;
                  }
               }
               input_row += stride_h;
            }
         }
      }
   }
}
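
// Illustrative usage sketch (not part of the original header; the function name
// is hypothetical), reproducing the worked example from the comment above: a
// single-channel 3x3 input with a 2x2 kernel, padding 1, stride 1 and
// dilation 1 yields a 4 x 16 output matrix, one row per kernel element.
inline void Im2colExample() {
   float input[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9}; // a1..a3, b1..b3, c1..c3
   float col[4 * 16];
   Im2col(input, /*channels=*/1, /*height=*/3, /*width=*/3,
          /*kernel_h=*/2, /*kernel_w=*/2, /*pad_h=*/1, /*pad_w=*/1,
          /*stride_h=*/1, /*stride_w=*/1, /*dilation_h=*/1, /*dilation_w=*/1, col);
   // First row (k1): 0 0 0 0 0 1 2 3 0 4 5 6 0 7 8 9
}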

/// 3d implementation
template <typename T>
void Im2col_3d(const T *data_im, const int channels,
               const int depth, const int height, const int width,
               const int kernel_d, const int kernel_h, const int kernel_w,
               const int pad_d, const int pad_h, const int pad_w,
               const int stride_d, const int stride_h, const int stride_w,
               const int dilation_d, const int dilation_h, const int dilation_w, T *data_col)
{
   const int output_h = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int output_d = (depth + 2 * pad_d - (dilation_d * (kernel_d - 1) + 1)) / stride_d + 1;
   const int channel_size = height * width * depth;
   // assume data are c x d x h x w
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_depth = 0; kernel_depth < kernel_d; kernel_depth++) {
         for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
            for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
               int input_dep = -pad_d + kernel_depth * dilation_d;
               for (int output_dep = output_d; output_dep; output_dep--) {
                  if (!is_a_ge_zero_and_a_lt_b(input_dep, depth)) {
                     for (int output_rows = output_h; output_rows; output_rows--) {
                        for (int output_cols = output_w; output_cols; output_cols--) {
                           *(data_col++) = 0;
                        }
                     }
                  } else {
                     int input_row = -pad_h + kernel_row * dilation_h;
                     for (int output_rows = output_h; output_rows; output_rows--) {
                        if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                           for (int output_cols = output_w; output_cols; output_cols--) {
                              *(data_col++) = 0;
                           }
                        } else {
                           int input_col = -pad_w + kernel_col * dilation_w;
                           for (int output_col = output_w; output_col; output_col--) {
                              if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                                 *(data_col++) = data_im[input_dep * width * height + input_row * width + input_col];
                              } else {
                                 *(data_col++) = 0;
                              }
                              input_col += stride_w;
                           }
                        }
                        input_row += stride_h;
                     }
                  }
                  input_dep += stride_d;
               }
            }
         }
      }
   }
}

template <typename Dtype>
void col2im(const Dtype* data_col, const int channels,
            const int height, const int width, const int kernel_h, const int kernel_w,
            const int pad_h, const int pad_w,
            const int stride_h, const int stride_w,
            const int dilation_h, const int dilation_w,
            Dtype* data_im) {
   // the output data_im is accumulated into, so it must start out zeroed
   std::fill(data_im, data_im + height * width * channels, Dtype(0));
   const int output_h = (height + 2 * pad_h -
                         (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w -
                         (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int channel_size = height * width;
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
         for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
            int input_row = -pad_h + kernel_row * dilation_h;
            for (int output_rows = output_h; output_rows; output_rows--) {
               if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                  data_col += output_w;
               } else {
                  int input_col = -pad_w + kernel_col * dilation_w;
                  for (int output_col = output_w; output_col; output_col--) {
                     if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                        data_im[input_row * width + input_col] += *data_col;
                     }
                     data_col++;
                     input_col += stride_w;
                  }
               }
               input_row += stride_h;
            }
         }
      }
   }
}

} // end namespace UTILITY

namespace BLAS{
extern "C" void sgemm_(const char * transa, const char * transb, const int * m, const int * n, const int * k,
                       const float * alpha, const float * A, const int * lda, const float * B, const int * ldb,
                       const float * beta, float * C, const int * ldc);
} // end namespace BLAS
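
// Illustrative usage sketch (not part of the original header; the function name
// is hypothetical): sgemm_ computes C = alpha*A*B + beta*C for column-major
// matrices, following the standard BLAS interface. The im2col comment above
// notes that its output matrix can be fed directly into such a call.
inline void GemmExample() {
   const int m = 2, n = 2, k = 2;
   const float alpha = 1.0f, beta = 0.0f;
   float A[4] = {1, 2, 3, 4}; // 2x2, column-major
   float B[4] = {1, 0, 0, 1}; // 2x2 identity
   float C[4] = {0, 0, 0, 0};
   BLAS::sgemm_("N", "N", &m, &n, &k, &alpha, A, &m, B, &k, &beta, C, &m);
   // C now equals A
}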


struct GNN_Data {
   RTensor<float> node_data;    // node feature data, tensor of shape (num_nodes, num_node_features)
   RTensor<float> edge_data;    // edge feature data, tensor of shape (num_edges, num_edge_features)
   RTensor<float> global_data;  // global feature data, tensor of shape (1, num_global_features)

   std::vector<int> receivers;
   std::vector<int> senders;

   // need a default constructor since RTensor does not have one
   GNN_Data(): node_data(RTensor<float>({})), edge_data(RTensor<float>({})), global_data(RTensor<float>({})) {}

};

template<typename T>
TMVA::Experimental::RTensor<T> Concatenate( TMVA::Experimental::RTensor<T> & t1, TMVA::Experimental::RTensor<T> & t2, int axis = 0)
{
   // concatenate tensors along the given axis. Shapes must be the same except in the dimension of the concatenated axis
   if (t1.GetMemoryLayout() != t2.GetMemoryLayout())
      throw std::runtime_error("TMVA RTensor Concatenate - tensors have different memory layout");
   auto & shape1 = t1.GetShape();
   auto & shape2 = t2.GetShape();
   if (t1.GetSize()/shape1[axis] != t2.GetSize()/shape2[axis])
      throw std::runtime_error("TMVA RTensor Concatenate - tensors have incompatible shapes");
   std::vector<size_t> outShape = shape1;
   outShape[axis] = shape1[axis] + shape2[axis];
   TMVA::Experimental::RTensor<T> tout(outShape, t1.GetMemoryLayout());
   if (t1.GetMemoryLayout() == TMVA::Experimental::MemoryLayout::ColumnMajor) {
      throw std::runtime_error("TMVA RTensor Concatenate is not yet supported for column major tensors");
   }

   auto & stride1 = t1.GetStrides();
   auto & stride2 = t2.GetStrides();
   auto & outStride = tout.GetStrides();

   size_t s1 = (axis > 0) ? stride1[axis-1] : t1.GetSize();   // block size to copy from first tensor
   size_t s2 = (axis > 0) ? stride2[axis-1] : t2.GetSize();   // block size to copy from second tensor
   size_t sout = (axis > 0) ? outStride[axis-1] : tout.GetSize();
   size_t nb = t1.GetSize()/s1;
   for (size_t i = 0; i < nb; i++) {
      std::copy(t1.GetData() + i*s1, t1.GetData() + (i+1)*s1, tout.GetData() + i * sout );
      std::copy(t2.GetData() + i*s2, t2.GetData() + (i+1)*s2, tout.GetData() + i * sout + s1 );
   }

   return tout;
}
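
// Illustrative usage sketch (not part of the original header; the function name
// is hypothetical): concatenating a 2x3 and a 1x3 row-major tensor along
// axis 0 yields a 3x3 tensor; the first two rows come from t1, the last from t2.
inline void ConcatenateExample() {
   TMVA::Experimental::RTensor<float> t1({2, 3});
   TMVA::Experimental::RTensor<float> t2({1, 3});
   auto t3 = Concatenate(t1, t2, /*axis=*/0);
   // t3.GetShape() == {3, 3}
}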

inline GNN_Data Concatenate(GNN_Data & data1, GNN_Data & data2, int axis = 0) {
   GNN_Data out;
   out.node_data = Concatenate(data1.node_data, data2.node_data, axis);
   out.edge_data = Concatenate(data1.edge_data, data2.edge_data, axis);
   out.global_data = Concatenate<float>(data1.global_data, data2.global_data, axis-1);
   // assume senders/receivers of data1 and data2 are the same
   if (data1.receivers != data2.receivers || data1.senders != data2.senders)
      throw std::runtime_error("GNN_Data Concatenate: data1 and data2 have different net structures");
   out.receivers = data1.receivers;
   out.senders = data1.senders;
   return out;
}

inline GNN_Data Copy(const GNN_Data & data) {
   GNN_Data out;
   out.node_data = RTensor<float>(data.node_data.GetShape());
   out.edge_data = RTensor<float>(data.edge_data.GetShape());
   out.global_data = RTensor<float>(data.global_data.GetShape());
   std::copy(data.node_data.GetData(), data.node_data.GetData() + data.node_data.GetSize(), out.node_data.GetData());
   std::copy(data.edge_data.GetData(), data.edge_data.GetData() + data.edge_data.GetSize(), out.edge_data.GetData());
   std::copy(data.global_data.GetData(), data.global_data.GetData() + data.global_data.GetSize(), out.global_data.GetData());
   out.receivers = data.receivers;
   out.senders = data.senders;
   return out;
}

} // namespace SOFIE
} // namespace Experimental
} // namespace TMVA

#endif // TMVA_SOFIE_SOFIE_COMMON