SOFIE_common.hxx
#ifndef TMVA_SOFIE_SOFIE_COMMON
#define TMVA_SOFIE_SOFIE_COMMON

#include "TMVA/RTensor.hxx"

#include "ROOT/RSpan.hxx"

#include <stdexcept>
#include <type_traits>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>
#include <memory>
#include <regex>
#include <sstream>
#include <iostream>
#include <iomanip>
#include <cassert>
#include <limits>

namespace TMVA{
namespace Experimental{
namespace SOFIE{

//typedef RTensor tensor_t;

enum class ETensorType{
   UNDEFINED = 0, FLOAT = 1, UNINT8 = 2, INT8 = 3, UINT16 = 4, INT16 = 5, INT32 = 6, INT64 = 7, STRING = 8, BOOL = 9, //order sensitive
   FLOAT16 = 10, DOUBLE = 11, UINT32 = 12, UINT64 = 13, COMPLEX64 = 14, COMPLEX28 = 15, BFLOAT16 = 16
};

typedef std::int64_t int_t;

std::string ConvertTypeToString(ETensorType type);
ETensorType ConvertStringToType(std::string type);

struct Dim{
   bool isParam = false;
   size_t dim = 0;
   std::string param;

   // default constructor (for I/O)
   Dim() {}

   // constructor for a parametric dimension with the option to pass a default dim value
   Dim(const std::string & p, size_t d = 0) : isParam(true), dim(d), param(p) {}

   // constructor for a non-parametric dimension
   Dim(size_t d) : dim(d) {}

   std::string GetVal() const {
      return (isParam) ? param : std::to_string(dim);
   }
};
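/// Example (illustrative sketch, not part of the original header): a Dim can hold
/// either a fixed size or a named, parametric size such as a batch dimension:
/// \code{.cpp}
/// Dim batch("bs");     // parametric dimension, GetVal() returns "bs"
/// Dim feature(128);    // fixed dimension, GetVal() returns "128"
/// \endcode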

struct InputTensorInfo{
   ETensorType type;
   std::vector<Dim> shape;
};

struct TensorInfo{
   ETensorType type;
   std::vector<size_t> shape;
};

struct DynamicTensorInfo{
   ETensorType type;
   std::vector<Dim> shape;
};
// template traits for Tensor type
template <typename T>
struct TensorType {};
template<>
struct TensorType<float> {
   static const std::string Name() { return "float"; }
};
template<>
struct TensorType<double> {
   static const std::string Name() { return "double"; }
};
template<>
struct TensorType<int64_t> {
   static const std::string Name() { return "int64_t"; }
};
template<>
struct TensorType<int32_t> {
   static const std::string Name() { return "int32_t"; }
};
template<>
struct TensorType<uint32_t> {
   static const std::string Name() { return "uint32_t"; }
};
template<>
struct TensorType<uint64_t> {
   static const std::string Name() { return "uint64_t"; }
};


std::vector<Dim> ConvertShapeToDim(std::vector<size_t> shape);

std::vector<size_t> ConvertShapeToInt(std::vector<Dim> shape);

std::size_t ConvertShapeToLength(std::vector<size_t> shape);

std::string ConvertShapeToString(std::vector<size_t> shape);
std::string ConvertDynamicShapeToString(std::vector<Dim> shape);
// std::string ConvertShapeToString(std::vector<Dim> shape) {
//    return ConvertDynamicShapeToString(shape);
// }

std::string ConvertDynamicShapeToLength(std::vector<Dim> shape);

template<class T>
std::string ConvertValToString(T value) {
   std::stringstream ret;
   if (std::is_floating_point_v<T>)
      ret << std::setprecision(std::numeric_limits<T>::max_digits10);
   ret << value;
   return ret.str();
}


// convert a list of values to a string taking into account the precision
template<class T>
std::string ConvertValuesToString(size_t n, const T * data) {
   std::stringstream ret;
   ret << "{ ";
   for (size_t i = 0; i < n; i++) {
      if (std::is_floating_point_v<T>)
         ret << std::setprecision(std::numeric_limits<T>::max_digits10);
      ret << data[i];
      if (i < n-1) ret << ", ";
   }
   ret << "}";
   return ret.str();
}
template<class T>
std::string ConvertValuesToString(const std::vector<T> & data) {
   return ConvertValuesToString(data.size(), data.data());
}

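/// Example (illustrative sketch, not part of the original header):
/// \code{.cpp}
/// std::vector<float> v{1.5f, 2.f, 3.25f};
/// std::string s = ConvertValuesToString(v);   // yields "{ 1.5, 2, 3.25}"
/// \endcode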
class InitializedTensor {
public:
   InitializedTensor() = default;
   InitializedTensor(ETensorType type, std::span<std::size_t> shape, std::shared_ptr<void> data, bool typeConstant = false)
      : fConstant(typeConstant), fType{type}, fShape{shape.begin(), shape.end()}, fData{data}
   {
   }

   ETensorType const &type() const { return fType; }
   std::vector<std::size_t> const &shape() const { return fShape; }
   std::shared_ptr<void> const &sharedptr() const { return fData; }
   // query if tensor comes from a Constant operator
   bool IsConstantTensor() const { return fConstant; }
   // query if tensor needs to be written in a weight file. Constant tensors are not written in a file
   bool IsWeightTensor() const { return !fConstant && !fIsNotWritable; }
   // set a not-writable initialized tensor - i.e. a tensor that must not be written in a file
   void SetNotWritable() { fIsNotWritable = true; }

   template <class T = void>
   T const *data() const
   {
      return static_cast<T const *>(fData.get());
   }

   void CastSharedToPersistent()
   {
      // We only calculate fSize here, because it is only used for IO to know
      // the size of the persistent data.
      fSize = 1;
      for (std::size_t item : fShape) {
         fSize *= static_cast<int>(item);
      }
      switch (fType) {
      case ETensorType::FLOAT: fSize *= sizeof(float); break;
      case ETensorType::DOUBLE: fSize *= sizeof(double); break;
      case ETensorType::INT32: fSize *= sizeof(int32_t); break;
      case ETensorType::INT64: fSize *= sizeof(int64_t); break;
      case ETensorType::BOOL: fSize *= sizeof(bool); break;
      default:
         throw std::runtime_error("TMVA::SOFIE doesn't yet support serialising data-type " +
                                  ConvertTypeToString(fType));
      }
      fPersistentData = static_cast<char *>(fData.get());
   }
   void CastPersistentToShared()
   {
      // If there is no persistent data, do nothing
      if (fSize == 0 || fPersistentData == nullptr) {
         return;
      }

      // Nothing to be done if the pointed-to data is the same
      if (fPersistentData == static_cast<char *>(fData.get())) {
         return;
      }

      // Initialize the shared_ptr
      fData = std::shared_ptr<void>{malloc(fSize), free};
      std::memcpy(fData.get(), fPersistentData, fSize);

      // Make sure the data read from disk doesn't leak and delete the
      // persistent data
      delete[] fPersistentData;
      fPersistentData = nullptr;
      fSize = 0;
   }

private:
   bool fConstant = false;            ///< Flag specifying if tensor is a Constant one (coming from a Constant operator)
   bool fIsNotWritable = false;       ///< Flag to indicate that tensor values do not need to be written as weight or generated code
   ETensorType fType;                 ///< Encodes the type of the data
   std::vector<std::size_t> fShape;   ///< The shape of the data in terms of elements in each dimension
   std::shared_ptr<void> fData;       ///<! Transient shared data
   int fSize = 0;                     ///< The size of the persistent data in bytes (not number of elements!)
   char *fPersistentData = nullptr;   ///<[fSize] Persistent version of the data
};
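/// Example (illustrative sketch, not part of the original header): wrapping an
/// externally allocated buffer in an InitializedTensor:
/// \code{.cpp}
/// std::vector<std::size_t> shape{2, 3};
/// auto buf = std::shared_ptr<void>(malloc(2 * 3 * sizeof(float)), free);
/// InitializedTensor t(ETensorType::FLOAT, shape, buf);
/// const float *values = t.data<float>();
/// \endcode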

template <typename T>
ETensorType GetTemplatedType(T /*obj*/ ){
   if (std::is_same<T, float>::value) return ETensorType::FLOAT;
   if (std::is_same<T, uint8_t>::value) return ETensorType::UNINT8;
   if (std::is_same<T, int8_t>::value) return ETensorType::INT8;
   if (std::is_same<T, uint16_t>::value) return ETensorType::UINT16;
   if (std::is_same<T, int16_t>::value) return ETensorType::INT16;
   if (std::is_same<T, int32_t>::value) return ETensorType::INT32;
   if (std::is_same<T, int64_t>::value) return ETensorType::INT64;
   if (std::is_same<T, std::string>::value) return ETensorType::STRING;
   if (std::is_same<T, bool>::value) return ETensorType::BOOL;
   //float16 unimplemented
   if (std::is_same<T, double>::value) return ETensorType::DOUBLE;
   if (std::is_same<T, uint32_t>::value) return ETensorType::UINT32;
   if (std::is_same<T, uint64_t>::value) return ETensorType::UINT64;
   //complex 64, 28, bfloat 16 unimplemented
   return ETensorType::UNDEFINED;   // fall-back for unsupported types (avoids a missing-return path)
}
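/// Example (illustrative, not part of the original header):
/// \code{.cpp}
/// ETensorType t = GetTemplatedType(float{});   // ETensorType::FLOAT
/// \endcode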

namespace UTILITY{
// Check if two shapes are equal
bool AreSameShape(const std::vector<size_t>&, const std::vector<size_t>&);
bool AreSameShape(const std::vector<size_t>&, const std::vector<Dim>&);
bool AreSameShape(const std::vector<Dim>&, const std::vector<Dim>&);


// Multidirectional broadcast a list of tensors to the same shape
std::vector<size_t> MultidirectionalBroadcastShape(std::vector<std::vector<size_t>>);

// Unidirectional broadcast two shapes to the same shape
std::vector<size_t> UnidirectionalBroadcastShape(std::vector<size_t>, std::vector<size_t>);

std::string Clean_name(std::string input_tensor_name);
template<typename T>
T* BroadcastConvBias(const T* data, const size_t channel, const std::vector<size_t>& targetShape) {
   size_t size = targetShape.size();
   if (targetShape[1] != channel) {
      std::stringstream ss;
      ss << "TMVA::SOFIE - Error broadcasting Conv Bias of shape {";
      ss << std::to_string(channel);
      ss << "} to ";
      ss << ConvertShapeToString(targetShape);
      throw
         std::runtime_error(ss.str());
   }

   size_t targetLength = ConvertShapeToLength(targetShape);
   T* newData = new T[targetLength];

   if (targetLength == channel) {
      std::copy(data, data + channel, newData);
      return newData;
   }

   // cStride = OutDepth * outHeight * outWidth
   size_t cStride = 1;
   for (size_t i = 2; i < size; i++)
      cStride *= targetShape[i];
   // Broadcast each element of the bias to a vector of size cStride and concatenate them
   // into a vector of size channel * cStride
   for (size_t i = 0; i < channel; i++) {
      std::fill(newData + i * cStride, newData + (i + 1) * cStride, data[i]);
   }
   // Broadcast newData[0...channel * cStride) to newData[0...batch * channel * cStride)
   size_t batch = targetShape[0];
   size_t bStride = channel * cStride;
   for (size_t i = 1; i < batch; i++) {
      std::copy(newData, newData + bStride, newData + i * bStride);
   }
   return newData;
}
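/// Example (illustrative sketch, not part of the original header): broadcast a
/// per-channel bias to a full convolution output of shape {N, C, H, W}; the caller
/// owns the returned array:
/// \code{.cpp}
/// float bias[2] = {0.1f, 0.2f};
/// float *full = UTILITY::BroadcastConvBias(bias, 2, {2, 2, 2, 2});
/// delete[] full;
/// \endcode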

// Broadcast a tensor from shape to targetShape according to numpy broadcasting rules
// See more at https://numpy.org/doc/stable/user/basics.broadcasting.html
// and https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md .
template<typename T, class ContT = std::span<T> >
void BroadcastTensor(ContT data, const std::vector<size_t>& shape, const std::vector<size_t>& targetShape, ContT broadcastedData) {
   // Size of the shapes (the input shape here must have the same size as the target one,
   // the needed dimensions have already been prepended)
   size_t size = shape.size();
   // Current length of the broadcasted tensor
   size_t curLength = data.size();
   size_t targetLength = broadcastedData.size();
   assert(targetLength == ConvertShapeToLength(targetShape));
   // special case when broadcasting last dimensions (initial shapes must be the same)
   if (shape.front() == targetShape.front() && shape.back() == 1 && size > 1) {
      size_t bsize = targetShape.back();
      // compute the size of the data to broadcast
      for (int k = int(size)-2; k >= 0; k--) {
         if (shape[k] != 1) break;
         bsize *= targetShape[k];
      }
      for (size_t i = 0; i < curLength; i++) {
         std::fill(broadcastedData.begin() + i*bsize, broadcastedData.begin() + (i+1)*bsize, data[i]);
      }
      return;
   }

   std::copy(data.begin(), data.end(), broadcastedData.begin());
   // Product of the previous dimensions of targetShape
   size_t arrayNum = 1;
   // New broadcasted data: is this needed?
   std::vector<T> newData(targetLength);

   for (size_t idx = 0; idx < size; idx++) {
      size_t dim = shape[idx];
      size_t targetDim = targetShape[idx];
      if (dim == 1 && targetDim > 1) {
         // Set the new length of the data
         size_t newLength = curLength * targetDim;
         // View the data as a list of arrayNum arrays of size arrayLength
         size_t arrayLength = curLength / arrayNum;
         // Broadcast each array dim times
         if (arrayLength > 1) {
            // If each array has at least two elements
            for (size_t arrayIdx = 0; arrayIdx < arrayNum; arrayIdx++) {
               for (size_t targetIdx = 0; targetIdx < targetDim; targetIdx++) {
                  size_t offset = arrayIdx * arrayLength * targetDim + targetIdx * arrayLength;
                  std::copy(broadcastedData.begin() + arrayIdx * arrayLength,
                            broadcastedData.begin() + (arrayIdx + 1) * arrayLength,
                            newData.begin() + offset);
               }
            }
         } else {
            // If each array has one element
            for (size_t arrayIdx = 0; arrayIdx < arrayNum; arrayIdx++) {
               std::fill(newData.begin() + arrayIdx * targetDim,
                         newData.begin() + (arrayIdx + 1) * targetDim, broadcastedData[arrayIdx]);
            }
         }
         // Update current length
         curLength = newLength;
         // Update broadcasted data
         std::copy(newData.begin(), newData.begin() + newLength, broadcastedData.begin());
      }
      // Update the number of arrays
      arrayNum *= targetDim;
   }
   //return broadcastedData;
}

// interface where we allocate a new array for broadcasted data
template<typename T>
T* CreateBroadcastTensor(T* data, const std::vector<size_t>& shape, const std::vector<size_t>& targetShape, size_t targetLength) {
   // newShape is an array of size equal to dimension along which we are broadcasting the tensor
   T* broadcastedData = new T[targetLength];
   std::span<T> bData(broadcastedData, broadcastedData + targetLength);
   size_t curLength = ConvertShapeToLength(shape);
   std::span<T> inData(data, data + curLength);
   BroadcastTensor<T, std::span<T>>(inData, shape, targetShape, bData);
   return broadcastedData;
}
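/// Example (illustrative sketch, not part of the original header): broadcast a
/// {1, 3} tensor to {2, 3}; the caller owns the returned array:
/// \code{.cpp}
/// float in[3] = {1.f, 2.f, 3.f};
/// float *out = UTILITY::CreateBroadcastTensor(in, {1, 3}, {2, 3}, 6);
/// // out = {1, 2, 3, 1, 2, 3}
/// delete[] out;
/// \endcode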
// Unidirectional broadcasting of shape to targetShape.
// In a unidirectional broadcast only tensor B can have its shape changed, not
// tensor A - otherwise it is a multidirectional broadcast
template<typename T>
T* UnidirectionalBroadcast(T* data, const std::vector<size_t>& shape, const std::vector<size_t>& targetShape) {
   // Prepend shape with ones
   if (shape.size() < targetShape.size()) {
      size_t targetSize = targetShape.size();
      std::vector<size_t> newShape(targetSize, 1);
      size_t offset = targetSize - shape.size();
      std::copy(shape.begin(), shape.end(), newShape.begin() + offset);
      return CreateBroadcastTensor<T>(data, newShape, targetShape, ConvertShapeToLength(targetShape));
   }
   return CreateBroadcastTensor<T>(data, shape, targetShape, ConvertShapeToLength(targetShape));
}

// Unidirectional broadcasting of shape to targetShape using a passed vector to avoid allocations
template<typename T>
void UnidirectionalBroadcast(T* data, const std::vector<size_t>& shape, const std::vector<size_t>& targetShape, std::span<T> broadcastedData) {
   size_t curLength = ConvertShapeToLength(shape);
   std::span<T> inData(data, data + curLength);
   // Prepend shape with ones
   if (shape.size() < targetShape.size()) {
      size_t targetSize = targetShape.size();
      std::vector<size_t> newShape(targetSize, 1);
      size_t offset = targetSize - shape.size();
      std::copy(shape.begin(), shape.end(), newShape.begin() + offset);
      return BroadcastTensor<T, std::span<T>>(inData, newShape, targetShape, broadcastedData);
   }
   BroadcastTensor<T, std::span<T>>(inData, shape, targetShape, broadcastedData);
}
// specialization for vector of boolean
void UnidirectionalBroadcast(const std::vector<bool> & data, const std::vector<size_t>& shape, const std::vector<size_t>& targetShape, std::vector<bool> & broadcastedData);

/// compute stride of a tensor given its shape (assume layout is row-major)
std::vector<size_t> ComputeStrideFromShape(const std::vector<size_t> & shape);
std::vector<Dim> ComputeStrideFromShape(const std::vector<Dim> & shape);

/// function to check if a >= 0 and a < MAX using a single comparison:
/// use the trick of casting to unsigned values so it becomes a single comparison
inline bool is_a_ge_zero_and_a_lt_b(int a, int b) {
   return static_cast<unsigned>(a) < static_cast<unsigned>(b);
}
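/// Example (illustrative, not part of the original header): a negative `a` wraps
/// around to a very large unsigned value, so both bound checks collapse into one:
/// \code{.cpp}
/// UTILITY::is_a_ge_zero_and_a_lt_b(-1, 10);   // false: unsigned(-1) is huge
/// UTILITY::is_a_ge_zero_and_a_lt_b(3, 10);    // true
/// \endcode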


/// im2col : efficient function to re-arrange input data of convolution to a matrix
/// that can be used by BLAS
/// Use trick to loop on each element of filtered region first and follow input data layout
/// By doing this reads and writes are of consecutive data in memory and one gains in efficiency
/// The resulting matrix will be already transposed and can be used directly in BLAS
/// since output will be a matrix : (channels*kernel_h*kernel_w , output_h*output_w)
/// Example: with an input matrix
///    a1 a2 a3
///    b1 b2 b3    and a 2x2 kernel (k1,k2,k3,k4) and padding 1 :
///    c1 c2 c3
/// output will be a matrix (4 x 16)
/// the routine will follow output order :
/// first all elements which will be operated by k1 then k2 then k3
/// -> ( 0 0 0 0 0 a1 a2 a3 0 b1 b2 b3 0 c1 c2 c3 )    all elements for k1
///    ( 0 0 0 0 a1 a2 a3 0 b1 b2 b3 0 c1 c2 c3 0 )    for k2
///    ( 0 a1 a2 a3 0 b1 b2 b3 0 c1 c2 c3 0 0 0 0 )    for k3
///    ( a1 a2 a3 0 b1 b2 b3 0 c1 c2 c3 0 0 0 0 0 )    for k4
///

template <typename T>
void Im2col(const T *data_im, const int channels, const int height, const int width, const int kernel_h,
            const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w,
            const int dilation_h, const int dilation_w, T *data_col)
{
   const int output_h = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int channel_size = height * width;
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
         for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
            int input_row = -pad_h + kernel_row * dilation_h;
            for (int output_rows = output_h; output_rows; output_rows--) {
               if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                  // the whole output row falls in the padding region: fill with zeros
                  for (int output_cols = output_w; output_cols; output_cols--) {
                     *(data_col++) = 0;
                  }
               } else {
                  int input_col = -pad_w + kernel_col * dilation_w;
                  for (int output_col = output_w; output_col; output_col--) {
                     if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                        *(data_col++) = data_im[input_row * width + input_col];
                     } else {
                        *(data_col++) = 0;
                     }
                     input_col += stride_w;
                  }
               }
               input_row += stride_h;
            }
         }
      }
   }
}
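/// Example (illustrative sketch, not part of the original header): for the 3x3 input
/// and 2x2 kernel with padding 1 described above (`input` points to the 9 pixel values):
/// \code{.cpp}
/// const int out_h = (3 + 2*1 - 2) / 1 + 1;              // 4
/// const int out_w = (3 + 2*1 - 2) / 1 + 1;              // 4
/// std::vector<float> col(1 * 2 * 2 * out_h * out_w);    // (C*kh*kw) x (out_h*out_w)
/// UTILITY::Im2col(input, 1, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, col.data());
/// \endcode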

/// 3d implementation
template <typename T>
void Im2col_3d(const T *data_im, const int channels,
            const int depth, const int height, const int width,
            const int kernel_d, const int kernel_h, const int kernel_w,
            const int pad_d, const int pad_h, const int pad_w,
            const int stride_d, const int stride_h, const int stride_w,
            const int dilation_d, const int dilation_h, const int dilation_w, T *data_col)
{
   const int output_h = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int output_d = (depth + 2 * pad_d - (dilation_d * (kernel_d - 1) + 1)) / stride_d + 1;
   const int channel_size = height * width * depth;
   // assume data are c x d x h x w
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_depth = 0; kernel_depth < kernel_d; kernel_depth++) {
         for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
            for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
               int input_dep = -pad_d + kernel_depth * dilation_d;
               for (int output_dep = output_d; output_dep; output_dep--) {
                  if (!is_a_ge_zero_and_a_lt_b(input_dep, depth)) {
                     // the whole depth slice falls in the padding region: fill with zeros
                     for (int output_rows = output_h; output_rows; output_rows--) {
                        for (int output_cols = output_w; output_cols; output_cols--) {
                           *(data_col++) = 0;
                        }
                     }
                  } else {
                     int input_row = -pad_h + kernel_row * dilation_h;
                     for (int output_rows = output_h; output_rows; output_rows--) {
                        if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                           for (int output_cols = output_w; output_cols; output_cols--) {
                              *(data_col++) = 0;
                           }
                        } else {
                           int input_col = -pad_w + kernel_col * dilation_w;
                           for (int output_col = output_w; output_col; output_col--) {
                              if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                                 *(data_col++) = data_im[input_dep * height * width + input_row * width + input_col];
                              } else {
                                 *(data_col++) = 0;
                              }
                              input_col += stride_w;
                           }
                        }
                        input_row += stride_h;
                     }
                  }
                  input_dep += stride_d;
               }
            }
         }
      }
   }
}

template <typename Dtype>
void col2im(const Dtype* data_col, const int channels,
            const int height, const int width, const int kernel_h, const int kernel_w,
            const int pad_h, const int pad_w,
            const int stride_h, const int stride_w,
            const int dilation_h, const int dilation_w,
            Dtype* data_im) {
   // note that the output data_im needs to be set to zero first
   std::fill(data_im, data_im + height * width * channels, 0.);
   //caffe_set(height * width * channels, Dtype(0), data_im);
   // data_im must be a zero vector
   //const Dtype * data_col_0 = data_col;
   const int output_h = (height + 2 * pad_h -
      (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w -
      (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int channel_size = height * width;
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
         for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
            int input_row = -pad_h + kernel_row * dilation_h;
            for (int output_rows = output_h; output_rows; output_rows--) {
               if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                  // the whole output row falls in the padding region: skip it
                  data_col += output_w;
               } else {
                  int input_col = -pad_w + kernel_col * dilation_w;
                  for (int output_col = output_w; output_col; output_col--) {
                     if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                        //assert(input_row*width+input_col < height * width * channels);
                        //assert(data_col - data_col_0 < output_h*output_w*channels);
                        // std::cout << "COL2IM: input_row" << " " << input_row << " " << input_col
                        //           << " <---- " << data_col - data_col_0 << " values: "
                        //           << data_im[input_row * width + input_col] << " <--- " << *data_col << std::endl;
                        data_im[input_row * width + input_col] += *data_col;
                     }
                     data_col++;
                     input_col += stride_w;
                  }
               }
               input_row += stride_h;
            }
         }
      }
   }
   //std::cout << "finishing col2imp" << std::endl;
}
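/// Example (illustrative sketch, not part of the original header): col2im is the
/// adjoint of Im2col, scattering the column matrix back and accumulating the
/// overlapping contributions (as needed e.g. for convolution gradients):
/// \code{.cpp}
/// std::vector<float> im(1 * 3 * 3);   // zeroed inside col2im
/// UTILITY::col2im(col.data(), 1, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, im.data());
/// \endcode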


} // end namespace UTILITY

namespace BLAS{
extern "C" void sgemm_(const char * transa, const char * transb, const int * m, const int * n, const int * k,
                       const float * alpha, const float * A, const int * lda, const float * B, const int * ldb,
                       const float * beta, float * C, const int * ldc);
}//BLAS
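/// Example (illustrative sketch, not part of the original header): sgemm_ is the
/// Fortran (column-major) BLAS routine, so a row-major product C = A * B can be
/// computed as C^T = B^T * A^T by passing B first (A, B, C point to the buffers):
/// \code{.cpp}
/// const int m = 2, n = 3, k = 4;   // A: m x k, B: k x n, C: m x n, all row-major
/// const char t = 'n';
/// const float alpha = 1.f, beta = 0.f;
/// BLAS::sgemm_(&t, &t, &n, &m, &k, &alpha, B, &n, A, &k, &beta, C, &n);
/// \endcode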


struct GNN_Data {
   RTensor<float> node_data;    // the node feature data, tensor with shape (num_nodes, num_node_features)
   RTensor<float> edge_data;    // the edge feature data, tensor with shape (num_edges, num_edge_features)
   RTensor<float> global_data;  // the global features, tensor with shape (1, num_global_features)
   RTensor<int>   edge_index;   // the edge index (receivers and senders for each edge), tensor with shape (2, num_edges)
                                // edge_index[0,:] are the receivers and edge_index[1,:] are the senders


   // need to have a default constructor since RTensor does not have one
   GNN_Data(): node_data(RTensor<float>({})), edge_data(RTensor<float>({})), global_data(RTensor<float>({})), edge_index(RTensor<int>({})) {}

};

template<typename T>
TMVA::Experimental::RTensor<T> Concatenate(TMVA::Experimental::RTensor<T> & t1, TMVA::Experimental::RTensor<T> & t2, int axis = 0)
{
   // concatenate tensors along axis. Shapes must be the same except in the dimension of the concatenated axis
   if (t1.GetMemoryLayout() != t2.GetMemoryLayout())
      throw std::runtime_error("TMVA RTensor Concatenate - tensors have different memory layout");
   auto & shape1 = t1.GetShape();
   auto & shape2 = t2.GetShape();
   if (t1.GetSize()/shape1[axis] != t2.GetSize()/shape2[axis]) {
      std::cout << "axis " << axis << " sizes " << t1.GetSize() << " " << t2.GetSize() << " ";
      std::cout << "shape 1 : " << ConvertShapeToString(t1.GetShape());
      std::cout << " shape 2 : " << ConvertShapeToString(t2.GetShape()) << std::endl;
      throw std::runtime_error("TMVA RTensor Concatenate - tensors have incompatible shapes");
   }
   std::vector<size_t> outShape = shape1;
   outShape[axis] = shape1[axis] + shape2[axis];
   TMVA::Experimental::RTensor<T> tout(outShape, t1.GetMemoryLayout());
   if (t1.GetMemoryLayout() == TMVA::Experimental::MemoryLayout::ColumnMajor) {
      throw std::runtime_error("TMVA RTensor Concatenate is not yet supported for column major tensors");
   }

   auto & stride1 = t1.GetStrides();
   auto & stride2 = t2.GetStrides();
   auto & outStride = tout.GetStrides();

   size_t s1 = (axis > 0) ? stride1[axis-1] : t1.GetSize();   // block size to copy from first tensor
   size_t s2 = (axis > 0) ? stride2[axis-1] : t2.GetSize();   // block size to copy from second tensor
   size_t sout = (axis > 0) ? outStride[axis-1] : tout.GetSize();
   size_t nb = t1.GetSize()/s1;
   for (size_t i = 0; i < nb; i++) {
      std::copy(t1.GetData() + i*s1, t1.GetData() + (i+1)*s1, tout.GetData() + i * sout);
      std::copy(t2.GetData() + i*s2, t2.GetData() + (i+1)*s2, tout.GetData() + i * sout + s1);
   }

   return tout;
}
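/// Example (illustrative sketch, not part of the original header):
/// \code{.cpp}
/// TMVA::Experimental::RTensor<float> a({2, 3});
/// TMVA::Experimental::RTensor<float> b({4, 3});
/// auto c = Concatenate(a, b, 0);   // resulting shape is (6, 3)
/// \endcode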


inline GNN_Data Concatenate(GNN_Data & data1, GNN_Data & data2, int axis = 0) {
   GNN_Data out;
   out.node_data = Concatenate(data1.node_data, data2.node_data, axis);
   out.edge_data = Concatenate(data1.edge_data, data2.edge_data, axis);
   out.global_data = Concatenate<float>(data1.global_data, data2.global_data, axis-1);
   // assume sender/receivers of data1 and data2 are the same
   out.edge_index = data1.edge_index.Copy();
   return out;
}

inline GNN_Data Copy(const GNN_Data & data) {
   GNN_Data out;
   out.node_data = RTensor<float>(data.node_data.GetShape());
   out.edge_data = RTensor<float>(data.edge_data.GetShape());
   out.global_data = RTensor<float>(data.global_data.GetShape());
   out.edge_index = RTensor<int>(data.edge_index.GetShape());
   std::copy(data.node_data.GetData(), data.node_data.GetData() + data.node_data.GetSize(), out.node_data.GetData());
   std::copy(data.edge_data.GetData(), data.edge_data.GetData() + data.edge_data.GetSize(), out.edge_data.GetData());
   std::copy(data.global_data.GetData(), data.global_data.GetData() + data.global_data.GetSize(), out.global_data.GetData());
   std::copy(data.edge_index.GetData(), data.edge_index.GetData() + data.edge_index.GetSize(), out.edge_index.GetData());
   return out;
}

}//SOFIE
}//Experimental
}//TMVA

#endif //TMVA_SOFIE_SOFIE_COMMON