#ifndef TMVA_SOFIE_SOFIE_COMMON
#define TMVA_SOFIE_SOFIE_COMMON

namespace Experimental {
   UNDEFINED = 0,
   FLOAT = 1,
   UINT8 = 2,
   INT8 = 3,
   UINT16 = 4,
   INT16 = 5,
   INT32 = 6,
   INT64 = 7,
   STRING = 8,
   BOOL = 9,
   static const std::string Name() { return "float"; }
   static const std::string Name() { return "double"; }
   static const std::string Name() { return "int64_t"; }
   static const std::string Name() { return "int32_t"; }
   static const std::string Name() { return "uint32_t"; }
   static const std::string Name() { return "uint64_t"; }
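// Illustrative sketch (hypothetical names, not the actual SOFIE declarations):
// the Name() members above are per-type trait specializations that map a C++
// type to the type-name string spliced into generated code. A minimal,
// self-contained version of the same pattern:
#include <cstdint>
#include <string>

template <typename T>
struct ExampleTensorType;                        // primary template left undefined

template <>
struct ExampleTensorType<float> {
   static const std::string Name() { return "float"; }
};

template <>
struct ExampleTensorType<int64_t> {
   static const std::string Name() { return "int64_t"; }
};

// Usage: ExampleTensorType<float>::Name() returns "float", which a code
// generator can emit directly into the C++ source it produces.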
   throw std::invalid_argument("New size exceeds available tensor size.");
   std::stringstream ret;
   if (std::is_floating_point_v<T>)
      ret << std::setprecision(std::numeric_limits<T>::max_digits10);
   std::stringstream ret;
   for (size_t i = 0; i < n; i++) {
      if (std::is_floating_point_v<T>)
         ret << std::setprecision(std::numeric_limits<T>::max_digits10);
      if (i < n-1) ret << ", ";
   template <class T = void>
      return static_cast<T const *>(fData.get());
   throw std::runtime_error("TMVA::SOFIE doesn't yet support serialising data-type " +
bool AreSameShape(const std::vector<size_t> &, const std::vector<size_t> &);
bool AreSameShape(const std::vector<size_t> &, const std::vector<Dim> &);
bool AreSameShape(const std::vector<Dim> &, const std::vector<Dim> &);
   std::stringstream ss;
   ss << "TMVA::SOFIE - Error broadcasting Conv Bias of shape {";
   ss << std::to_string(channel);
   throw std::runtime_error(ss.str());
   for (size_t i = 2; i < size; i++)
   for (size_t i = 0; i < channel; i++) {
   for (size_t i = 1; i < batch; i++) {
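// Illustrative sketch (hypothetical helper, not the SOFIE implementation): the
// loops above replicate a per-channel Conv bias of shape {C} into a buffer of
// shape {N, C, d1, d2, ...}. Each channel value is first repeated over its
// spatial block, then the first batch entry is copied to the remaining batches.
#include <algorithm>
#include <cstddef>
#include <vector>

template <typename T>
std::vector<T> ExampleBroadcastConvBias(const T *bias, size_t channel,
                                        const std::vector<size_t> &targetShape) {
   size_t batch = targetShape[0];
   size_t spatial = 1;
   for (size_t i = 2; i < targetShape.size(); i++)
      spatial *= targetShape[i];                       // product of the spatial dimensions

   std::vector<T> out(batch * channel * spatial);
   for (size_t c = 0; c < channel; c++)                // fill the first batch entry channel by channel
      std::fill(out.begin() + c * spatial, out.begin() + (c + 1) * spatial, bias[c]);
   for (size_t b = 1; b < batch; b++)                  // replicate the first batch entry
      std::copy(out.begin(), out.begin() + channel * spatial,
                out.begin() + b * channel * spatial);
   return out;
}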
template <typename T, class ConstContT = std::span<const T>, class ContT = std::span<T>>
   size_t size = shape.size();
   if (shape.front() == targetShape.front() && shape.back() == 1 && size > 1) {
      for (int k = int(size)-2; k >= 0; k--) {
         if (shape[k] != 1) break;
   for (size_t idx = 0; idx < size; idx++) {
      size_t dim = shape[idx];
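// Illustrative sketch (hypothetical helper, not the SOFIE implementation):
// BroadcastTensor expands data of shape `shape` to `targetShape`, where every
// source dimension either matches the target or equals 1. A generic, stride-based
// way to do that (assuming both shapes already have the same rank) is to map each
// target index back to a source index, treating size-1 dimensions as stride 0:
#include <cstddef>
#include <vector>

template <typename T>
std::vector<T> ExampleBroadcast(const std::vector<T> &data,
                                const std::vector<size_t> &shape,
                                const std::vector<size_t> &targetShape) {
   size_t nd = shape.size();
   std::vector<size_t> srcStride(nd, 0);           // 0 stride for broadcast (size-1) dimensions
   size_t s = 1;
   for (size_t i = nd; i-- > 0;) {
      if (shape[i] != 1) srcStride[i] = s;
      s *= shape[i];
   }
   size_t targetLength = 1;
   for (size_t d : targetShape) targetLength *= d;

   std::vector<T> out(targetLength);
   std::vector<size_t> idx(nd, 0);                 // current multi-index in the target
   for (size_t o = 0; o < targetLength; o++) {
      size_t src = 0;
      for (size_t i = 0; i < nd; i++) src += idx[i] * srcStride[i];
      out[o] = data[src];
      for (size_t i = nd; i-- > 0;) {              // advance the multi-index in row-major order
         if (++idx[i] < targetShape[i]) break;
         idx[i] = 0;
      }
   }
   return out;
}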
   return static_cast<unsigned>(a) < static_cast<unsigned>(b);
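// Illustrative sketch: the single comparison above implements the bounds check
// "0 <= a && a < b" for int a and non-negative int b. Casting a negative int to
// unsigned wraps it to a very large value, so it always compares >= b, and the
// two checks collapse into one:
inline bool example_in_range(int a, int b) {
   return static_cast<unsigned>(a) < static_cast<unsigned>(b);
}

// example_in_range(-1, 5) == false, example_in_range(3, 5) == true,
// example_in_range(5, 5) == false; the im2col loops use this to test whether a
// padded input coordinate falls inside the image.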
template <typename Dtype>
   for (std::size_t i = 0; i < n; ++i) {
inline void FillOutput(std::vector<bool> const &vec, std::vector<std::uint8_t> &out, std::size_t n)
   for (std::size_t i = 0; i < n; ++i) {
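// Illustrative sketch (hypothetical names, not the SOFIE implementation): the
// std::vector<bool> overload above exists because vector<bool> is a bit-packed
// specialization with no contiguous T* storage, so its elements cannot be copied
// with pointer arithmetic; they have to be widened one by one into a byte buffer:
#include <cstdint>
#include <vector>

inline void ExampleFillOutput(const std::vector<bool> &vec,
                              std::vector<std::uint8_t> &out, std::size_t n) {
   out.resize(n);
   for (std::size_t i = 0; i < n; ++i)
      out[i] = vec[i] ? 1 : 0;      // element-wise copy instead of a bulk memcpy
}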
                   const float *alpha, const float *A, const int *lda, const float *B, const int *ldb,
                   const float *beta, float *C, const int *ldc);
   if (t1.GetMemoryLayout() != t2.GetMemoryLayout())
      throw std::runtime_error("TMVA RTensor Concatenate - tensors have different memory layout");
   std::cout << "axis " << axis << " sizes " << t1.GetSize() << " " << t2.GetSize() << " ";
   throw std::runtime_error("TMVA RTensor Concatenate - tensors have incompatible shapes");
   if (t1.GetMemoryLayout() == TMVA::Experimental::MemoryLayout::ColumnMajor) {
      throw std::runtime_error("TMVA RTensor Concatenate is not yet supported for column major tensors");
   size_t s1 = (axis > 0) ? stride1[axis-1] : t1.GetSize();
   size_t s2 = (axis > 0) ? stride2[axis-1] : t2.GetSize();
   size_t nb = t1.GetSize() / s1;
   for (size_t i = 0; i < nb; i++) {
      std::copy(t1.GetData() + i*s1, t1.GetData() + (i+1)*s1, tout.GetData() + i*sout);
      std::copy(t2.GetData() + i*s2, t2.GetData() + (i+1)*s2, tout.GetData() + i*sout + s1);
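// Illustrative sketch (hypothetical helper using plain std::vector instead of
// RTensor): the block-copy loop above concatenates two row-major tensors along
// `axis`. s1 and s2 are the sizes of one slice at that axis (the stride of the
// preceding axis, or the whole tensor for axis 0), sout is the output slice size
// (s1 + s2), and nb is the number of slices. Each output slice is the slice of
// t1 followed by the slice of t2:
#include <algorithm>
#include <cstddef>
#include <vector>

template <typename T>
std::vector<T> ExampleConcat(const std::vector<T> &t1, const std::vector<T> &t2,
                             size_t s1, size_t s2) {
   size_t nb = t1.size() / s1;              // number of slices; must also equal t2.size()/s2
   size_t sout = s1 + s2;
   std::vector<T> out(nb * sout);
   for (size_t i = 0; i < nb; i++) {
      std::copy(t1.begin() + i*s1, t1.begin() + (i+1)*s1, out.begin() + i*sout);
      std::copy(t2.begin() + i*s2, t2.begin() + (i+1)*s2, out.begin() + i*sout + s1);
   }
   return out;
}

// Example: concatenating a row-major 2x3 matrix with a 2x2 matrix along axis 1
// uses s1 = 3, s2 = 2, nb = 2 and yields a 2x5 matrix.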
   out.edge_index = data1.edge_index.Copy();
   std::copy(data.node_data.GetData(), data.node_data.GetData() + data.node_data.GetSize(), out.node_data.GetData());
   std::copy(data.edge_data.GetData(), data.edge_data.GetData() + data.edge_data.GetSize(), out.edge_data.GetData());
   std::copy(data.global_data.GetData(), data.global_data.GetData() + data.global_data.GetSize(), out.global_data.GetData());
   std::copy(data.edge_index.GetData(), data.edge_index.GetData() + data.edge_index.GetSize(), out.edge_index.GetData());
                      const float *B, float beta, const float *C)
   TMVA::Experimental::SOFIE::BLAS::sgemm_(transa ? &ct : &cn, transb ? &ct : &cn, &m, &n, &k, &alpha, A,
                                           lda, B, ldb,
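// Illustrative sketch (a reference loop, not the BLAS call SOFIE actually makes):
// sgemm_ computes C = alpha * op(A) * op(B) + beta * C on column-major matrices,
// where op(X) is X or X^T depending on the transa/transb flags. The no-transpose
// case, written out explicitly for an (m x k) A and a (k x n) B, is:
inline void example_gemm_nn(int m, int n, int k, float alpha, const float *A, int lda,
                            const float *B, int ldb, float beta, float *C, int ldc) {
   for (int j = 0; j < n; ++j) {            // column of C (column-major layout)
      for (int i = 0; i < m; ++i) {         // row of C
         float acc = 0.f;
         for (int p = 0; p < k; ++p)
            acc += A[i + p * lda] * B[p + j * ldb];
         C[i + j * ldc] = alpha * acc + beta * C[i + j * ldc];
      }
   }
}

// A row-major caller can reuse a column-major GEMM by swapping the roles of A
// and B (since (A*B)^T = B^T * A^T), the usual trick for driving Fortran BLAS
// from C/C++ row-major buffers.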
const_iterator begin() const
RTensor is a container with contiguous memory and shape information.
bool IsWeightTensor() const
std::shared_ptr< void > const & sharedptr() const
std::shared_ptr< void > fData
Transient shared data (not persisted).
InitializedTensor()=default
ETensorType fType
Encodes the type of the data.
std::vector< std::size_t > const & shape() const
char * fPersistentData
Persistent version of the data (fSize bytes).
std::vector< std::size_t > fShape
The shape of the data in terms of elements in each dimension.
bool fIsNotWritable
Flag to indicate that tensor values do not need to be written as weight or generated code.
bool IsConstantTensor() const
void CastSharedToPersistent()
bool fConstant
Flag specifying if tensor is a Constant one (coming from a Constant operator)
ETensorType const & type() const
void CastPersistentToShared()
InitializedTensor(ETensorType type, std::span< std::size_t > shape, std::shared_ptr< void > data, bool typeConstant=false)
int fSize
The size of the persistent data in bytes (not number of elements!)
void sgemm_(const char *transa, const char *transb, const int *m, const int *n, const int *k, const float *alpha, const float *A, const int *lda, const float *B, const int *ldb, const float *beta, float *C, const int *ldc)
bool AreSameShape(const std::vector< size_t > &, const std::vector< size_t > &)
void Im2col_3d(const T *data_im, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w, const int pad_d, const int pad_h, const int pad_w, const int stride_d, const int stride_h, const int stride_w, const int dilation_d, const int dilation_h, const int dilation_w, T *data_col)
3D implementation of Im2col.
void FillOutput(T const *arr, std::vector< T > &out, std::size_t n)
T * BroadcastConvBias(const T *data, const size_t channel, const std::vector< size_t > &targetShape)
void BroadcastTensor(ConstContT data, const std::vector< size_t > &shape, const std::vector< size_t > &targetShape, ContT broadcastedData)
void col2im(const Dtype *data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, Dtype *data_im)
std::vector< size_t > UnidirectionalBroadcastShape(std::vector< size_t >, std::vector< size_t >)
std::string Clean_name(std::string input_tensor_name)
bool is_a_ge_zero_and_a_lt_b(int a, int b)
function to check whether a >= 0 and a < b using a single comparison; uses the trick of casting to unsigned val...
std::vector< size_t > MultidirectionalBroadcastShape(std::vector< std::vector< size_t > >)
T * UnidirectionalBroadcast(const T *data, const std::vector< size_t > &shape, const std::vector< size_t > &targetShape)
void Im2col(const T *data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, T *data_col)
im2col: efficient function to re-arrange the input data of a convolution into a matrix that can be used by B...
T * CreateBroadcastTensor(const T *data, const std::vector< size_t > &shape, const std::vector< size_t > &targetShape, size_t targetLength)
std::vector< size_t > ComputeStrideFromShape(const std::vector< size_t > &shape)
compute stride of a tensor given its shape (assume layout is row-major)
std::vector< Dim > ConvertShapeToDim(std::vector< size_t > shape)
Convert shape from integer format to dynamic one (based on Dim)
std::string ConvertDynamicShapeToLength(std::vector< Dim > shape)
constexpr size_t GetTypeSize(ETensorType type)
ETensorType GetTemplatedType(T)
std::string ConvertValuesToString(size_t n, const T *data)
void Gemm_Call(float *output, bool transa, bool transb, int m, int n, int k, float alpha, const float *A, const float *B, float beta, const float *C)
std::string ConvertShapeToString(std::vector< size_t > shape)
std::string ConvertTypeToString(ETensorType type)
std::string ConvertDynamicShapeToString(std::vector< Dim > shape)
ETensorType ConvertStringToType(std::string type)
TMVA::Experimental::RTensor< T > Concatenate(TMVA::Experimental::RTensor< T > &t1, TMVA::Experimental::RTensor< T > &t2, int axis=0)
std::vector< size_t > ConvertShapeToInt(std::vector< Dim > shape)
Convert shape based on Dim to integer format.
std::string ConvertValToString(T value)
std::size_t ConvertShapeToLength(std::vector< size_t > shape)
GNN_Data Copy(const GNN_Data &data)
Dim(const std::string &p, size_t d=0)
std::string GetVal() const
RTensor< float > global_data
RTensor< float > edge_data
RTensor< int > edge_index
RTensor< float > node_data
std::map< size_t, TensorMemoryInfo > total_stack
std::map< size_t, size_t > available_stack
std::vector< size_t > shape
void merge(const TensorMemoryInfo &other)
std::string_view tensor_name
TensorMemoryInfo split(const std::string_view new_name, size_t new_size)
static const std::string Name()