#include "TMVA/RTensor.hxx"
#include <stdexcept>
#include <type_traits>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>
#include <memory>
#include <regex>
#include <sstream>
#include <iostream>
Classes | |
struct | TMVA::Experimental::SOFIE::Dim |
struct | TMVA::Experimental::SOFIE::GNN_Data |
struct | TMVA::Experimental::SOFIE::InitializedTensor |
struct | TMVA::Experimental::SOFIE::InputTensorInfo |
struct | TMVA::Experimental::SOFIE::TensorInfo |
Namespaces | |
namespace | TMVA |
create variable transformations | |
namespace | TMVA::Experimental |
namespace | TMVA::Experimental::SOFIE |
namespace | TMVA::Experimental::SOFIE::BLAS |
namespace | TMVA::Experimental::SOFIE::UTILITY |
Typedefs | |
typedef std::int64_t | TMVA::Experimental::SOFIE::int_t |
Enumerations | |
enum class | TMVA::Experimental::SOFIE::ETensorType { TMVA::Experimental::SOFIE::UNDEFINED = 0 , TMVA::Experimental::SOFIE::FLOAT = 1 , TMVA::Experimental::SOFIE::UNINT8 = 2 , TMVA::Experimental::SOFIE::INT8 = 3 , TMVA::Experimental::SOFIE::UINT16 = 4 , TMVA::Experimental::SOFIE::INT16 = 5 , TMVA::Experimental::SOFIE::INT32 = 6 , TMVA::Experimental::SOFIE::INT64 = 7 , TMVA::Experimental::SOFIE::STRING = 8 , TMVA::Experimental::SOFIE::BOOL = 9 , TMVA::Experimental::SOFIE::FLOAT16 = 10 , TMVA::Experimental::SOFIE::DOUBLE = 11 , TMVA::Experimental::SOFIE::UINT32 = 12 , TMVA::Experimental::SOFIE::UINT64 = 13 , TMVA::Experimental::SOFIE::COMPLEX64 = 14 , TMVA::Experimental::SOFIE::COMPLEX28 = 15 , TMVA::Experimental::SOFIE::BFLOAT16 = 16 } |
Functions | |
bool | TMVA::Experimental::SOFIE::UTILITY::AreSameShape (const std::vector< size_t > &, const std::vector< size_t > &) |
template<typename T > | |
T * | TMVA::Experimental::SOFIE::UTILITY::BroadcastConvBias (const T *data, const size_t channel, const std::vector< size_t > &targetShape) |
template<typename T > | |
T * | TMVA::Experimental::SOFIE::UTILITY::BroadcastTensor (const T *data, const std::vector< size_t > &shape, const std::vector< size_t > &targetShape) |
std::string | TMVA::Experimental::SOFIE::UTILITY::Clean_name (std::string input_tensor_name) |
template<typename Dtype > | |
void | TMVA::Experimental::SOFIE::UTILITY::col2im (const Dtype *data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, Dtype *data_im) |
std::vector< size_t > | TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape (const std::vector< size_t > &shape) |
compute stride of a tensor given its shape (assume layout is row-major) | |
GNN_Data | TMVA::Experimental::SOFIE::Concatenate (GNN_Data &data1, GNN_Data &data2, int axis=0) |
template<typename T > | |
TMVA::Experimental::RTensor< T > | TMVA::Experimental::SOFIE::Concatenate (TMVA::Experimental::RTensor< T > &t1, TMVA::Experimental::RTensor< T > &t2, int axis=0) |
std::vector< Dim > | TMVA::Experimental::SOFIE::ConvertShapeToDim (std::vector< size_t > shape) |
std::size_t | TMVA::Experimental::SOFIE::ConvertShapeToLength (std::vector< size_t > shape) |
std::string | TMVA::Experimental::SOFIE::ConvertShapeToString (std::vector< size_t > shape) |
ETensorType | TMVA::Experimental::SOFIE::ConvertStringToType (std::string type) |
std::string | TMVA::Experimental::SOFIE::ConvertTypeToString (ETensorType type) |
GNN_Data | TMVA::Experimental::SOFIE::Copy (const GNN_Data &data) |
template<typename T > | |
ETensorType | TMVA::Experimental::SOFIE::GetTemplatedType (T) |
template<typename T > | |
void | TMVA::Experimental::SOFIE::UTILITY::Im2col (const T *data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, T *data_col) |
im2col : efficient function to re-arrange input data of convolution to a matrix that can be used by BLAS. Use trick to loop on each element of the filtered region first and follow the input data layout. By doing this, reads and writes are of consecutive data in memory and one gains in efficiency. The resulting matrix is already transposed and can be used directly in BLAS, since the output will be a matrix of shape (channels*kernel_h*kernel_w , output_h*output_w). Example: with an input matrix a1 a2 a3 b1 b2 b3 c1 c2 c3, a 2x2 kernel (k1,k2,k3,k4) and padding 1, the output will be a matrix (4 x 16); the routine will follow the output order: | |
template<typename T > | |
void | TMVA::Experimental::SOFIE::UTILITY::Im2col_3d (const T *data_im, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w, const int pad_d, const int pad_h, const int pad_w, const int stride_d, const int stride_h, const int stride_w, const int dilation_d, const int dilation_h, const int dilation_w, T *data_col) |
3d implementation | |
bool | TMVA::Experimental::SOFIE::UTILITY::is_a_ge_zero_and_a_lt_b (int a, int b) |
function to check if a >= 0 and a < b using a single comparison: casting both values to unsigned turns the two checks into one comparison | |
std::vector< size_t > | TMVA::Experimental::SOFIE::UTILITY::MultidirectionalBroadcastShape (std::vector< std::vector< size_t > >) |
void | TMVA::Experimental::SOFIE::BLAS::sgemm_ (const char *transa, const char *transb, const int *m, const int *n, const int *k, const float *alpha, const float *A, const int *lda, const float *B, const int *ldb, const float *beta, float *C, const int *ldc) |
template<typename T > | |
T * | TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast (const T *data, const std::vector< size_t > &shape, const std::vector< size_t > &targetShape) |
std::vector< size_t > | TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcastShape (std::vector< size_t >, std::vector< size_t >) |