#ifndef TMVA_SOFIE_ROPERATOR_GEMM
#define TMVA_SOFIE_ROPERATOR_GEMM
// ... (includes elided)
namespace TMVA {
namespace Experimental {
namespace SOFIE {
template <typename T>
class ROperator_Gemm final : public ROperator {
   // ... (data members elided: attributes fAttrAlpha, fAttrBeta, fAttrTransA, fAttrTransB;
   //      tensor names fNA, fNB, fNC, fNC2 (broadcast bias), fNY; shapes fShapeA, fShapeB,
   //      fShapeC, fShapeY; the element type string fType)
public:
   // constructor without the optional bias tensor C
   ROperator_Gemm(float alpha, float beta, int_t transA, int_t transB,
                  std::string nameA, std::string nameB, std::string nameY) :
      fAttrAlpha(alpha), fAttrBeta(beta), fAttrTransA(transA), fAttrTransB(transB),
      fNA(UTILITY::Clean_name(nameA)), fNB(UTILITY::Clean_name(nameB)),
      fNY(UTILITY::Clean_name(nameY))
   {
      static_assert(std::is_same_v<T, float>,
                    "TMVA::SOFIE - Unsupported type parsing a Gemm operator");
   }

   // constructor with the optional bias tensor C
   ROperator_Gemm(float alpha, float beta, int_t transA, int_t transB,
                  std::string nameA, std::string nameB, std::string nameC, std::string nameY) :
      fAttrAlpha(alpha), fAttrBeta(beta), fAttrTransA(transA), fAttrTransB(transB),
      fNA(UTILITY::Clean_name(nameA)), fNB(UTILITY::Clean_name(nameB)),
      fNC(UTILITY::Clean_name(nameC)), fNY(UTILITY::Clean_name(nameY))
   {
      static_assert(std::is_same_v<T, float>,
                    "TMVA::SOFIE - Unsupported type parsing a Gemm operator");
   }
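   // Illustrative note (not in the original source): for an ONNX node
   //   Y = Gemm(A, B, C) with alpha = 1.0, beta = 1.0, transA = 0, transB = 1
   // the parser would construct this operator roughly as
   //   ROperator_Gemm<float>(1.0, 1.0, 0, 1, "A", "B", "C", "Y");
   // using the second constructor; the first constructor covers nodes without a bias input.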
   // shape inference implementation shared by the static and the dynamic case,
   // templated on the shape element type (size_t or Dim)
   template <typename U>
   std::vector<std::vector<U>> DoShapeInference(const std::vector<std::vector<U>> & input) {
      if (input.size() > 3)
         throw std::runtime_error("TMVA SOFIE Gemm Op Shape Inference only need 2 or 3 input tensor");
      for (auto & i : input) {
         if (i.size() > 2)
            throw std::runtime_error("TMVA SOFIE Gemm Op Shape Inference only accept input tensor with 2 dimensions");
      }
      std::vector<std::vector<U>> ret;
      if (input.size() == 3) {
         ret.push_back(input[2]);   // the shape of the (already broadcast) bias C is the output shape
         return ret;
      }
      std::vector<U> s_a(input[0]);
      std::vector<U> s_b(input[1]);
      if (fAttrTransA)
         std::reverse(s_a.begin(), s_a.end());
      if (fAttrTransB)
         std::reverse(s_b.begin(), s_b.end());
      // output shape: {M, N}, with M from op(A) and N from op(B)
      std::vector<U> s_y(2);
      s_y[0] = s_a[0];
      s_y[1] = s_b[1];
      ret.push_back(s_y);
      return ret;
   }
   std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input) {
      return DoShapeInference<size_t>(input);
   }

   std::vector<std::vector<Dim>> DynamicShapeInference(const std::vector<std::vector<Dim>> & input) {
      return DoShapeInference<Dim>(input);
   }
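   // Worked example (illustrative, not in the original source): with fAttrTransA = 0 and
   // fAttrTransB = 1, input shapes A = {2, 3} and B = {4, 3} give s_a = {2, 3} and, after the
   // reverse, s_b = {3, 4}; the inferred output shape is therefore {s_a[0], s_b[1]} = {2, 4}.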
   void Initialize(RModel& model) {
      // A and B must be graph inputs or already initialized intermediate tensors
      if (!model.CheckIfTensorAlreadyExist(fNA) || !model.CheckIfTensorAlreadyExist(fNB)) {
         throw std::runtime_error("TMVA SOFIE Gemm Op Input Tensor " + fNA + " or " + fNB +
                                  " is not found in model");
      }
      if (fNC != "" && !model.CheckIfTensorAlreadyExist(fNC)) {
         throw std::runtime_error("TMVA SOFIE Gemm Op Input Tensor " + fNC + " is not found in model");
      }
      // ... (the shapes of A and B are read from the model; dynamic tensors use GetDynamicTensorShape)
      auto shapeA_int = model.GetTensorShape(fNA);
      // a 1-dimensional A is promoted to a row vector of shape {1, N}
      if (shapeA_int.size() == 1)
         shapeA_int = {1, shapeA_int[0]};
      fShapeA = ConvertShapeToDim(shapeA_int);
      // ... (B is handled analogously, filling fShapeB)
      if (fShapeA.size() != 2)
         throw std::runtime_error("TMVA SOFIE Gemm Op Input Tensor " + fNA +
                                  " does not have 2 dimensions");
      if (fShapeB.size() != 2)
         throw std::runtime_error("TMVA SOFIE Gemm Op Input Tensor " + fNB +
                                  " does not have 2 dimensions");
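      // Illustrative note (not in the original source): thanks to the promotion above, a
      // 1-dimensional A of shape {3} is treated as a 1 x 3 row vector, so together with a
      // B of shape {3, 4} the operator produces an output of shape {1, 4}.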
      // infer the output shape from the (possibly dynamic) shapes of A and B
      fShapeY = DynamicShapeInference({fShapeA, fShapeB})[0];
      std::vector<size_t> shapeY = ConvertShapeToInt(fShapeY);   // empty if fShapeY has unresolved dimensions
      if (shapeY.empty()) {
         // ... (handling of a dynamic output shape elided)
      }
      // the bias tensor C, if present, must not itself be a dynamic tensor
      if (fNC != "" && model.IsDynamicTensor(fNC)) {
         throw std::runtime_error("TMVA SOFIE Gemm Op Input Tensor " + fNC + " is dynamic and is not supported");
      }
      // ... (for a present bias C its shape is read from the model into fShapeC)
      bool broadcast_needed = !UTILITY::AreSameShape(fShapeC, shapeY);
      if (broadcast_needed) {
         // without an inference Session the broadcast is done here, at model-generation time,
         // which requires the output shape to be fully known
         if (shapeY.empty())
            throw std::runtime_error("TMVA SOFIE Gemm Op: dynamic tensors not supported without a session");
         auto original_data = model.GetInitializedTensorData(fNC);
         auto targetShape = UTILITY::UnidirectionalBroadcastShape(fShapeC, shapeY);
         if (fType == "float") {
            std::shared_ptr<void> new_data_ptr(
               UTILITY::UnidirectionalBroadcast<float>(static_cast<float *>(original_data.get()),
                                                       fShapeC, targetShape),
               std::default_delete<float[]>());
            model.UpdateInitializedTensor(fNC, model.GetTensorType(fNC), targetShape, new_data_ptr);
            fShapeC = targetShape;
         }
      }
      // ... (the output tensor fNY is then registered via AddIntermediateTensor / AddDynamicTensor)
   }
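   // Example of the broadcast handled in Initialize() above (illustrative, not in the original
   // source): a bias C of shape {4} with an output shape {2, 4} triggers broadcast_needed, and
   // UnidirectionalBroadcast repeats the 4 bias values for each of the 2 output rows, after which
   // the initialized tensor is updated to shape {2, 4}.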
   std::string GenerateInitCode() {
      std::stringstream out;
      // initialization code that broadcasts the bias tensor C to the output shape
      // ... (emitted only when the bias still needs to be broadcast at run time)
      out << "//--- broadcast bias tensor " << fNC << " for Gemm op\n";
      out << SP << SP << "float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_"
          << fNC << ", " << ConvertShapeToString(fShapeC) << ", " << ConvertDynamicShapeToString(fShapeY) << ");\n";
      auto length = ConvertDynamicShapeToLength(fShapeY);
      out << SP << SP << "std::copy(data, data + " << length << ", tensor_" << fNC2 << ");\n";
      out << SP << SP << "delete [] data;\n";
      // ...
      return out.str();
   }
   std::string Generate(std::string OpName) {
      OpName = "op_" + OpName;
      if (fShapeA.empty() || fShapeB.empty() || fShapeY.empty())
         throw std::runtime_error("TMVA SOFIE Gemm Op called to Generate without being initialized first");
      std::stringstream out;
      out << "\n//--------- Gemm\n";
      out << SP << "char " << OpName << "_transA = " << (fAttrTransA ? "\'t\'" : "\'n\'") << ";\n";
      out << SP << "char " << OpName << "_transB = " << (fAttrTransB ? "\'t\'" : "\'n\'") << ";\n";
      // GEMM dimensions: Y(m x n) = alpha * op(A)(m x k) * op(B)(k x n) + beta * C
      // ... (m, n and k are taken from fShapeA and fShapeB according to the transpose flags)
      out << SP << "int " << OpName << "_m = " << m << ";\n";
      out << SP << "int " << OpName << "_n = " << n << ";\n";
      out << SP << "int " << OpName << "_k = " << k << ";\n";
      out << SP << "float " << OpName << "_alpha = " << std::setprecision(std::numeric_limits<float>::max_digits10)
          << fAttrAlpha << ";\n";
      out << SP << "float " << OpName << "_beta = " << std::setprecision(std::numeric_limits<float>::max_digits10)
          << fAttrBeta << ";\n";
      // leading dimensions of the row-major A and B buffers, as seen by column-major BLAS
      out << SP << "int " << OpName << "_lda = " << (fAttrTransA ? m : k) << ";\n";
      out << SP << "int " << OpName << "_ldb = " << (fAttrTransB ? k : n) << ";\n";
      if (fNC != "") {
         auto length = ConvertDynamicShapeToLength(fShapeY);   // number of output elements, as a string expression
         // the (already broadcast) bias must have exactly as many elements as the output
         if (ConvertShapeToLength(fShapeC) != ConvertShapeToLength(ConvertShapeToInt(fShapeY)))
            throw std::runtime_error("TMVA SOFIE Gemm Op " + OpName + " Bias tensor has not correct size " +
                                     ConvertShapeToString(fShapeC));
         // copy the bias into the output tensor; the sgemm call below accumulates on top of it
         out << SP << "std::copy(tensor_" << fNC2 << ", tensor_" << fNC2 << " + " << length
             << ", tensor_" << fNY << ");\n";
      } else {
         // without a bias tensor beta must be zero, otherwise a previous output would be reused as bias
         if (fAttrBeta != 0)
            throw std::runtime_error("TMVA SOFIE Gemm Op " + OpName +
                                     " Bias tensor is not present but beta value in Gemm is not zero");
      }
      if (fType == "float") {
         // BLAS sgemm_ is column-major while SOFIE tensors are row-major: passing B before A and
         // swapping m and n makes the routine write the row-major product A * B directly into Y
         out << SP << "BLAS::sgemm_(&" << OpName << "_transB, &" << OpName << "_transA, &" << OpName
             << "_n, &" << OpName << "_m, &" << OpName << "_k, &" << OpName << "_alpha, tensor_" << fNB
             << ", &" << OpName << "_ldb, tensor_" << fNA << ", &" << OpName << "_lda, &" << OpName
             << "_beta, tensor_" << fNY << ", &" << OpName << "_n);\n";
      }
      return out.str();
   }
   std::vector<std::string> GetBlasRoutines() { return { std::string("Gemm"), std::string("Gemv") }; }

};   // class ROperator_Gemm

} // namespace SOFIE
} // namespace Experimental
} // namespace TMVA

#endif // TMVA_SOFIE_ROPERATOR_GEMM
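For illustration (not part of the original header): for an operator named "op_0" with A of shape {2, 3}, B of shape {3, 4}, a bias C and alpha = beta = 1, Generate() above emits code along these lines into the model header (tensor names and the exact spelling of the output-length expression depend on the model):

   char op_0_transA = 'n';
   char op_0_transB = 'n';
   int op_0_m = 2;
   int op_0_n = 4;
   int op_0_k = 3;
   float op_0_alpha = 1;
   float op_0_beta = 1;
   int op_0_lda = 3;
   int op_0_ldb = 4;
   std::copy(tensor_C, tensor_C + 8, tensor_Y);
   BLAS::sgemm_(&op_0_transB, &op_0_transA, &op_0_n, &op_0_m, &op_0_k, &op_0_alpha, tensor_B,
                &op_0_ldb, tensor_A, &op_0_lda, &op_0_beta, tensor_Y, &op_0_n);

The argument order relies on a standard layout trick: the BLAS routine is column-major while SOFIE tensors are row-major, so passing B before A and swapping m and n makes sgemm_ write the row-major product A * B directly into Y. A minimal, self-contained sketch of that equivalence (the naive colmajor_gemm below is only a stand-in for sgemm_ with both transpose flags equal to 'n', not ROOT code):

   #include <array>
   #include <cassert>

   // naive column-major GEMM: C(m x n) = A(m x k) * B(k x n), both operands non-transposed
   static void colmajor_gemm(int m, int n, int k, const float *A, int lda,
                             const float *B, int ldb, float *C, int ldc)
   {
      for (int j = 0; j < n; ++j)
         for (int i = 0; i < m; ++i) {
            float sum = 0.f;
            for (int p = 0; p < k; ++p)
               sum += A[i + p * lda] * B[p + j * ldb];
            C[i + j * ldc] = sum;
         }
   }

   int main()
   {
      // row-major A (2 x 3) and B (3 x 4)
      std::array<float, 6>  A{1, 2, 3, 4, 5, 6};
      std::array<float, 12> B{1, 0, 0, 1,  0, 1, 0, 1,  0, 0, 1, 1};
      std::array<float, 8>  Y{};
      const int m = 2, n = 4, k = 3;
      // same argument order as the generated sgemm_ call: B first, A second, m and n swapped,
      // leading dimensions n for B and k for A
      colmajor_gemm(n, m, k, B.data(), n, A.data(), k, Y.data(), n);
      // Y, read back as a row-major 2 x 4 matrix, equals A * B = {{1, 2, 3, 6}, {4, 5, 6, 15}}
      assert(Y[0] == 1 && Y[3] == 6 && Y[4] == 4 && Y[7] == 15);
      return 0;
   }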
Referenced declarations:

RModel (TMVA::Experimental::SOFIE):
   void AddNeededStdLib(std::string libname)
   void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<Dim> dim_shape)
   void AddDynamicTensor(std::string tensor_name, ETensorType type, std::vector<Dim> shape)
   bool CheckIfTensorAlreadyExist(std::string tensor_name)
   bool IsInputTensor(const std::string &name) const
   bool IsDynamicTensor(const std::string &name) const
   const ETensorType & GetTensorType(std::string name)
   const std::vector<size_t> & GetTensorShape(std::string name)
   std::vector<Dim> GetDynamicTensorShape(std::string name)
   std::shared_ptr<void> GetInitializedTensorData(std::string tensor_name)
   void UpdateInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data)

ROperator_Gemm and base-class members (TMVA::Experimental::SOFIE):
   ROperator_Gemm(float alpha, float beta, int_t transA, int_t transB, std::string nameA, std::string nameB, std::string nameY)
   ROperator_Gemm(float alpha, float beta, int_t transA, int_t transB, std::string nameA, std::string nameB, std::string nameC, std::string nameY)
   std::vector<ETensorType> TypeInference(std::vector<ETensorType> input)
   std::vector<std::vector<U>> DoShapeInference(const std::vector<std::vector<U>> &input)
   std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input)
   std::vector<std::vector<Dim>> DynamicShapeInference(const std::vector<std::vector<Dim>> &input)
   void Initialize(RModel &model)
   std::string GenerateInitCode()
   std::string Generate(std::string OpName)
   std::vector<std::string> GetBlasRoutines()
   std::vector<Dim> fShapeA, fShapeB, fShapeY
   std::vector<size_t> fShapeC
   const std::string SP   // space used to correctly indent the generated C++ code

Shape utilities (TMVA::Experimental::SOFIE):
   bool AreSameShape(const std::vector<size_t> &, const std::vector<size_t> &)
   std::vector<size_t> UnidirectionalBroadcastShape(std::vector<size_t>, std::vector<size_t>)
   std::vector<Dim> ConvertShapeToDim(std::vector<size_t> shape)   // convert a shape from integer format to a dynamic one (based on Dim)
   std::vector<size_t> ConvertShapeToInt(std::vector<Dim> shape)   // convert a shape based on Dim to integer format
   std::size_t ConvertShapeToLength(std::vector<size_t> shape)
   std::string ConvertShapeToString(std::vector<size_t> shape)
   std::string ConvertDynamicShapeToString(std::vector<Dim> shape)
   std::string ConvertDynamicShapeToLength(std::vector<Dim> shape)