#ifndef TMVA_SOFIE_ROPERATOR_GEMM
#define TMVA_SOFIE_ROPERATOR_GEMM

namespace Experimental {
      fNB(UTILITY::Clean_name(nameB)),
      fNY(UTILITY::Clean_name(nameY))
   {
      static_assert(std::is_same_v<T, float>,
                    "TMVA::SOFIE - Unsupported type parsing a Gemm operator");
   }
   ROperator_Gemm(float alpha, float beta, int_t transA, int_t transB,
                  std::string nameA, std::string nameB, std::string nameC, std::string nameY):
      fAttrAlpha(alpha), fAttrBeta(beta), fAttrTransA(transA), fAttrTransB(transB),
      fNA(UTILITY::Clean_name(nameA)),
      fNB(UTILITY::Clean_name(nameB)),
      fNC(UTILITY::Clean_name(nameC)),
      fNY(UTILITY::Clean_name(nameY))
   {
      static_assert(std::is_same_v<T, float>,
                    "TMVA::SOFIE - Unsupported type parsing a Gemm operator");
   }
      if (input.size() > 3)
         throw std::runtime_error("TMVA SOFIE Gemm Op Shape Inference only needs 2 or 3 input tensors");
      if (input[0].size() < 2 || input[1].size() < 2)
         throw std::runtime_error("TMVA SOFIE Gemm Op Shape Inference only accepts input tensors with >= 2 dimensions");
      std::vector<std::vector<U>> ret;
      if (input.size() == 3) {
         ret.push_back(input[2]);
      }
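      // Work only on the last two dimensions of A and B; any leading dimensions
      // are treated as a stacked (batched) matrix multiplication.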
      int ioffset = input[0].size() - 2;
      std::vector<U> s_a(input[0].begin() + ioffset, input[0].begin() + ioffset + 2);
      std::vector<U> s_b(input[1].begin() + ioffset, input[1].begin() + ioffset + 2);
      if (fAttrTransA) {
         std::reverse(s_a.begin(), s_a.end());
      }
      if (fAttrTransB) {
         std::reverse(s_b.begin(), s_b.end());
      }
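      // After the optional reversing, s_a holds {M, K} and s_b holds {K, N},
      // i.e. the dimensions of op(A) and op(B) as used by Gemm.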
      std::vector<U> s_y;
      for (size_t i = 0; i < input[0].size() - 2; i++)
         s_y.push_back(input[0][i]);
      s_y.push_back(s_a[0]);
      s_y.push_back(s_b[1]);
      ret.push_back(s_y);
      return ret;
   }
   std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input) {
      return DoShapeInference<size_t>(input);
   }

   std::vector<std::vector<Dim>> DynamicShapeInference(const std::vector<std::vector<Dim>> &input) {
      return DoShapeInference<Dim>(input);
   }
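   // For example, A of shape {2, 3, 4} and B of shape {2, 4, 5} (no transposition) give an
   // output shape {2, 3, 5}; with transA = 1, an A of shape {2, 4, 3} yields the same result.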
   void Initialize(RModel &model) {
      if (!model.CheckIfTensorAlreadyExist(fNA) || !model.CheckIfTensorAlreadyExist(fNB)) {
         throw std::runtime_error("TMVA SOFIE Gemm Op Input Tensor " + fNA + " or " + fNB +
                                  " is not found in model");
      }
      if (!fNC.empty() && !model.CheckIfTensorAlreadyExist(fNC)) {
         throw std::runtime_error("TMVA SOFIE Gemm Op Input Tensor " + fNC + " is not found in model");
      }
      bool appendOne = false;
         throw std::runtime_error("TMVA SOFIE Gemm Op Input Tensors do not have compatible shapes. A " +
                                  ConvertDynamicShapeToString(fShapeA) + " B " + ConvertDynamicShapeToString(fShapeB));
      std::vector<size_t> shapeY;
      if (!fIsDynamic)
         shapeY = ConvertShapeToInt(fShapeY);
      if (shapeY.empty()) {
         throw std::runtime_error("TMVA SOFIE Gemm Op Input Tensor " + fNC +
                                  " is dynamic and is not supported");
      if (broadcast_needed) {
         if (fIsDynamic)
            throw std::runtime_error("TMVA SOFIE Gemm Op: dynamic tensors not supported without a session");
         if (fType == "float") {
            std::shared_ptr<void> new_data_ptr(
               UTILITY::UnidirectionalBroadcast<float>(static_cast<float *>(original_data.get()), fShapeC, targetShape),
               std::default_delete<float[]>());
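            // the broadcast values then replace the stored bias data of C in the model,
            // so fShapeC matches the broadcast target shape from here on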
      shapeY.erase(shapeY.begin());

      std::cout << "Gemm (or MatMul) " << " ---> " << fNY << " shape ";
   std::string GenerateInitCode() {
      std::stringstream out;
      std::vector<size_t> shapeY = ConvertShapeToInt(fShapeY);
      out << "//--- broadcast bias tensor " << fNC << " for Gemm op\n";
      out << SP << SP << "float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_"
          << fNC << ", " << ConvertShapeToString(fShapeC) << ", " << ConvertShapeToString(shapeY) << ");\n";
      size_t length = ConvertShapeToLength(shapeY);
      out << SP << SP << "std::copy(data, data + " << length << ", tensor_" << fNC2 << ");\n";
      out << SP << SP << "delete [] data;\n";
      return out.str();
   }
   std::string Generate(std::string opName) {
      opName = "op_" + opName;
      if (fShapeA.empty() || fShapeB.empty() || fShapeY.empty())
         throw std::runtime_error("TMVA SOFIE Gemm Op called to Generate without being initialized first");
      std::stringstream out;
      out << "\n//--------- Gemm\n";
      out << SP << "char " << opName << "_transA = " << (fAttrTransA ? "\'t\'" : "\'n\'") << ";\n";
      out << SP << "char " << opName << "_transB = " << (fAttrTransB ? "\'t\'" : "\'n\'") << ";\n";
      if (dimA != dimB || dimA != dimY) {
         throw std::runtime_error("TMVA SOFIE Gemm(MatMul) has invalid shape for inputs or output");
      }
      for (int64_t i = 0; i < dimY - 2; i++) {
      out << SP << "int " << opName << "_m = " << m << ";\n";
      out << SP << "int " << opName << "_n = " << n << ";\n";
      out << SP << "int " << opName << "_k = " << k << ";\n";
      out << SP << "float " << opName << "_alpha = " << std::setprecision(std::numeric_limits<float>::max_digits10) << fAttrAlpha << ";\n";
      out << SP << "float " << opName << "_beta = " << std::setprecision(std::numeric_limits<float>::max_digits10) << fAttrBeta << ";\n";
      out << SP << "int " << opName << "_lda = " << (fAttrTransA ? m : k) << ";\n";
      out << SP << "int " << opName << "_ldb = " << (fAttrTransB ? k : n) << ";\n";
         throw std::runtime_error("TMVA SOFIE Gemm Op " + opName + " Bias tensor does not have the correct size "

         throw std::runtime_error("TMVA SOFIE Gemm Op " + opName +
                                  " Bias tensor is not present but beta value in Gemm is not zero");
      bool doStackMul = dimY > 2 && (fIsDynamic || std::stoi(lengthExtra) > 1);
      if (doStackMul) {
         out << SP << "size_t " << opName << "_yoffset = 0;\n";
         out << SP << "for (int i = 0; i < " << lengthExtra << "; i++){\n";
      }

      out << SP << "std::copy(tensor_" << fNC2 << ", tensor_" << fNC2 << " + " << lengthGemm << ", tensor_" << fNY;
      if (doStackMul) out << " + " << opName << "_yoffset";
      out << ");\n";
      if (fType == "float") {
         out << SP << "BLAS::sgemm_(&" << opName << "_transB, &" << opName << "_transA, &" << opName
             << "_n, &" << opName << "_m, &" << opName << "_k, &" << opName << "_alpha, tensor_" << fNB
             << ", &" << opName << "_ldb, tensor_" << fNA << ", &" << opName << "_lda, &" << opName << "_beta, tensor_" << fNY;
         if (doStackMul) out << " + " << opName << "_yoffset";
         out << ", &" << opName << "_n);\n";
      }
         out << SP << SP << opName << "_yoffset += " << lengthGemm << ";\n";
   std::vector<std::string> GetBlasRoutines() { return { std::string("Gemm"), std::string("Gemv") }; }