#ifndef TMVA_SOFIE_ROPERATOR_GEMM
#define TMVA_SOFIE_ROPERATOR_GEMM

#include "TMVA/SOFIE_common.hxx"
#include "TMVA/ROperator.hxx"
#include "TMVA/RModel.hxx"

#include <algorithm>
#include <iomanip>
#include <limits>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <vector>

namespace TMVA{
namespace Experimental{
namespace SOFIE{

   template <typename T>
   class ROperator_Gemm final : public ROperator
   {

   private:
      float fAttrAlpha = 1.0;
      float fAttrBeta = 1.0;
      int_t fAttrTransA = 0;
      int_t fAttrTransB = 0;

      std::string fNA;
      std::string fNB;
      std::string fNC = "";
      std::string fNC2;            // name of the (possibly broadcast) bias tensor
      std::string fNY;
      std::vector<size_t> fShapeA;
      std::vector<size_t> fShapeB;
      std::vector<size_t> fShapeC;
      std::vector<size_t> fShapeY;

      std::string fType;

   public:
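
      // Illustrative usage of the constructors declared below (a sketch with
      // hypothetical tensor names, not taken from this file): a float Gemm
      // computing Y = alpha * A * B + beta * C with alpha = beta = 1 and no
      // transposition would be created as
      //
      //    ROperator_Gemm<float> gemm(1.0, 1.0, 0, 0, "A", "B", "C", "Y");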
      ROperator_Gemm(){}

      ROperator_Gemm(float alpha, float beta, int_t transA, int_t transB,
                     std::string nameA, std::string nameB, std::string nameY):
         fAttrAlpha(alpha), fAttrBeta(beta), fAttrTransA(transA), fAttrTransB(transB),
         fNA(UTILITY::Clean_name(nameA)), fNB(UTILITY::Clean_name(nameB)),
         fNY(UTILITY::Clean_name(nameY)) {

         if (std::is_same<T, float>::value) {
            fType = "float";
         } else {
            throw std::runtime_error(
               "TMVA SOFIE Encountered unsupported type parsing a gemm operator");
         }
      }
      ROperator_Gemm(float alpha, float beta, int_t transA, int_t transB,
                     std::string nameA, std::string nameB, std::string nameC, std::string nameY):
         fAttrAlpha(alpha), fAttrBeta(beta), fAttrTransA(transA), fAttrTransB(transB),
         fNA(UTILITY::Clean_name(nameA)), fNB(UTILITY::Clean_name(nameB)),
         fNC(UTILITY::Clean_name(nameC)), fNY(UTILITY::Clean_name(nameY)) {

         if (std::is_same<T, float>::value) {
            fType = "float";
         } else {
            throw std::runtime_error(
               "TMVA SOFIE Encountered unsupported type parsing a gemm operator");
         }
      }

      // the output tensor has the same type as the input tensor A
      std::vector<ETensorType> TypeInference(std::vector<ETensorType> input){
         return {input[0]};
      }
      std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input){
         if (input.size() > 3)
            throw std::runtime_error(
               "TMVA SOFIE Gemm Op Shape Inference needs 2 or 3 input tensors");
         for (auto& i: input){
            if (i.size() > 2)
               throw std::runtime_error(
                  "TMVA SOFIE Gemm Op Shape Inference only accepts input tensors with at most 2 dimensions");
         }
         std::vector<std::vector<size_t>> ret;
         if (input.size() == 3){
            // the (already broadcast) bias C has the same shape as the output Y
            ret.push_back(input[2]);
            return ret;
         }
         std::vector<size_t> s_a(input[0]);
         std::vector<size_t> s_b(input[1]);
         if (fAttrTransA)
            std::reverse(s_a.begin(), s_a.end());
         if (fAttrTransB)
            std::reverse(s_b.begin(), s_b.end());
         std::vector<size_t> s_y(2);
         s_y[0] = s_a[0];
         s_y[1] = s_b[1];
         ret.push_back(s_y);
         return ret;
      }
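
      // Example (illustrative values only): with fAttrTransA = fAttrTransB = 0,
      // ShapeInference({{64, 128}, {128, 10}}) returns {{64, 10}}, i.e. the output
      // of A(M,K) * B(K,N) has shape (M,N). If a transpose attribute is set, the
      // corresponding input shape is reversed before (M,N) is formed.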
      void Initialize(RModel& model){
         // A and B must be graph inputs or already initialized intermediate tensors
         if ((model.CheckIfTensorAlreadyExist(fNA) == false) ||
             (model.CheckIfTensorAlreadyExist(fNB) == false)) {
            throw std::runtime_error(
               "TMVA SOFIE Gemm Op Input Tensor " + fNA + " or " + fNB + " is not found in model");
         }
         if (fNC != ""){
            if (model.CheckIfTensorAlreadyExist(fNC) == false){
               throw std::runtime_error(
                  "TMVA SOFIE Gemm Op Input Tensor " + fNC + " is not found in model");
            }
         }
         fShapeA = model.GetTensorShape(fNA);
         if (fShapeA.size() != 2){
            throw std::runtime_error(
               "TMVA SOFIE Gemm Op Input Tensor " + fNA + " is not of 2 dimensions");
         }
         fShapeB = model.GetTensorShape(fNB);
         if (fShapeB.size() != 2){
            throw std::runtime_error(
               "TMVA SOFIE Gemm Op Input Tensor " + fNB + " is not of 2 dimensions");
         }
         fShapeY = ShapeInference({fShapeA, fShapeB})[0];
         if (fNC != ""){
            fShapeC = model.GetTensorShape(fNC);
            fNC2 = fNC;
            // the bias C must be broadcast if its shape differs from the output shape
            bool broadcast_needed = false;
            if (fShapeC != fShapeY)
               broadcast_needed = true;

            if (broadcast_needed) {
               if (!model.UseSession()) {
                  // broadcast the initialized bias data now and update the tensor in the model
                  if (fType == "float") {
                     std::shared_ptr<void> new_data_ptr(UTILITY::Unidirectional_broadcast<float>(
                           static_cast<float *>(model.GetInitializedTensorData(fNC).get()), fShapeC, fShapeY),
                        std::default_delete<float[]>());
                     model.UpdateInitializedTensor(fNC, model.GetTensorType(fNC), fShapeY, new_data_ptr);
                     fShapeC = fShapeY;
                  }
               } else {
                  // when a Session class is generated, register a separate broadcast bias
                  // tensor and emit the broadcasting code in GenerateInitCode()
                  fNC2 = fNC + "bcast";
                  if (!model.CheckIfTensorAlreadyExist(fNC2))
                     model.AddIntermediateTensor(fNC2, model.GetTensorType(fNC), fShapeY);
               }
            }
         }
         model.AddIntermediateTensor(fNY, model.GetTensorType(fNA), fShapeY);
         model.AddNeededStdLib("algorithm");
      }
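
      // Illustrative example: for a hypothetical bias tensor of shape (10) and an
      // output shape (64, 10), Initialize() requires broadcasting of the bias. The
      // broadcast is either applied directly to the initialized data (as above) or,
      // when a Session class is generated, deferred to the code emitted by
      // GenerateInitCode() below, which fills the separate broadcast bias tensor.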
      std::string GenerateInitCode()
      {
         std::stringstream out;
         // emit code that broadcasts the original bias tensor into the separate
         // broadcast bias tensor registered in Initialize()
         if (fShapeC != fShapeY && fNC2 != fNC) {
            out << "   std::vector<size_t> oldShape = " << ConvertShapeToString(fShapeC) << ";\n";
            out << "   std::vector<size_t> newShape = " << ConvertShapeToString(fShapeY) << ";\n";
            std::string original_bias_tensor = "tensor_" + fNC;
            std::string new_bias_tensor = "tensor_" + fNC2;
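
            // The statements below emit code that looks roughly like the following
            // sketch (for a hypothetical bias "b" of shape (10), broadcast to an
            // output shape (64, 10) into the tensor "bbcast"):
            //
            //    float * newData_ptr = TMVA::Experimental::SOFIE::UTILITY::Unidirectional_broadcast<float>(tensor_b, oldShape, newShape);
            //    std::copy(newData_ptr, newData_ptr + 640, tensor_bbcast);
            //    delete [] newData_ptr;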
            out << "   float * newData_ptr = TMVA::Experimental::SOFIE::UTILITY::Unidirectional_broadcast<float>("
                << original_bias_tensor << ", oldShape, newShape);\n";
            size_t length = ConvertShapeToLength(fShapeY);
            out << "   std::copy(newData_ptr, newData_ptr + " << length << ", " << new_bias_tensor << ");\n";
            out << "   delete [] newData_ptr;\n";
         }
         return out.str();
      }
      std::string Generate(std::string OpName){
         OpName = "op_" + OpName;

         if (fShapeA.empty() || fShapeB.empty() || fShapeY.empty() || (fNC != "" && fShapeC.empty())) {
            throw std::runtime_error(
               "TMVA SOFIE Gemm Op called to Generate without being initialized first");
         }
         std::stringstream out;
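
         // For a hypothetical operator named "op_0" acting on tensors A (64x128),
         // B (128x10), a broadcast bias Cbcast and an output Y, the code emitted
         // below looks roughly like:
         //
         //    char op_0_transA = 'n';
         //    char op_0_transB = 'n';
         //    int op_0_m = 64; int op_0_n = 10; int op_0_k = 128;
         //    float op_0_alpha = 1; float op_0_beta = 1;
         //    int op_0_lda = 128; int op_0_ldb = 10;
         //    std::copy(tensor_Cbcast, tensor_Cbcast + 640, tensor_Y);
         //    BLAS::sgemm_(&op_0_transB, &op_0_transA, &op_0_n, &op_0_m, &op_0_k,
         //                 &op_0_alpha, tensor_B, &op_0_ldb, tensor_A, &op_0_lda,
         //                 &op_0_beta, tensor_Y, &op_0_n);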
         out << "\n//--------- Gemm\n";
         out << SP << "char " << OpName << "_transA = " << (fAttrTransA ? "\'t\'" : "\'n\'") << ";\n";
         out << SP << "char " << OpName << "_transB = " << (fAttrTransB ? "\'t\'" : "\'n\'") << ";\n";
         // op(A) is (M x K), op(B) is (K x N) and Y is (M x N)
         int m = (fAttrTransA ? fShapeA[1] : fShapeA[0]);
         int n = (fAttrTransB ? fShapeB[0] : fShapeB[1]);
         int k = (fAttrTransA ? fShapeA[0] : fShapeA[1]);
         out << SP << "int " << OpName << "_m = " << m << ";\n";
         out << SP << "int " << OpName << "_n = " << n << ";\n";
         out << SP << "int " << OpName << "_k = " << k << ";\n";
         out << SP << "float " << OpName << "_alpha = "
             << std::setprecision(std::numeric_limits<float>::max_digits10) << fAttrAlpha << ";\n";
         out << SP << "float " << OpName << "_beta = "
             << std::setprecision(std::numeric_limits<float>::max_digits10) << fAttrBeta << ";\n";
         out << SP << "int " << OpName << "_lda = " << (fAttrTransA ? m : k) << ";\n";
         out << SP << "int " << OpName << "_ldb = " << (fAttrTransB ? k : n) << ";\n";
         if (fNC != ""){
            // sgemm computes Y = alpha * op(A) * op(B) + beta * Y, so the (broadcast)
            // bias C is copied into the output tensor before the call
            size_t length = ConvertShapeToLength(fShapeY);
            out << SP << "std::copy(" << "tensor_" << fNC2 << ", " << "tensor_" << fNC2
                << " + " << length << ", " << "tensor_" << fNY << ");\n";
         }
         if (fType == "float"){
            // the row-major tensors are passed to the column-major Fortran BLAS with
            // A and B (and m and n) swapped, so no explicit transposition is required
            out << SP << "BLAS::sgemm_(&" << OpName << "_transB, &" << OpName << "_transA, &" << OpName
                << "_n, &" << OpName << "_m, &" << OpName << "_k, &" << OpName << "_alpha, " << "tensor_" << fNB
                << ", &" << OpName << "_ldb, " << "tensor_" << fNA << ", &" << OpName << "_lda, &" << OpName
                << "_beta, " << "tensor_" << fNY << ", &" << OpName << "_n);\n";
         }
         return out.str();
      }

   };

}//SOFIE
}//Experimental
}//TMVA

#endif //TMVA_SOFIE_ROPERATOR_GEMM