#ifndef TMVA_SOFIE_ROPERATOR_CONVTRANSPOSE_I
#define TMVA_SOFIE_ROPERATOR_CONVTRANSPOSE_I

namespace Experimental {
// ShapeInference: infer the output tensor shape from the input (X) and weight (W) shapes.
template <typename T>
auto ROperator_ConvTranspose<T>::ShapeInference(std::vector<std::vector<size_t>> input)
   -> std::vector<std::vector<size_t>>
{
   const std::vector<size_t> &inputShape = input[0];
   const std::vector<size_t> &weightShape = input[1];
   size_t size = inputShape.size();
   // Provide default values for the optional attributes
   if (fAttrStrides.empty()) {
      fAttrStrides = std::vector<size_t>(fDim, 1);
   }
   if (fAttrDilations.empty()) {
      fAttrDilations = std::vector<size_t>(fDim, 1);
   }
   // If the kernel shape is not given as an attribute, deduce it from the weight tensor,
   // including the dilation (effective kernel size)
   if (fAttrKernelShape.empty()) {
      fAttrKernelShape.resize(fDim);
      for (size_t i = 0; i < fDim; i++)
         fAttrKernelShape[i] = fShapeW[i + 2] + (fAttrDilations[i] - 1) * (fShapeW[i + 2] - 1);
   }
   if (fAttrOutputPadding.empty())
      fAttrOutputPadding = std::vector<size_t>(fDim, 0);
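   // Worked example of the effective kernel size computed above (hypothetical values, not from
   // a real model): a kernel of size fShapeW[i + 2] = 3 with dilation fAttrDilations[i] = 2 gives
   //    fAttrKernelShape[i] = 3 + (2 - 1) * (3 - 1) = 5,
   // i.e. the dilated kernel spans 5 positions even though it has only 3 taps.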
   std::vector<size_t> outShape(size);
   outShape[0] = inputShape[0];               // batch size
   outShape[1] = weightShape[1] * fAttrGroup; // number of output channels
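   // Note on the channel dimensions: for ConvTranspose the weight tensor has shape
   // {C, M/group, k1, ..., kn}, with C the number of input channels and M the number of output
   // channels, so the output channel count is weightShape[1] * group. For example (hypothetical
   // values), a weight of shape {4, 8, 3, 3} with group = 1 yields 8 output channels.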
   if (fAttrPads.empty()) {
      fAttrPads = std::vector<size_t>(2 * fDim, 0);
      if (fAttrOutputShape.size() == fDim) {
         throw std::runtime_error("ConvTranspose with output_shape explicitly set not yet supported.");
      }
      if (fAttrAutopad != "NOTSET") {
         throw std::runtime_error("ConvTranspose with padding SAME_UPPER or SAME_LOWER not supported");
      }
   }
   if (fAttrOutputShape.empty()) {
      fAttrOutputShape.resize(fDim);
      for (size_t i = 0; i < fDim; i++) {
         size_t j = i + 2; // index of the corresponding spatial dimension in the input shape
         fAttrOutputShape[i] = fAttrStrides[i] * (inputShape[j] - 1) + fAttrKernelShape[i] +
                               fAttrOutputPadding[i] - fAttrPads[i] - fAttrPads[fDim + i];
      }
   } else {
      throw std::runtime_error("ConvTranspose with output_shape explicitly set not yet supported.");
   }
   for (size_t i = 0; i < fDim; i++)
      outShape[i + 2] = fAttrOutputShape[i];
   std::vector<std::vector<size_t>> ret({outShape});
   return ret;
}
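// Worked example of the output-size formula above (hypothetical values, not from a real model):
// a 1-d ConvTranspose with input length 4, stride 2, (dilated) kernel size 3 and no padding or
// output_padding gives
//    out = 2 * (4 - 1) + 3 + 0 - 0 - 0 = 9,
// i.e. an input of shape {N, C, 4} maps to an output of shape {N, M, 9}.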
// Initialize: check that the input tensors exist in the model, run the shape inference and
// register the output tensor of the operator.
template <typename T>
void ROperator_ConvTranspose<T>::Initialize(RModel &model)
{
   if (!model.CheckIfTensorAlreadyExist(fNX)) {
      throw std::runtime_error("TMVA SOFIE Conv Transpose op Input Tensor " + fNX +
                               " is not found in model");
   }
   fShapeX = model.GetTensorShape(fNX);
   if (fShapeX.size() < 3 || fShapeX.size() > 5) {
      throw std::runtime_error("TMVA SOFIE Conv Transpose Op input data tensor " + fNX +
                               " is not of 3, 4 or 5 dimensions");
   }
   fDim = fShapeX.size() - 2;
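   // For example, a 4-dimensional input of shape {N, C, H, W} gives fDim = 2, i.e. a 2-d
   // transposed convolution; 3- and 5-dimensional inputs give the 1-d and 3-d cases.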
   if (!model.CheckIfTensorAlreadyExist(fNW)) {
      throw std::runtime_error("TMVA SOFIE Conv Transpose op Input weight Tensor " + fNW +
                               " is not found in model");
   }
   fShapeW = model.GetTensorShape(fNW);
   if (fShapeW.size() < 3 || fShapeW.size() > 5) {
      throw std::runtime_error("TMVA SOFIE Conv Transpose Op input weight tensor " + fNW +
                               " is not of 3, 4 or 5 dimensions");
   }
   // Infer the output shape from the input and weight shapes
   fShapeY = ShapeInference({fShapeX, fShapeW})[0];
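   // Sketch of the registration step that follows (the exact call is not part of this excerpt;
   // RModel::AddIntermediateTensor is assumed here): the inferred shape is attached to the
   // output tensor so that the generated Session allocates storage for it, e.g.
   //    model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);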
   // Optional bias tensor
   if (!model.CheckIfTensorAlreadyExist(fNB)) {
      throw std::runtime_error("TMVA SOFIE ConvTrans op Input Tensor " + fNB +
                               " is not found in model");
   }
   fShapeB = model.GetTensorShape(fNB);
   if (fShapeB.size() < 1)
      throw std::runtime_error("TMVA SOFIE ConvTrans op: Bias Tensor has empty shape");
   size_t bsize = ConvertShapeToLength(fShapeB);
   size_t ysize = ConvertShapeToLength(fShapeY);
   // The bias needs to be broadcast to the full output shape unless it already has it
   bool broadcast_needed = (bsize != ysize);

   if (broadcast_needed) {
      auto original_data = model.GetInitializedTensorData(fNB);
      // The bias is expected to have one element per output channel
      if (bsize != fShapeY[1])
         throw std::runtime_error("TMVA SOFIE ConvTrans op: Bias Tensor has wrong shape: " +
                                  ConvertShapeToString(fShapeB));
      if (fType != "float")
         throw std::runtime_error("TMVA SOFIE ConvTrans op: Broadcasting for non-float type tensors is not supported");
      // Broadcast the bias to the output shape {N, C, d1, ..., dn}
      std::shared_ptr<void> new_data_ptr(
         UTILITY::BroadcastConvBias<float>(static_cast<float *>(original_data.get()), bsize, fShapeY),
         std::default_delete<float[]>());
      if (!fUseSession) {
         // Without a session the initialized bias tensor is updated in place and keeps its name
         model.UpdateInitializedTensor(fNB, model.GetTensorType(fNB), fShapeY, new_data_ptr);
         fNBroadcastedB = fNB;
      } else {
         // With a session the broadcasting is done in the generated init code, so a new
         // intermediate tensor holding the broadcast bias is registered
         fNBroadcastedB = "Broadcasted" + fNB;
         model.AddIntermediateTensor(fNBroadcastedB, model.GetTensorType(fNB), fShapeY);
      }
   } else {
      if (fShapeY != fShapeB)
         throw std::runtime_error("TMVA SOFIE ConvTrans op: Broadcasting is not needed but bias has wrong shape " +
                                  ConvertShapeToString(fShapeB));
      fNBroadcastedB = fNB;
   }
}
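// Note on the broadcast above: the bias of a ConvTranspose has one value per output channel,
// and BroadcastConvBias expands it to the full output shape by repeating each channel value
// over the batch and spatial dimensions. This lets the generated inference code add the bias
// with a single BLAS axpy call at the end of Generate, i.e. effectively
// tensor_Y[i] += bias[channel(i)] for every output element i.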
// GenerateInitCode: generate the code run once at initialization time, here the broadcasting
// of the bias tensor to the output shape.
template <typename T>
std::string ROperator_ConvTranspose<T>::GenerateInitCode()
{
   std::stringstream out;
   size_t bsize = ConvertShapeToLength(fShapeB);
   size_t ysize = ConvertShapeToLength(fShapeY);
   // Generate the broadcasting code only if it is actually needed
   if (bsize != ysize && !fNBroadcastedB.empty()) {
      out << SP << SP
          << "float * data = TMVA::Experimental::SOFIE::UTILITY::BroadcastConvBias<float>(tensor_" << fNB
          << ", " << bsize << ", " << ConvertShapeToString(fShapeY) << ");\n";
      out << SP << SP << "std::copy(data, data + " << ConvertShapeToLength(fShapeY) << ", tensor_"
          << fNBroadcastedB << ");\n";
      out << SP << SP << "delete[] data;\n";
   }
   return out.str();
}
// GenerateSessionMembersCode: declare the work buffers used by the operator (the rearranged
// kernel matrix and the column buffer) as data members of the generated Session class.
template <typename T>
std::string ROperator_ConvTranspose<T>::GenerateSessionMembersCode(std::string opName)
{
   size_t kernelSize = 1;
   size_t inputSize = 1;
   for (size_t i = 0; i < fDim; i++) {
      inputSize *= fShapeX[2 + i];
      kernelSize *= fAttrKernelShape[i];
   }
   opName = "op_" + opName;
   std::stringstream out;
   // Buffer holding the kernel rearranged into a (dilated) matrix
   out << "std::vector<" << fType << "> fVec_" << opName << "_f = std::vector<" << fType << ">("
       << fShapeW[0] * fShapeW[1] * kernelSize << ");\n";
   // Column buffer used by the GEMM + col2im implementation
   out << "std::vector<" << fType << "> fVec_" << opName << "_xcol = std::vector<" << fType << ">("
       << kernelSize * fShapeW[1] * inputSize << ");\n";
   return out.str();
}
// Generate: emit the inference code for this ConvTranspose operator.
template <typename T>
std::string ROperator_ConvTranspose<T>::Generate(std::string OpName)
{
   OpName = "op_" + OpName;
   if (fShapeX.empty() || fShapeW.empty() || (fNB != "" && fShapeB.empty()) || fShapeY.empty()) {
      throw std::runtime_error("TMVA SOFIE Conv Transpose Op called to Generate without being initialized first");
   }
   std::stringstream out;
   size_t bsize = fShapeX[0]; // batch size
   // Kernel spatial dimensions (depth and height default to 1 for the lower-dimensional cases)
   size_t kDepth = (fDim > 2) ? fShapeW[2] : 1;
   size_t kHeight = (fDim > 1) ? fShapeW[fDim] : 1;
   size_t kWidth = fShapeW[fDim + 1];
   // Input spatial dimensions
   size_t iDepth = (fDim > 2) ? fShapeX[2] : 1;
   size_t iHeight = (fDim > 1) ? fShapeX[fDim] : 1;
   size_t iWidth = fShapeX[fDim + 1];
   // Output spatial dimensions
   size_t oDepth = (fDim > 2) ? fShapeY[2] : 1;
   size_t oHeight = (fDim > 1) ? fShapeY[fDim] : 1;
   size_t oWidth = fShapeY[fDim + 1];
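   // For the common 2-d case (fDim == 2) the indices above resolve to the familiar layout:
   // the weight tensor {C, M/group, kH, kW} gives kHeight = fShapeW[2] and kWidth = fShapeW[3],
   // the input {N, C, H, W} gives iHeight = fShapeX[2] and iWidth = fShapeX[3], the output
   // {N, M, oH, oW} gives oHeight and oWidth, and all the depth values fall back to 1.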
   out << "\n//---- operator ConvTranspose " << OpName << "\n";
   // Pointer into the kernel-matrix buffer declared as a Session data member
   out << SP << fType << " * " << OpName << "_f = fVec_" << OpName << "_f.data();\n";
   size_t kernelSize = fAttrKernelShape[0];
   if (fDim > 1)
      kernelSize *= fAttrKernelShape[1];
   // (alternative path without a session: declare a zero-initialized local array instead)
   out << SP << fType << " " << OpName << "_f[" << fShapeW[0] * fShapeW[1] * kernelSize << "] = {0};\n";
   // Strides used to address the original weight tensor and the rearranged (dilated) kernel
   // matrix; id/ih/iw are the indices of the depth/height/width entries in the attributes
   size_t id = (fDim > 2) ? fDim - 3 : 2;
   size_t ih = (fDim > 1) ? fDim - 2 : 1;
   size_t iw = fDim - 1;
   size_t wstrideDil = fAttrDilations[iw];
   size_t hstride = kWidth;
   size_t hstrideDil = fAttrKernelShape[iw];
   if (fDim > 1)
      hstrideDil *= fAttrDilations[ih];
   size_t dstride = kHeight * kWidth;
   size_t dstrideDil = fAttrKernelShape[iw];
   if (fDim > 1)
      dstrideDil *= fAttrKernelShape[ih];
   if (fDim > 2)
      dstrideDil *= fAttrDilations[id];
   size_t icstride = kHeight * kWidth * kDepth;
   size_t icstrideDil = 1;
   for (size_t i = 0; i < fDim; i++)
      icstrideDil *= fAttrKernelShape[i];
   size_t ocstride = fShapeW[1] * icstride;
   size_t ocstrideDil = fShapeW[1] * icstrideDil;
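   // What follows implements the transposed convolution as GEMM + col2im:
   //  1. The loop emitted below copies tensor_W (layout {C, M/group, kD, kH, kW}) into the
   //     matrix _f of size C x (M/group * dilated-kernel-volume), spacing the kernel elements
   //     by their dilation and leaving the skipped positions at zero. For a hypothetical 1-d
   //     kernel of 3 taps with dilation 2, tap kw lands at column kw * 2 of a row of length
   //     fAttrKernelShape[0] = 5.
   //  2. For each input image a BLAS sgemm computes the column matrix
   //        xcol[oc * K + k][p] = sum over ic of  _f[ic][oc * K + k] * X[ic][p]
   //     with K the dilated kernel volume, p the input spatial position, ic the input channel
   //     and oc the output channel (per group).
   //  3. col2im scatter-adds each column of xcol into the output image, which is the inverse
   //     of the im2col gathering used by the forward convolution.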
   // Copy the weight tensor into the rearranged (dilated) kernel matrix _f
   out << SP << "for (std::size_t ic = 0; ic < " << fShapeW[0] << "; ic++) {\n";
   out << SP << SP << "for (std::size_t oc = 0; oc < " << fShapeW[1] << "; oc++) {\n";
   if (fDim > 2)
      out << SP << SP << SP << "for (std::size_t kd = 0; kd < " << kDepth << "; kd++) {\n";
   if (fDim > 1)
      out << SP << SP << SP << "for (std::size_t kh = 0; kh < " << kHeight << "; kh++) {\n";
   out << SP << SP << SP << SP << "for (std::size_t kw = 0; kw < " << kWidth << "; kw++) {\n";
   out << SP << SP << SP << SP << SP << OpName << "_f[ic * " << ocstrideDil << " + oc * " << icstrideDil;
   if (fDim > 2)
      out << " + kd * " << dstrideDil;
   if (fDim > 1)
      out << " + kh * " << hstrideDil;
   out << " + kw * " << wstrideDil << " ] = tensor_" << fNW << "[ic * " << ocstride << " + oc * " << icstride;
   if (fDim > 2)
      out << " + kd * " << dstride;
   if (fDim > 1)
      out << " + kh * " << hstride;
   out << " + kw];\n";
   // Close the generated loops
   out << SP << SP << SP << SP << "}\n";
   if (fDim > 1)
      out << SP << SP << SP << "}\n";
   if (fDim > 2)
      out << SP << SP << SP << "}\n";
   out << SP << SP << "}\n";
   out << SP << "}\n";
   // Parameters of the BLAS sgemm calls emitted below: A = the input image (no transpose),
   // B = the kernel matrix _f (transposed)
   out << SP << "char " << OpName << "_transA = 'N';\n";
   out << SP << "char " << OpName << "_transB = 'T';\n";
   out << SP << "int " << OpName << "_m = " << iHeight * iWidth * iDepth << ";\n";
   out << SP << "int " << OpName << "_n = " << icstrideDil * fShapeW[1] << ";\n";
   out << SP << "int " << OpName << "_k = " << fShapeW[0] << ";\n";
   out << SP << "float " << OpName << "_alpha = 1.0;\n";
   out << SP << "float " << OpName << "_beta = 0.0;\n";
   // Pointer into the column buffer declared as a Session data member
   out << SP << fType << " * " << OpName << "_xcol = fVec_" << OpName << "_xcol.data();\n";
   // (alternative path without a session: declare a zero-initialized local array instead)
   out << SP << fType << " " << OpName << "_xcol[" << fShapeW[0] * icstrideDil * oDepth * oHeight * oWidth
       << "] = {0};\n";
   // Loop over the images in the batch
   out << SP << "for (size_t n = 0; n < " << bsize << "; n++) {\n";
   // Asymmetric padding is not supported: replace the begin/end pads by their average
   if (fDim == 1) {
      if (fAttrPads[0] != fAttrPads[1]) {
         std::cout << "TMVA SOFIE Operator ConvTranspose: asymmetric padding not supported. Assume an average padding"
                   << std::endl;
         fAttrPads[0] = (fAttrPads[0] + fAttrPads[1]) / 2;
      }
   } else if (fDim == 2) {
      if (fAttrPads[0] != fAttrPads[2] || fAttrPads[1] != fAttrPads[3]) {
         std::cout << "TMVA SOFIE Operator ConvTranspose: asymmetric padding not supported. Assume an average padding"
                   << std::endl;
         fAttrPads[0] = (fAttrPads[0] + fAttrPads[2]) / 2;
         fAttrPads[1] = (fAttrPads[1] + fAttrPads[3]) / 2;
      }
   } else if (fDim == 3) {
      if (fAttrPads[0] != fAttrPads[3] || fAttrPads[1] != fAttrPads[4] || fAttrPads[2] != fAttrPads[5]) {
         std::cout << "TMVA SOFIE Operator ConvTranspose: asymmetric padding not supported. Assume an average padding"
                   << std::endl;
         fAttrPads[0] = (fAttrPads[0] + fAttrPads[3]) / 2;
         fAttrPads[1] = (fAttrPads[1] + fAttrPads[4]) / 2;
         fAttrPads[2] = (fAttrPads[2] + fAttrPads[5]) / 2;
      }
   }
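   // Example of the averaging above (hypothetical values): for a 2-d operator with
   // fAttrPads = {1, 0, 3, 2} (begin_h, begin_w, end_h, end_w) the asymmetric pairs become
   // fAttrPads[0] = (1 + 3) / 2 = 2 and fAttrPads[1] = (0 + 2) / 2 = 1, i.e. a symmetric
   // padding of 2 along the height and 1 along the width is used in the generated code.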
   if (fAttrGroup == 1) {
      // Offsets of the current input and output image inside the batch
      out << SP << SP << "size_t x_offset = n * " << fShapeX[1] * iDepth * iHeight * iWidth << ";\n";
      out << SP << SP << "size_t out_offset = n * " << fShapeY[1] * oDepth * oHeight * oWidth << ";\n";
      // Emit the gemm call computing the column matrix xcol from the input image and the kernel matrix
      out << SP << SP << "BLAS::sgemm_(&" << OpName << "_transA, &" << OpName << "_transB, &" << OpName << "_m, &"
          << OpName << "_n, &" << OpName << "_k, &" << OpName << "_alpha, "
          << "tensor_" << fNX << " + x_offset, &" << OpName << "_m,\n";
      out << SP << SP << SP << OpName << "_f, &" << OpName << "_n, &" << OpName << "_beta, "
          << OpName << "_xcol, &" << OpName << "_m);\n";
      // Emit the col2im call that scatter-adds the columns of xcol into the output image
      if (fDim < 3) {
         out << SP << SP << "TMVA::Experimental::SOFIE::UTILITY::col2im<float>(" << OpName << "_xcol,"
             << fShapeY[1] << "," << oHeight << "," << oWidth << ",";
         if (fDim == 1)
            // 1-d case: the height arguments are fixed to 1
            out << "1, " << fAttrKernelShape[0] << ",0," << fAttrPads[0] << ",1," << fAttrStrides[0] << ",1,"
                << fAttrDilations[0];
         else
            out << fAttrKernelShape[0] << "," << fAttrKernelShape[1] << "," << fAttrPads[0] << "," << fAttrPads[1]
                << "," << fAttrStrides[0] << "," << fAttrStrides[1] << "," << fAttrDilations[0] << ","
                << fAttrDilations[1];
         out << ", tensor_" << fNY << " + out_offset);\n\n ";
      } else {
         // 3-d case: a 3-dimensional col2im is not available yet
         throw std::runtime_error("TMVA SOFIE 3D Conv Transpose not yet supported");
         // (unreachable) the corresponding 3-d image-to-column call would be of the form
         out << SP << SP << "TMVA::Experimental::SOFIE::UTILITY::Im2col_3d<float>(tensor_" << fNX
             << " + x_offset,"
             << fShapeX[1] << "," << oDepth << "," << oHeight << "," << oWidth << "," << fAttrKernelShape[0] << ","
             << fAttrKernelShape[1] << "," << fAttrKernelShape[2] << "," << fAttrPads[0] << "," << fAttrPads[1] << ","
             << fAttrPads[2] << "," << fAttrStrides[0] << "," << fAttrStrides[1] << "," << fAttrStrides[2] << ","
             << fAttrDilations[0] << "," << fAttrDilations[1] << "," << fAttrDilations[2] << "," << OpName
             << "_xcol);\n";
      }
   } else {
      // More than one group: loop over the groups and process each group separately
      out << SP << SP << "for (size_t g = 0; g < " << fAttrGroup << "; g++) {\n";
      out << SP << SP << "size_t x_offset = n * " << fShapeX[1] * iHeight * iWidth << " + g * "
          << fShapeX[1] * iHeight * iWidth / fAttrGroup << ";\n ";
      out << SP << SP << "size_t out_offset = n * " << fShapeY[1] * oHeight * oWidth << " + g * "
          << fShapeY[1] * oHeight * oWidth / fAttrGroup << ";\n ";
      out << SP << SP << "BLAS::sgemm_(&" << OpName << "_transA, &" << OpName << "_transB, &" << OpName << "_m, &"
          << OpName << "_n, &" << OpName << "_k, &" << OpName << "_alpha, "
          << "tensor_" << fNX << " + x_offset, &" << OpName << "_m,\n";
      out << SP << SP << SP << OpName << "_f, &" << OpName << "_n, &" << OpName
          << "_beta, " << OpName << "_xcol , &" << OpName << "_m);\n";
      // Emit the col2im call scatter-adding the columns of xcol into the output image of this group
      if (fDim < 3) {
         out << SP << SP << "TMVA::Experimental::SOFIE::UTILITY::col2im<float>(" << OpName << "_xcol,"
             << fShapeY[1] << "," << oHeight << "," << oWidth << ",";
         if (fDim == 1)
            // 1-d case: the height arguments are fixed to 1
            out << "1, " << fAttrKernelShape[0] << ",0," << fAttrPads[0] << ",1," << fAttrStrides[0] << ",1,"
                << fAttrDilations[0];
         else
            out << fAttrKernelShape[0] << "," << fAttrKernelShape[1] << "," << fAttrPads[0] << "," << fAttrPads[1]
                << "," << fAttrStrides[0] << "," << fAttrStrides[1] << "," << fAttrDilations[0] << ","
                << fAttrDilations[1];
         out << ", tensor_" << fNY << " + out_offset);\n\n ";
      } else {
         // 3-d case: a 3-dimensional col2im is not available yet
         throw std::runtime_error("TMVA SOFIE 3D Conv Transpose not yet supported");
         // (unreachable) the corresponding 3-d image-to-column call would be of the form
         out << SP << SP << "TMVA::Experimental::SOFIE::UTILITY::Im2col_3d<float>(tensor_" << fNX
             << " + x_offset,"
             << fShapeX[1] << "," << oDepth << "," << oHeight << "," << oWidth << "," << fAttrKernelShape[0] << ","
             << fAttrKernelShape[1] << "," << fAttrKernelShape[2] << "," << fAttrPads[0] << "," << fAttrPads[1] << ","
             << fAttrPads[2] << "," << fAttrStrides[0] << "," << fAttrStrides[1] << "," << fAttrStrides[2] << ","
             << fAttrDilations[0] << "," << fAttrDilations[1] << "," << fAttrDilations[2] << "," << OpName
             << "_xcol);\n";
      }
      // Close the generated loop over the groups
      out << SP << SP << "}\n";
   }
   // Close the generated loop over the batch entries
   out << SP << "}\n";
   // Add the bias (already broadcast to the output shape) with a BLAS axpy call: Y += 1.0 * B
   if (fNBroadcastedB != "") {
      out << SP << "int " << OpName << "_size = " << fShapeY[0] * fShapeY[1] * oDepth * oHeight * oWidth << ";\n";
      out << SP << "float " << OpName << "_gamma = 1.0;\n";
      out << SP << "int " << OpName << "_incx = 1;\n";
      out << SP << "int " << OpName << "_incy = 1;\n";
      out << SP << "BLAS::saxpy_(&" << OpName << "_size, &" << OpName << "_gamma, tensor_" << fNBroadcastedB << ", &"
          << OpName << "_incx, tensor_" << fNY << ", &" << OpName << "_incy);\n";
   }
   return out.str();
}