1#ifndef TMVA_SOFIE_ROPERATOR_LAYERNORMALIZATION
2#define TMVA_SOFIE_ROPERATOR_LAYERNORMALIZATION
11namespace Experimental {
56 const std::string &nameScale,
const std::string &nameB,
const std::string &nameY,
57 const std::string &nameMean,
const std::string &nameInvStdDev)
59 fNScale(UTILITY::Clean_name(nameScale)),
fNB(UTILITY::Clean_name(nameB)),
60 fNY(UTILITY::Clean_name(nameY)),
fNMean(UTILITY::Clean_name(nameMean)),
fNInvStdDev(UTILITY::Clean_name(nameInvStdDev))
71 throw std::runtime_error(
"TMVA::SOFIE - Tensor " +
fNX +
" not found.");
124 if (isDynamic || lengthB <
static_cast<size_t>(std::stoi(
fLength))) {
134 std::stringstream out;
136 out <<
SP <<
"// Broadcasting the bias of LayerNormalization op\n";
138 out <<
SP <<
SP <<
"float* data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_";
141 out <<
SP <<
"delete[] data;\n";
149 OpName =
"op_" + OpName;
151 throw std::runtime_error(
"TMVA::SOFIE LayerNormalization operator " + OpName +
152 " called to generate without being initialized first.");
155 throw std::runtime_error(
"TMVA::SOFIE LayerNormalization operator not "
156 "implemented for input tensor of size > 5.");
159 std::stringstream out;
161 out <<
"//---- Layer Normalization operator " << OpName <<
"\n";
164 out <<
SP <<
"std::vector<size_t> " << OpName <<
"_InputShape ({";
165 for (
size_t i = 0; i <
fSize; i++) {
172 std::string inputShape = OpName +
"_InputShape";
175 std::string InputIndex =
"axis_0 * " + strides[0].GetVal();
176 for (
size_t i = 1; i <
fSize; i++) {
177 InputIndex +=
" + axis_" + std::to_string(i) +
" * " + strides[i].GetVal();
181 std::string axesIndex =
"axis_" + std::to_string(0) +
" * " + axesStrides[0].GetVal();
182 for (
size_t i = 1; i <
fAxis; i++) {
183 axesIndex +=
" + axis_" + std::to_string(i) +
" * " + axesStrides[i].GetVal();
187 std::string normalizedIndex =
"axis_" + std::to_string(
fAxis) +
" * " + normalizedStrides[0].GetVal();
189 normalizedIndex +=
" + axis_" + std::to_string(i) +
" * " + normalizedStrides[i -
fAxis].GetVal();
194 out <<
SP <<
"for (size_t i = 0; i < " <<
fLength <<
"; i++) {\n";
195 out <<
SP <<
SP <<
"tensor_" <<
fNCastedX <<
"[i] = " <<
"static_cast<float>(tensor_" <<
fNX;
200 out <<
SP <<
"// Compute the mean\n";
202 for (
size_t i = 0; i <
fAxis; i++) {
203 std::string iIdx =
"axis_" + std::to_string(i);
204 out <<
SP <<
"for (size_t " << iIdx <<
" = 0; " << iIdx <<
" < " << inputShape;
205 out <<
"[" << i <<
"]; " << iIdx <<
"++) {\n";
207 out <<
SP <<
SP <<
fType <<
" sum = 0.;\n";
210 std::string jIdx =
"axis_" + std::to_string(j);
211 out <<
SP <<
SP <<
"for (size_t " << jIdx <<
" = 0; " << jIdx <<
" < " << inputShape;
212 out <<
"[" << j <<
"]; " << jIdx <<
"++) {\n";
214 out <<
SP <<
SP <<
SP <<
"sum += tensor_" <<
fNX <<
"[" << InputIndex <<
"];\n";
216 out <<
SP <<
SP <<
"}\n";
218 out <<
SP <<
SP <<
"tensor_" <<
fNMean <<
"[" << axesIndex <<
"] = sum / " <<
fType <<
"(";
224 out <<
SP <<
"// Compute the inverse Standard Deviation\n";
226 for (
size_t i = 0; i <
fAxis; i++) {
227 std::string iIdx =
"axis_" + std::to_string(i);
228 out <<
SP <<
"for (size_t " << iIdx <<
" = 0; " << iIdx <<
" < " << inputShape;
229 out <<
"[" << i <<
"]; " << iIdx <<
"++){\n";
232 out <<
SP <<
SP <<
fType <<
" sum = 0.;\n";
235 std::string jIdx =
"axis_" + std::to_string(j);
236 out <<
SP <<
SP <<
"for (size_t " << jIdx <<
" = 0; " << jIdx <<
" < " << inputShape;
237 out <<
"[" << j <<
"]; " << jIdx <<
"++){\n";
239 out <<
SP <<
SP <<
SP <<
"sum += std::pow(tensor_" <<
fNX <<
"[" << InputIndex <<
"] - tensor_";
240 out <<
fNMean <<
"[" << axesIndex <<
"], 2);\n";
242 out <<
SP <<
SP <<
"}\n";
244 out <<
SP <<
SP <<
"tensor_" <<
fNInvStdDev <<
"[" << axesIndex <<
"] = 1 / std::sqrt(";
246 for (
size_t i = 0; i <
fAxis; i++) {
251 out <<
"// NormalizedX = InvStdDev * (CastedX - Mean)\n";
252 for (
size_t i = 0; i <
fAxis; i++) {
253 std::string iIdx =
"axis_" + std::to_string(i);
254 out <<
SP <<
"for (size_t " << iIdx <<
" = 0; " << iIdx <<
" < " << inputShape;
255 out <<
"[" << i <<
"]; " << iIdx <<
"++){\n";
258 std::string jIdx =
"axis_" + std::to_string(j);
259 out <<
SP <<
SP <<
"for (size_t " << jIdx <<
" = 0; " << jIdx <<
" < " << inputShape;
260 out <<
"[" << j <<
"]; " << jIdx <<
"++){\n";
264 out <<
"] - tensor_" <<
fNMean <<
"[" << axesIndex <<
"])\n";
266 out <<
SP <<
SP <<
"}\n";
271 out <<
"// Y = Scale o NormalizedX";
272 for (
size_t i = 0; i <
fAxis; i++) {
273 std::string iIdx =
"axis_" + std::to_string(i);
274 out <<
SP <<
"for (size_t " << iIdx <<
" = 0; " << iIdx <<
" < " << inputShape;
275 out <<
"[" << i <<
"]; " << iIdx <<
"++){\n";
278 std::string jIdx =
"axis_" + std::to_string(j);
279 out <<
SP <<
SP <<
"for (size_t " << jIdx <<
" = 0; " << jIdx <<
" < " << inputShape;
280 out <<
"[" << j <<
"]; " << jIdx <<
"++){\n";
282 out <<
SP <<
SP <<
SP <<
"tensor_" <<
fNY <<
"[" << InputIndex <<
"] = tensor_" <<
fNScale;
283 out <<
"[" << axesIndex <<
"] * static_cast<" <<
fType <<
">(tensor_" <<
fNCastedX <<
"[" << InputIndex;
286 out <<
SP <<
SP <<
"}\n";
292 out <<
SP <<
"// Y = Scale o InvStdDev (X - Mean)\n";
293 for (
size_t i = 0; i <
fAxis; i++) {
294 std::string iIdx =
"axis_" + std::to_string(i);
295 out <<
SP <<
"for (size_t " << iIdx <<
" = 0; " << iIdx <<
" < " << inputShape;
296 out <<
"[" << i <<
"]; " << iIdx <<
"++){\n";
299 std::string jIdx =
"axis_" + std::to_string(j);
300 out <<
SP <<
SP <<
"for (size_t " << jIdx <<
" = 0; " << jIdx <<
" < " << inputShape;
301 out <<
"[" << j <<
"]; " << jIdx <<
"++){\n";
303 out <<
SP <<
SP <<
SP <<
"tensor_" <<
fNY <<
"[" << InputIndex <<
"] = tensor_" <<
fNScale;
304 out <<
"[" << normalizedIndex <<
"] * tensor_" <<
fNInvStdDev <<
"[" << axesIndex;
305 out <<
"] * (tensor_" <<
fNX <<
"[" << InputIndex <<
"] - tensor_" <<
fNMean <<
"[";
306 out << axesIndex <<
"]);\n";
308 out <<
SP <<
SP <<
"}\n";
317 out <<
SP <<
"// Add the bias to Y\n";
318 out <<
SP <<
"int " << OpName <<
"_n = " <<
fLength <<
";\n";
319 out <<
SP <<
"float " << OpName <<
"_alpha = 1.;\n";
320 out <<
SP <<
"int " << OpName <<
"_inc = 1;\n";
321 out <<
SP <<
"BLAS::saxpy_(&" << OpName <<
"_n, &" << OpName <<
"_alpha, " << Bias <<
", &";
322 out << OpName <<
"_inc, " <<
"tensor_" <<
fNY <<
", &" << OpName <<
"_inc);\n";
328 std::vector<std::string>
GetBlasRoutines()
override {
return { std::string(
"Axpy") }; }
330 std::vector<std::string>
GetStdLibs()
override {
return { std::string(
"cmath") }; }
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void input
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h Atom_t Int_t ULong_t ULong_t unsigned char prop_list Atom_t Atom_t Atom_t Time_t type
void AddNeededStdLib(std::string libname)
const ETensorType & GetTensorType(std::string name)
bool IsDynamicTensor(const std::string &name) const
void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector< Dim > dim_shape)
std::vector< Dim > GetDynamicTensorShape(std::string name)
bool CheckIfTensorAlreadyExist(std::string tensor_name)
const std::vector< size_t > & GetTensorShape(std::string name)
std::vector< std::string > GetBlasRoutines() override
std::vector< Dim > fShapeX
ROperator_LayerNormalization()
std::vector< Dim > fShapeInvStdDev
ROperator_LayerNormalization(int axis, float epsilon, size_t stashType, const std::string &nameX, const std::string &nameScale, const std::string &nameB, const std::string &nameY, const std::string &nameMean, const std::string &nameInvStdDev)
std::string fNormalizedLength
std::vector< std::string > GetStdLibs() override
std::string GenerateInitCode() override
std::vector< Dim > fNormalizedShape
std::vector< Dim > fShapeScale
void Initialize(RModel &model) override
std::vector< Dim > fShapeY
std::vector< size_t > fShapeB
std::string Generate(std::string OpName) override
std::vector< std::vector< size_t > > ShapeInference(std::vector< std::vector< size_t > > input) override
std::string fNBroadcastedB
std::vector< ETensorType > TypeInference(std::vector< ETensorType > input) override
std::vector< Dim > fShapeMean
std::string fNNormalizedX
std::vector< Dim > fAxesShape
const std::string SP
Whitespace string used to correctly indent the generated C++ code.
std::vector< size_t > ComputeStrideFromShape(const std::vector< size_t > &shape)
Compute the strides of a tensor given its shape (assumes row-major memory layout).
std::string ConvertDynamicShapeToLength(std::vector< Dim > shape)
std::string ConvertShapeToString(std::vector< size_t > shape)
std::string ConvertTypeToString(ETensorType type)
std::string ConvertDynamicShapeToString(std::vector< Dim > shape)
ETensorType ConvertStringToType(std::string type)
std::size_t ConvertShapeToLength(std::vector< size_t > shape)
create variable transformations