10 std::vector<Dim> fshape(shape.size());
11 for (
size_t i =0; i < shape.size(); i++){
12 fshape[i].dim = shape[i];
/// Compute the total number of elements described by a tensor shape,
/// i.e. the product of all dimension extents.
/// @param shape tensor shape as a list of dimension extents
/// @return product of the extents; an empty shape yields 1 (scalar)
std::size_t ConvertShapeToLength(std::vector<size_t> shape){
   std::size_t fLength = 1;
   for (auto& dim : shape) fLength *= dim;
   return fLength;
}
// Fragment of ConvertStringToType (see index below): maps a type-name
// string to an ETensorType value. This branch accepts both the ONNX-style
// name "float32" and the ROOT-style "Float".
// NOTE(review): the branch bodies and the remaining type names are not
// visible in this garbled fragment — presumably this branch returns the
// float enumerator; confirm against the original source.
35 if(
type ==
"float32" ||
type ==
"Float"){
/// Render a tensor shape as a human-readable string,
/// e.g. {2,3,4} -> "{ 2 , 3 , 4 }".
/// NOTE(review): the "{ " prefix and " }" suffix are not visible in the
/// garbled fragment; they follow the upstream implementation — confirm.
/// @param shape tensor shape as a list of dimension extents
/// @return formatted string with " , "-separated extents
std::string ConvertShapeToString(std::vector<size_t> shape) {
   std::stringstream out;
   out << "{ ";
   for (size_t i = 0; i < shape.size(); i++) {
      out << shape[i];
      // Separator goes between extents, not after the last one.
      if (i < shape.size()-1) out << " , ";
   }
   out << " }";
   return out.str();
}
56static inline void copy_vector_data(
int_t no_of_copies,
int_t input_size, T* input, T* target){
57 std::memcpy(target, input, input_size *
sizeof(T));
58 int_t already_copied = 1;
60 while (already_copied * 2 <= no_of_copies){
61 std::memcpy(target + already_copied * input_size, target, already_copied * input_size *
sizeof(T));
65 if (already_copied < no_of_copies){
66 std::memcpy(target + already_copied * input_size, target, (no_of_copies - already_copied) * input_size *
sizeof(T));
// Fragment of Unidirectional_broadcast<T> (see index below): broadcasts
// `original_data` of `original_shape` into a newly allocated buffer of
// `target_shape`. The leading numbers are original source line numbers;
// several statements and closing braces are missing from this garbled view
// (e.g. the construction of the `originalShape`/`targetShape` strings used
// in the error messages) — comments added only, code left byte-identical.
// Total element counts of the source array and of the broadcast target.
75 int original_length = 1;
76 int target_length = 1;
77 for (
size_t i = 0; i < original_shape.size(); i++){
78 original_length *= original_shape[i];
80 for (
size_t i = 0; i < target_shape.size(); i++){
81 target_length *= target_shape[i];
// Broadcasting may only add dimensions, never drop them.
83 if (original_shape.size() > target_shape.size()) {
86 throw std::runtime_error(
87 "TMVA::SOFIE Error in Broadcasting Tensor : original array has more dimensions than target shape," + originalShape +
", " + targetShape);
// Left-pad the source shape with 1s until the ranks match
// (e.g. {3} becomes {1,1,3} for a rank-3 target).
91 std::vector<size_t> current_shape(original_shape);
92 auto it = current_shape.begin();
93 while (current_shape.size() < target_shape.size()) {
94 it = current_shape.insert(it, 1);
// Allocate the result (raw new[]: caller takes ownership) and seed it with
// the original data; the loop below expands it in place, dimension by
// dimension.
107 T* new_datavector =
new T[target_length];
108 std::memcpy(new_datavector, original_data, original_length *
sizeof(T));
// Walk dimensions from innermost to outermost, replicating as needed.
110 for (
int dim = (
int) target_shape.size() - 1; dim >= 0; dim--){
111 if (current_shape[dim] != target_shape[dim]){
// Only size-1 dimensions can be broadcast to a larger extent.
112 if (current_shape[dim] != 1) {
115 throw std::runtime_error (
"TMVA::SOFIE Error in Broadcasting Tensor at least one dimension to be broadcast of the original array is not 1, " + originalShape +
", " + targetShape);
// group_size: contiguous elements per replication unit (product of the
// dimensions inner to `dim`, already at their target extents);
// no_of_groups: number of such units (product of the dimensions outer
// to `dim`, still at their pre-broadcast extents).
117 int_t group_size = 1;
118 int_t no_of_groups = 1;
119 int_t no_of_copies = target_shape[dim];
121 for (
size_t i = dim + 1; i < target_shape.size(); i++){
122 group_size *= current_shape[i];
124 for (
int i = 0; i < dim; i++){
125 no_of_groups *= current_shape[i];
// Process groups back to front so the expansion never overwrites source
// data that has not been replicated yet (in-place growth of the buffer).
128 for (
int curr_group = no_of_groups - 1; curr_group >= 0; curr_group--){
129 copy_vector_data<T>(no_of_copies, group_size, new_datavector + curr_group * group_size,new_datavector + curr_group * group_size * no_of_copies);
// This dimension is now at its broadcast (target) extent.
132 current_shape[dim] = target_shape[dim];
135 return new_datavector;
/// Strip every non-alphanumeric character from a tensor name so it can be
/// used as an identifier in generated code (e.g. "my.tensor_1" -> "mytensor1").
/// @param input_tensor_name name to sanitize (taken by value, unmodified)
/// @return the sanitized copy
std::string Clean_name(std::string input_tensor_name)
{
   std::string s (input_tensor_name);
   // Cast to unsigned char before std::isalnum: passing a negative char
   // (possible for bytes >= 0x80) is undefined behavior.
   s.erase(std::remove_if(s.begin(), s.end(), [](
char const&
c ) ->
bool { return !std::isalnum(static_cast<unsigned char>(c)); } ), s.end());
   return s;
}
T * Unidirectional_broadcast(const T *original_data, const std::vector< size_t > original_shape, const std::vector< size_t > target_shape)
std::vector< Dim > ConvertShapeToDim(std::vector< size_t > shape)
std::string ConvertShapeToString(std::vector< size_t > shape)
std::string ConvertTypeToString(ETensorType type)
ETensorType ConvertStringToType(std::string type)
std::size_t ConvertShapeToLength(std::vector< size_t > shape)
Create variable transformations.