RTensor.hxx
#ifndef TMVA_RTENSOR
#define TMVA_RTENSOR

#include <vector>
#include <cstddef>     // std::size_t
#include <cstdint>     // std::uint8_t (underlying type of MemoryLayout)
#include <stdexcept>   // std::runtime_error
#include <sstream>     // std::stringstream
#include <memory>      // std::shared_ptr
#include <type_traits> // std::is_convertible
#include <algorithm>   // std::reverse
#include <iterator>    // std::random_access_iterator_tag

namespace TMVA {
namespace Experimental {

/// Memory layout type
enum class MemoryLayout : uint8_t {
   RowMajor = 0x01,
   ColumnMajor = 0x02
};

namespace Internal {

/// \brief Get size of tensor from shape vector
/// \param[in] shape Shape vector
/// \return Size of contiguous memory
template <typename T>
inline std::size_t GetSizeFromShape(const T &shape)
{
   if (shape.size() == 0)
      return 0;
   std::size_t size = 1;
   for (auto &s : shape)
      size *= s;
   return size;
}

/// \brief Compute strides from shape vector.
/// \param[in] shape Shape vector
/// \param[in] layout Memory layout
/// \return Strides vector
///
/// This information is needed for the multi-dimensional indexing. See here:
/// https://en.wikipedia.org/wiki/Row-_and_column-major_order
/// https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.strides.html
template <typename T>
inline std::vector<std::size_t> ComputeStridesFromShape(const T &shape, MemoryLayout layout)
{
   const auto size = shape.size();
   T strides(size);
   if (layout == MemoryLayout::RowMajor) {
      for (std::size_t i = 0; i < size; i++) {
         if (i == 0) {
            strides[size - 1 - i] = 1;
         } else {
            strides[size - 1 - i] = strides[size - 1 - i + 1] * shape[size - 1 - i + 1];
         }
      }
   } else if (layout == MemoryLayout::ColumnMajor) {
      for (std::size_t i = 0; i < size; i++) {
         if (i == 0) {
            strides[i] = 1;
         } else {
            strides[i] = strides[i - 1] * shape[i - 1];
         }
      }
   } else {
      std::stringstream ss;
      ss << "Memory layout type is not valid for calculating strides.";
      throw std::runtime_error(ss.str());
   }
   return strides;
}
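
// Example (illustrative sketch): strides computed for a tensor of shape {2, 3, 4}.
// Row-major means the last axis is contiguous in memory, column-major the first.
//
//    std::vector<std::size_t> shape = {2, 3, 4};
//    auto rowMajor = ComputeStridesFromShape(shape, MemoryLayout::RowMajor);
//    // rowMajor == {12, 4, 1}
//    auto colMajor = ComputeStridesFromShape(shape, MemoryLayout::ColumnMajor);
//    // colMajor == {1, 2, 6}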

/// \brief Compute indices from global index
/// \param[in] shape Shape vector
/// \param[in] layout Memory layout
/// \param[in] idx Global index
/// \return Index vector
template <typename T>
inline T ComputeIndicesFromGlobalIndex(const T &shape, MemoryLayout layout, const typename T::value_type idx)
{
   const auto size = shape.size();
   auto strides = ComputeStridesFromShape(shape, layout);
   T indices(size);
   auto r = idx;
   for (std::size_t i = 0; i < size; i++) {
      indices[i] = int(r / strides[i]);
      r = r % strides[i];
   }
   return indices;
}

/// \brief Compute global index from indices
/// \param[in] strides Strides vector
/// \param[in] idx Index vector
/// \return Global index
template <typename U, typename V>
inline std::size_t ComputeGlobalIndex(const U &strides, const V &idx)
{
   std::size_t globalIndex = 0;
   const auto size = idx.size();
   for (std::size_t i = 0; i < size; i++) {
      globalIndex += strides[size - 1 - i] * idx[size - 1 - i];
   }
   return globalIndex;
}
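
// Example (illustrative sketch): the two helpers are inverse operations. For a
// row-major tensor of shape {2, 3, 4} (strides {12, 4, 1}) the indices {1, 2, 3}
// map to the global index 1*12 + 2*4 + 3*1 = 23, and back again.
//
//    std::vector<std::size_t> shape = {2, 3, 4};
//    auto strides = ComputeStridesFromShape(shape, MemoryLayout::RowMajor);
//    auto global = ComputeGlobalIndex(strides, std::vector<std::size_t>{1, 2, 3});
//    // global == 23
//    auto indices = ComputeIndicesFromGlobalIndex(shape, MemoryLayout::RowMajor, global);
//    // indices == {1, 2, 3}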

/// \brief Type checking for all types of a parameter pack, e.g., used in combination with std::is_convertible
template <class... Ts>
struct and_types : std::true_type {
};

template <class T0, class... Ts>
struct and_types<T0, Ts...> : std::integral_constant<bool, T0() && and_types<Ts...>()> {
};
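
// Example (illustrative sketch): and_types folds a pack of type traits into a
// single compile-time boolean, as used by the variadic operator() further below.
//
//    static_assert(and_types<std::is_convertible<int, std::size_t>,
//                            std::is_convertible<unsigned, std::size_t>>{},
//                  "All index types must be convertible to std::size_t.");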

/// \brief Copy slice of a tensor recursively from here to there
/// \param[in] here Source tensor
/// \param[in] there Target tensor (slice of source tensor)
/// \param[in] mins Minimum of indices for each dimension
/// \param[in] maxs Maximum of indices for each dimension
/// \param[in] idx Current indices
/// \param[in] active Active index needed to stop the recursion
///
/// Copy the content of a slice of a tensor from source to target. This is done
/// by recursively iterating over the ranges of the slice for each dimension.
template <typename T>
void RecursiveCopy(const T &here, T &there,
                   const std::vector<std::size_t> &mins, const std::vector<std::size_t> &maxs,
                   std::vector<std::size_t> idx, std::size_t active)
{
   const auto size = idx.size();
   for (std::size_t i = mins[active]; i < maxs[active]; i++) {
      idx[active] = i;
      if (active == size - 1) {
         auto idxThere = idx;
         for (std::size_t j = 0; j < size; j++) {
            idxThere[j] -= mins[j];
         }
         there(idxThere) = here(idx);
      } else {
         Internal::RecursiveCopy(here, there, mins, maxs, idx, active + 1);
      }
   }
}

} // namespace TMVA::Experimental::Internal

/// \class TMVA::Experimental::RTensor
/// \brief RTensor is a container with contiguous memory and shape information.
/// \tparam V Data-type of the tensor
///
/// An RTensor is a vector-like container which carries additional shape information.
/// The elements of the multi-dimensional container can be accessed by their
/// indices in a coherent way, without having to care about the one-dimensional memory
/// layout of the contiguous storage. This also allows manipulating the shape
/// of the container without moving the actual elements in memory. Another feature
/// is that an RTensor can own the underlying contiguous memory, but it can also
/// represent only a view on existing data without owning it.
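///
/// A minimal usage sketch (illustrative only; assumes `using namespace TMVA::Experimental`):
/// ~~~{.cpp}
/// // Owning tensor: the RTensor allocates and manages its own memory
/// RTensor<float> a({2, 3});      // 2x3 tensor, row-major by default
/// a(0, 1) = 1.f;                 // element access via indices
///
/// // View: wraps existing memory without copying or owning it
/// float data[4] = {1.f, 2.f, 3.f, 4.f};
/// RTensor<float> b(data, {2, 2});
/// b(1, 1) = 42.f;                // writes through to data[3]
/// ~~~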
template <typename V, typename C = std::vector<V>>
class RTensor {
public:
   // Typedefs
   using Value_t = V;
   using Shape_t = std::vector<std::size_t>;
   using Index_t = Shape_t;
   using Slice_t = std::vector<Shape_t>;
   using Container_t = C;

private:
   Shape_t fShape;
   Shape_t fStrides;
   std::size_t fSize;
   MemoryLayout fLayout;
   Value_t *fData;
   std::shared_ptr<Container_t> fContainer;

protected:
   void ReshapeInplace(const Shape_t &shape);

public:
   // Constructors

   /// \brief Construct a tensor as view on data
   /// \param[in] data Pointer to data contiguous in memory
   /// \param[in] shape Shape vector
   /// \param[in] layout Memory layout
   RTensor(Value_t *data, Shape_t shape, MemoryLayout layout = MemoryLayout::RowMajor)
      : fShape(shape), fLayout(layout), fData(data), fContainer(nullptr)
   {
      fSize = Internal::GetSizeFromShape(shape);
      fStrides = Internal::ComputeStridesFromShape(shape, layout);
   }

   /// \brief Construct a tensor as view on data
   /// \param[in] data Pointer to data contiguous in memory
   /// \param[in] shape Shape vector
   /// \param[in] strides Strides vector
   /// \param[in] layout Memory layout
   RTensor(Value_t *data, Shape_t shape, Shape_t strides, MemoryLayout layout = MemoryLayout::RowMajor)
      : fShape(shape), fStrides(strides), fLayout(layout), fData(data), fContainer(nullptr)
   {
      fSize = Internal::GetSizeFromShape(shape);
   }

   /// \brief Construct a tensor owning externally provided data
   /// \param[in] container Shared pointer to data container
   /// \param[in] shape Shape vector
   /// \param[in] layout Memory layout
   RTensor(std::shared_ptr<Container_t> container, Shape_t shape,
           MemoryLayout layout = MemoryLayout::RowMajor)
      : fShape(shape), fLayout(layout), fContainer(container)
   {
      fSize = Internal::GetSizeFromShape(shape);
      fStrides = Internal::ComputeStridesFromShape(shape, layout);
      fData = std::data(*fContainer);
   }

   /// \brief Construct a tensor owning data initialized with new container
   /// \param[in] shape Shape vector
   /// \param[in] layout Memory layout
   RTensor(Shape_t shape, MemoryLayout layout = MemoryLayout::RowMajor)
      : fShape(shape), fLayout(layout)
   {
      // TODO: Document how data pointer is determined using STL iterator interface.
      // TODO: Sanitize given container type with type traits
      fSize = Internal::GetSizeFromShape(shape);
      fStrides = Internal::ComputeStridesFromShape(shape, layout);
      fContainer = std::make_shared<Container_t>(fSize);
      fData = std::data(*fContainer);
   }

   // Access elements
   Value_t &operator() (const Index_t &idx);
   const Value_t &operator() (const Index_t &idx) const;
   template <typename... Idx> Value_t &operator()(Idx... idx);
   template <typename... Idx> const Value_t &operator() (Idx... idx) const;

   // Access properties
   std::size_t GetSize() const { return fSize; }
   const Shape_t &GetShape() const { return fShape; }
   const Shape_t &GetStrides() const { return fStrides; }
   Value_t *GetData() { return fData; }
   const Value_t *GetData() const { return fData; }
   std::shared_ptr<Container_t> GetContainer() { return fContainer; }
   const std::shared_ptr<Container_t> GetContainer() const { return fContainer; }
   MemoryLayout GetMemoryLayout() const { return fLayout; }
   bool IsView() const { return fContainer == nullptr; }
   bool IsOwner() const { return !IsView(); }

   // Copy
   RTensor<Value_t, Container_t> Copy(MemoryLayout layout = MemoryLayout::RowMajor) const;

   // Transformations
   RTensor<Value_t, Container_t> Transpose() const;
   RTensor<Value_t, Container_t> Squeeze() const;
   RTensor<Value_t, Container_t> ExpandDims(int idx) const;
   RTensor<Value_t, Container_t> Reshape(const Shape_t &shape) const;
   RTensor<Value_t, Container_t> Resize(const Shape_t &shape);
   RTensor<Value_t, Container_t> Slice(const Slice_t &slice);

   // Iterator class
   class Iterator {
   private:
      RTensor<Value_t, Container_t> &fTensor;
      Index_t::value_type fGlobalIndex;

   public:
      using iterator_category = std::random_access_iterator_tag;
      using value_type = Value_t;
      using difference_type = std::ptrdiff_t;
      using pointer = Value_t *;
      using reference = Value_t &;

      Iterator(RTensor<Value_t, Container_t> &x, typename Index_t::value_type idx) : fTensor(x), fGlobalIndex(idx) {}
      Iterator &operator++() { fGlobalIndex++; return *this; }
      Iterator operator++(int) { auto tmp = *this; operator++(); return tmp; }
      Iterator &operator--() { fGlobalIndex--; return *this; }
      Iterator operator--(int) { auto tmp = *this; operator--(); return tmp; }
      Iterator operator+(difference_type rhs) const { return Iterator(fTensor, fGlobalIndex + rhs); }
      Iterator operator-(difference_type rhs) const { return Iterator(fTensor, fGlobalIndex - rhs); }
      difference_type operator-(const Iterator &rhs) { return fGlobalIndex - rhs.GetGlobalIndex(); }
      Iterator &operator+=(difference_type rhs) { fGlobalIndex += rhs; return *this; }
      Iterator &operator-=(difference_type rhs) { fGlobalIndex -= rhs; return *this; }
      Value_t &operator*()
      {
         auto idx = Internal::ComputeIndicesFromGlobalIndex(fTensor.GetShape(), fTensor.GetMemoryLayout(), fGlobalIndex);
         return fTensor(idx);
      }
      bool operator==(const Iterator &rhs) const
      {
         if (fGlobalIndex == rhs.GetGlobalIndex()) return true;
         return false;
      }
      bool operator!=(const Iterator &rhs) const { return !operator==(rhs); };
      bool operator>(const Iterator &rhs) const { return fGlobalIndex > rhs.GetGlobalIndex(); }
      bool operator<(const Iterator &rhs) const { return fGlobalIndex < rhs.GetGlobalIndex(); }
      bool operator>=(const Iterator &rhs) const { return fGlobalIndex >= rhs.GetGlobalIndex(); }
      bool operator<=(const Iterator &rhs) const { return fGlobalIndex <= rhs.GetGlobalIndex(); }
      typename Index_t::value_type GetGlobalIndex() const { return fGlobalIndex; };
   };

   // Iterator interface
   // TODO: Document that the iterator always iterates following the physical memory layout.
   Iterator begin() noexcept {
      return Iterator(*this, 0);
   }
   Iterator end() noexcept {
      return Iterator(*this, fSize);
   }
};
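
// Example (illustrative sketch): the iterators traverse the elements in the
// physical memory order of the underlying buffer, so plain loops over
// begin()/end() and range-based for loops both work.
//
//    RTensor<float> t({2, 2});
//    float value = 0.f;
//    for (auto &v : t) v = value++;                              // fills 0, 1, 2, 3
//    float sum = 0.f;
//    for (auto it = t.begin(); it != t.end(); ++it) sum += *it;  // sum == 6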

/// \brief Reshape tensor in place
/// \param[in] shape Shape vector
/// Reshape tensor without changing the overall size
template <typename Value_t, typename Container_t>
void RTensor<Value_t, Container_t>::ReshapeInplace(const Shape_t &shape)
{
   const auto size = Internal::GetSizeFromShape(shape);
   if (size != fSize) {
      std::stringstream ss;
      ss << "Cannot reshape tensor with size " << fSize << " into shape { ";
      for (std::size_t i = 0; i < shape.size(); i++) {
         if (i != shape.size() - 1) {
            ss << shape[i] << ", ";
         } else {
            ss << shape[i] << " }.";
         }
      }
      throw std::runtime_error(ss.str());
   }

   // Compute new strides from shape
   auto strides = Internal::ComputeStridesFromShape(shape, fLayout);
   fShape = shape;
   fStrides = strides;
}

/// \brief Access elements
/// \param[in] idx Index vector
/// \return Reference to element
template <typename Value_t, typename Container_t>
Value_t &RTensor<Value_t, Container_t>::operator()(const Index_t &idx)
{
   const auto globalIndex = Internal::ComputeGlobalIndex(fStrides, idx);
   return fData[globalIndex];
}

/// \brief Access elements
/// \param[in] idx Index vector
/// \return Reference to element
template <typename Value_t, typename Container_t>
const Value_t &RTensor<Value_t, Container_t>::operator()(const Index_t &idx) const
{
   const auto globalIndex = Internal::ComputeGlobalIndex(fStrides, idx);
   return fData[globalIndex];
}

/// \brief Access elements
/// \param[in] idx Indices
/// \return Reference to element
template <typename Value_t, typename Container_t>
template <typename... Idx>
Value_t &RTensor<Value_t, Container_t>::operator()(Idx... idx)
{
   static_assert(Internal::and_types<std::is_convertible<Idx, std::size_t>...>{},
                 "Indices are not convertible to std::size_t.");
   return operator()({static_cast<std::size_t>(idx)...});
}

/// \brief Access elements
/// \param[in] idx Indices
/// \return Reference to element
template <typename Value_t, typename Container_t>
template <typename... Idx>
const Value_t &RTensor<Value_t, Container_t>::operator()(Idx... idx) const
{
   static_assert(Internal::and_types<std::is_convertible<Idx, std::size_t>...>{},
                 "Indices are not convertible to std::size_t.");
   return operator()({static_cast<std::size_t>(idx)...});
}
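
// Example (illustrative sketch): elements can be addressed either with an index
// vector or with a plain list of indices; both forms refer to the same element.
//
//    RTensor<float> t({2, 3});
//    t({1, 2}) = 5.f;        // access with an index vector (Index_t)
//    float v = t(1, 2);      // equivalent variadic access, v == 5.f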

/// \brief Transpose
/// \returns New RTensor
/// The tensor is transposed by inverting the associated memory layout from row-
/// major to column-major and vice versa. Therefore, the underlying data is not
/// touched.
template <typename Value_t, typename Container_t>
RTensor<Value_t, Container_t> RTensor<Value_t, Container_t>::Transpose() const
{
   MemoryLayout layout;
   // Transpose by inverting memory layout
   if (fLayout == MemoryLayout::RowMajor) {
      layout = MemoryLayout::ColumnMajor;
   } else if (fLayout == MemoryLayout::ColumnMajor) {
      layout = MemoryLayout::RowMajor;
   } else {
      throw std::runtime_error("Memory layout is not known.");
   }

   // Create copy of container
   RTensor<Value_t, Container_t> x(fData, fShape, fStrides, layout);

   // Reverse shape
   std::reverse(x.fShape.begin(), x.fShape.end());

   // Reverse strides
   std::reverse(x.fStrides.begin(), x.fStrides.end());

   return x;
}
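
// Example (illustrative sketch): transposing flips the memory layout and reverses
// shape and strides, but shares the underlying buffer with the original tensor.
//
//    RTensor<float> t({2, 3});    // row-major
//    auto tt = t.Transpose();     // shape {3, 2}, column-major
//    // tt(j, i) refers to the same element as t(i, j)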

/// \brief Squeeze dimensions
/// \returns New RTensor
/// Squeeze removes the dimensions of size one from the shape.
template <typename Value_t, typename Container_t>
RTensor<Value_t, Container_t> RTensor<Value_t, Container_t>::Squeeze() const
{
   // Remove dimensions of one and associated strides
   Shape_t shape;
   Shape_t strides;
   for (std::size_t i = 0; i < fShape.size(); i++) {
      if (fShape[i] != 1) {
         shape.emplace_back(fShape[i]);
         strides.emplace_back(fStrides[i]);
      }
   }

   // If all dimensions are 1, we need to keep one.
   // This does not apply if the initial shape is already empty. Then, return
   // the empty shape.
   if (shape.size() == 0 && fShape.size() != 0) {
      shape.emplace_back(1);
      strides.emplace_back(1);
   }

   // Create copy, attach new shape and strides and return
   RTensor<Value_t, Container_t> x(*this);
   x.fShape = shape;
   x.fStrides = strides;
   return x;
}
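
// Example (illustrative sketch): dimensions of size one are dropped from the shape.
//
//    RTensor<float> t({1, 3, 1});
//    auto s = t.Squeeze();        // s.GetShape() == {3}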

/// \brief Expand dimensions
/// \param[in] idx Index in shape vector where dimension is added
/// \returns New RTensor
/// Inserts a dimension of one into the shape.
template <typename Value_t, typename Container_t>
RTensor<Value_t, Container_t> RTensor<Value_t, Container_t>::ExpandDims(int idx) const
{
   // Compose shape vector with additional dimensions and adjust strides
   const int len = fShape.size();
   auto shape = fShape;
   auto strides = fStrides;
   if (idx < 0) {
      idx = len + 1 + idx;
   }
   if (idx < 0) {
      throw std::runtime_error("Given negative index is invalid.");
   } else if (idx > len) {
      throw std::runtime_error("Given index is invalid.");
   }
   shape.insert(shape.begin() + idx, 1);
   strides = Internal::ComputeStridesFromShape(shape, fLayout);

   // Create view copy, attach new shape and strides and return
   RTensor<Value_t, Container_t> x(*this);
   x.fShape = shape;
   x.fStrides = strides;
   return x;
}
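
// Example (illustrative sketch): a dimension of size one is inserted at the given
// position; negative indices count from the end of the shape.
//
//    RTensor<float> t({2, 3});
//    auto a = t.ExpandDims(0);    // shape {1, 2, 3}
//    auto b = t.ExpandDims(-1);   // shape {2, 3, 1}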

/// \brief Reshape tensor
/// \param[in] shape Shape vector
/// \returns New RTensor
/// Reshape tensor without changing the overall size
template <typename Value_t, typename Container_t>
RTensor<Value_t, Container_t> RTensor<Value_t, Container_t>::Reshape(const Shape_t &shape) const
{
   // Create copy, replace and return
   RTensor<Value_t, Container_t> x(*this);
   x.ReshapeInplace(shape);
   return x;
}
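
// Example (illustrative sketch): reshaping keeps the elements and the overall
// size; a mismatching size throws std::runtime_error.
//
//    RTensor<float> t({2, 3});
//    auto r = t.Reshape({3, 2});  // same 6 elements, shape {3, 2}
//    // t.Reshape({4, 2});        // would throw: 8 != 6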

/// \brief Resize tensor
/// \param[in] shape Shape vector
/// \returns New RTensor
/// Resize tensor into new shape
template <typename Value_t, typename Container_t>
RTensor<Value_t, Container_t> RTensor<Value_t, Container_t>::Resize(const Shape_t &shape)
{
   // Create new tensor with the specified shape
   RTensor<Value_t, Container_t> x(shape, fLayout);

   // Copy the contents of the previous tensor, up to the smaller of the two sizes
   size_t n = (x.GetSize() > fSize) ? fSize : x.GetSize();
   std::copy(this->GetData(), this->GetData() + n, x.GetData());

   return x;
}
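
// Example (illustrative sketch): resizing allocates a new owning tensor and copies
// as many elements as fit; any additional elements are value-initialized by the
// new container.
//
//    RTensor<float> t({2, 2});
//    auto r = t.Resize({3, 3});   // 9 elements, the first 4 copied from t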

/// \brief Create a slice of the tensor
/// \param[in] slice Slice vector
/// \returns New RTensor
/// A slice is a subset of the tensor defined by a vector of pairs of indices.
template <typename Value_t, typename Container_t>
RTensor<Value_t, Container_t> RTensor<Value_t, Container_t>::Slice(const Slice_t &slice)
{
   // Sanitize size of slice
   const auto sliceSize = slice.size();
   const auto shapeSize = fShape.size();
   if (sliceSize != shapeSize) {
      std::stringstream ss;
      ss << "Size of slice (" << sliceSize << ") is unequal number of dimensions (" << shapeSize << ").";
      throw std::runtime_error(ss.str());
   }

   // Sanitize slice indices
   // TODO: Sanitize slice indices
   /*
   for (std::size_t i = 0; i < sliceSize; i++) {
   }
   */

   // Convert -1 in slice to proper pair of indices
   // TODO

   // Recompute shape and size
   Shape_t shape(sliceSize);
   for (std::size_t i = 0; i < sliceSize; i++) {
      shape[i] = slice[i][1] - slice[i][0];
   }
   auto size = Internal::GetSizeFromShape(shape);

   // Determine first element contributing to the slice and get the data pointer
   Value_t *data;
   Shape_t idx(sliceSize);
   for (std::size_t i = 0; i < sliceSize; i++) {
      idx[i] = slice[i][0];
   }
   data = &operator()(idx);

   // Create copy and modify properties
   RTensor<Value_t, Container_t> x(*this);
   x.fData = data;
   x.fShape = shape;
   x.fSize = size;

   // Squeeze tensor and return
   return x.Squeeze();
}
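
// Example (illustrative sketch): each dimension is restricted to a half-open
// [begin, end) range; the result shares the underlying data and is squeezed.
//
//    RTensor<float> t({4, 4});
//    auto s = t.Slice({{0, 2}, {1, 3}});  // rows 0-1, columns 1-2, shape {2, 2}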

/// Copy RTensor to new object
/// \param[in] layout Memory layout of the new RTensor
/// \returns New RTensor
/// The operation copies all elements of the current RTensor to a new RTensor
/// with the given layout contiguous in memory. Note that this copies by default
/// to a row major memory layout.
template <typename Value_t, typename Container_t>
RTensor<Value_t, Container_t> RTensor<Value_t, Container_t>::Copy(MemoryLayout layout) const
{
   // Create new tensor with zeros owning the memory
   RTensor<Value_t, Container_t> r(fShape, layout);

   // Copy over the elements from this tensor
   const auto mins = Shape_t(fShape.size());
   const auto maxs = fShape;
   auto idx = mins;
   Internal::RecursiveCopy(*this, r, mins, maxs, idx, 0);

   return r;
}
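
// Example (illustrative sketch): Copy produces an owning tensor with its own
// contiguous memory, so it detaches a view from the data it wraps.
//
//    float data[4] = {1.f, 2.f, 3.f, 4.f};
//    RTensor<float> v(data, {2, 2});   // non-owning view
//    auto c = v.Copy();                // owning, row-major copy
//    c(0, 0) = 99.f;                   // data[0] is unchanged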

/// \brief Pretty printing
/// \param[in] os Output stream
/// \param[in] x RTensor
/// \return Modified output stream
template <typename T>
std::ostream &operator<<(std::ostream &os, RTensor<T> &x)
{
   const auto shapeSize = x.GetShape().size();
   if (shapeSize == 1) {
      os << "{ ";
      const auto size = x.GetSize();
      for (std::size_t i = 0; i < size; i++) {
         os << x({i});
         if (i != size - 1)
            os << ", ";
      }
      os << " }";
   } else if (shapeSize == 2) {
      os << "{";
      const auto shape = x.GetShape();
      for (std::size_t i = 0; i < shape[0]; i++) {
         os << " { ";
         for (std::size_t j = 0; j < shape[1]; j++) {
            os << x({i, j});
            if (j < shape[1] - 1) {
               os << ", ";
            } else {
               os << " ";
            }
         }
         os << "}";
      }
      os << " }";
   } else {
      os << "{ printing not yet implemented for this rank }";
   }
   return os;
}
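
// Example (illustrative sketch): rank-1 and rank-2 tensors can be streamed
// directly (requires <iostream> in the calling code).
//
//    RTensor<float> t({2, 2});
//    std::cout << t << std::endl;   // prints "{ { 0, 0 } { 0, 0 } }"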

} // namespace TMVA::Experimental
} // namespace TMVA

namespace cling {
template <typename T>
std::string printValue(TMVA::Experimental::RTensor<T> *x)
{
   std::stringstream ss;
   ss << *x;
   return ss.str();
}
} // namespace cling

#endif // TMVA_RTENSOR