template <typename AReal>

template <typename AReal>

template <typename AReal>

template <typename AReal>

template <typename AReal>
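
These template headers introduce the TCpuBuffer<AReal> member definitions. The members documented at the end of this listing (a std::shared_ptr<AFloat *> fBuffer together with a TDestructor functor exposing operator()(AFloat **pointer)) point to a reference-counted flat buffer whose memory is released by a custom deleter. A minimal standalone sketch of that idea follows; the class name RefCountedBuffer and the plain new[]/delete[] allocation are illustrative assumptions, not the actual TMVA implementation.

#include <cstddef>
#include <memory>

// Minimal sketch of a reference-counted flat buffer (illustrative only).
template <typename AFloat>
class RefCountedBuffer {
public:
   // Custom deleter: frees the array and then the pointer holder itself.
   struct TDestructor {
      void operator()(AFloat **pointer)
      {
         delete[] *pointer;
         delete pointer;
      }
   };

   explicit RefCountedBuffer(size_t size)
      : fSize(size), fBuffer(new AFloat *(new AFloat[size]), TDestructor())
   {
   }

   AFloat &operator[](size_t i) { return (*fBuffer)[i]; }
   const AFloat &operator[](size_t i) const { return (*fBuffer)[i]; }
   size_t GetSize() const { return fSize; }

private:
   size_t fSize;
   std::shared_ptr<AFloat *> fBuffer; // copies of the buffer alias the same memory

};

int main()
{
   RefCountedBuffer<float> buffer(8);
   for (size_t i = 0; i < buffer.GetSize(); i++) {
      buffer[i] = static_cast<float>(i);
   }
   RefCountedBuffer<float> alias = buffer; // shares the same underlying array
   return alias[3] == 3.0f ? 0 : 1;
}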
 
size_t n = inputMatrix.GetNcols();
for (size_t i = 0; i < batchSize; i++) {
   for (size_t j = 0; j < n; j++) {
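
Fragments like the one above all follow a single pattern: each of the batchSize selected samples is copied, variable by variable, into a flat host buffer. A minimal sketch of that pattern is shown below, assuming a column-major buffer layout (bufferIndex = j * batchSize + i), which matches the index arithmetic visible later in this listing; the std::vector buffer and the sampleIndices argument are illustrative stand-ins for the loader's own types.

#include <cstddef>
#include <vector>

#include "TMatrixT.h"

// Sketch: copy selected rows of a TMatrixT into a flat, column-major buffer.
// 'buffer' and 'sampleIndices' are illustrative stand-ins for the loader's own types.
void CopyInputSketch(std::vector<float> &buffer, const TMatrixT<Double_t> &inputMatrix,
                     const std::vector<size_t> &sampleIndices, size_t batchSize)
{
   size_t n = inputMatrix.GetNcols();
   buffer.resize(batchSize * n);

   for (size_t i = 0; i < batchSize; i++) {
      size_t sampleIndex = sampleIndices[i]; // row of the input matrix assigned to batch slot i
      for (size_t j = 0; j < n; j++) {
         size_t bufferIndex = j * batchSize + i; // column j of the batch is stored contiguously
         buffer[bufferIndex] = static_cast<float>(inputMatrix(sampleIndex, j));
      }
   }
}

int main()
{
   TMatrixT<Double_t> inputMatrix(4, 3); // 4 events, 3 input variables (illustrative)
   inputMatrix(2, 1) = 0.5;

   std::vector<float> buffer;
   std::vector<size_t> sampleIndices = {2, 0, 1, 3};
   CopyInputSketch(buffer, inputMatrix, sampleIndices, sampleIndices.size());

   // Event 2 went into batch slot 0, so its variable 1 sits at index 1 * batchSize + 0.
   return buffer[1 * 4 + 0] == 0.5f ? 0 : 1;
}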
 
 
size_t n = outputMatrix.GetNcols();
for (size_t i = 0; i < batchSize; i++) {
   for (size_t j = 0; j < n; j++) {
 
 
for (size_t i = 0; i < batchSize; i++) {
 
 
size_t n = inputMatrix.GetNcols();
for (size_t i = 0; i < batchSize; i++) {
   for (size_t j = 0; j < n; j++) {
 
 
size_t n = outputMatrix.GetNcols();
for (size_t i = 0; i < batchSize; i++) {
   for (size_t j = 0; j < n; j++) {
 
 
for (size_t i = 0; i < batchSize; i++) {
 
 
Event *event = std::get<0>(fData)[0];
size_t n = event->GetNVariables();
for (size_t i = 0; i < batchSize; i++) {
   for (size_t j = 0; j < n; j++) {
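
The Event-based loaders do the same, except that each batch slot reads its values from an event via GetValue(j) rather than from a matrix row. A self-contained sketch, with a small EventLike stand-in used in place of TMVA::Event:

#include <cstddef>
#include <vector>

// Stand-in for the two TMVA::Event accessors used by the loader (illustrative only).
struct EventLike {
   std::vector<float> values;
   size_t GetNVariables() const { return values.size(); }
   float GetValue(size_t j) const { return values[j]; }
};

// Sketch: copy the input variables of a batch of events into a flat, column-major buffer.
void CopyEventInputSketch(std::vector<float> &buffer, const std::vector<EventLike> &events, size_t batchSize)
{
   size_t n = events.front().GetNVariables();
   buffer.resize(batchSize * n);

   for (size_t i = 0; i < batchSize; i++) {
      const EventLike &event = events[i];
      for (size_t j = 0; j < n; j++) {
         size_t bufferIndex = j * batchSize + i; // column-major, as in the matrix case above
         buffer[bufferIndex] = event.GetValue(j);
      }
   }
}

int main()
{
   std::vector<EventLike> events = {{{1.f, 2.f}}, {{3.f, 4.f}}};
   std::vector<float> buffer;
   CopyEventInputSketch(buffer, events, events.size());
   // Variable 1 of event 0 lands at index 1 * batchSize + 0 = 2.
   return buffer[2] == 2.f ? 0 : 1;
}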
 
 
size_t n = buffer.GetSize() / batchSize;
for (size_t i = 0; i < batchSize; i++) {
   for (size_t j = 0; j < n; j++) {
      if (event->GetNTargets() == 0) {
         if (j == event->GetClass()) {
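
When an event carries no regression targets (GetNTargets() == 0), the comparison j == event->GetClass() indicates that the output row is filled with a one-hot encoding of the class index. A minimal sketch of that branch, with plain integers standing in for the Event accessors:

#include <cstddef>
#include <vector>

// Sketch: fill one row of the output buffer with a one-hot encoding of the class label.
// 'nClasses' plays the role of buffer.GetSize() / batchSize and 'classIndex' the role of
// event->GetClass(); both are illustrative stand-ins.
void FillOneHotRow(std::vector<float> &buffer, size_t batchSize, size_t nClasses,
                   size_t row, size_t classIndex)
{
   for (size_t j = 0; j < nClasses; j++) {
      size_t bufferIndex = j * batchSize + row; // column-major layout
      buffer[bufferIndex] = (j == classIndex) ? 1.0f : 0.0f;
   }
}

int main()
{
   size_t batchSize = 4, nClasses = 3;
   std::vector<float> buffer(batchSize * nClasses, 0.0f);
   FillOneHotRow(buffer, batchSize, nClasses, /*row=*/1, /*classIndex=*/2);
   return buffer[2 * batchSize + 1] == 1.0f ? 0 : 1;
}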
 
 
for (size_t i = 0; i < batchSize; i++) {
   buffer[i] = event->GetWeight();
 
 
Event *event = std::get<0>(fData)[0];
size_t n = event->GetNVariables();
for (size_t i = 0; i < batchSize; i++) {
   for (size_t j = 0; j < n; j++) {
 
 
size_t n = buffer.GetSize() / batchSize;
for (size_t i = 0; i < batchSize; i++) {
   for (size_t j = 0; j < n; j++) {
      if (event->GetNTargets() == 0) {
         if (j == event->GetClass()) {
 
 
for (size_t i = 0; i < batchSize; i++) {
   buffer[i] = static_cast<Float_t>(event->GetWeight());
 
 
const std::vector<TMatrixT<Double_t>> &inputTensor = std::get<0>(fData);

if (fBatchDepth == 1) {
   for (size_t i = 0; i < fBatchHeight; i++) {
      for (size_t j = 0; j < fBatchWidth; j++) {

} else {
   for (size_t i = 0; i < fBatchDepth; i++) {
      for (size_t j = 0; j < fBatchHeight; j++) {
         for (size_t k = 0; k < fBatchWidth; k++) {
            size_t bufferIndex = i * fBatchHeight * fBatchWidth + k * fBatchHeight + j;
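
The expression i * fBatchHeight * fBatchWidth + k * fBatchHeight + j stores each depth slice as a column-major fBatchHeight x fBatchWidth block: consecutive buffer elements run down a column (index j) of one slice, then across columns (index k), then across slices (index i). The small self-contained check below confirms that this mapping touches every buffer element exactly once; the concrete shape values are illustrative.

#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
   // Illustrative shape only; the real values come from fBatchDepth/fBatchHeight/fBatchWidth.
   size_t depth = 2, height = 3, width = 4;
   std::vector<size_t> hits(depth * height * width, 0);

   for (size_t i = 0; i < depth; i++) {         // depth slice
      for (size_t j = 0; j < height; j++) {     // row inside the slice
         for (size_t k = 0; k < width; k++) {   // column inside the slice
            // Same arithmetic as in the listing: column-major inside each slice.
            size_t bufferIndex = i * height * width + k * height + j;
            hits[bufferIndex]++;
         }
      }
   }

   // Every buffer element is written exactly once, so the mapping is a bijection.
   for (size_t idx = 0; idx < hits.size(); idx++) {
      if (hits[idx] != 1) {
         std::printf("index %zu written %zu times\n", idx, hits[idx]);
         return 1;
      }
   }
   return 0;
}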
 
 
size_t n = outputMatrix.GetNcols();
for (size_t i = 0; i < fBatchSize; i++) {
   for (size_t j = 0; j < n; j++) {
 
 
for (size_t i = 0; i < fBatchSize; i++) {
 
 
Tensor_t inputTensor( std::get<0>(DeviceBuffers), { fBatchHeight, fBatchWidth, fBatchSize } );
Matrix_t outputMatrix(std::get<1>(DeviceBuffers), fBatchSize, fNOutputFeatures);
Matrix_t weightMatrix(std::get<2>(DeviceBuffers), fBatchSize, fNOutputFeatures);
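
Here the three components of DeviceBuffers are wrapped as a tensor and two matrices of the requested shapes without copying any data; TCpuBuffer::GetSubBuffer, documented at the end of this listing, provides the same kind of view into a region of an existing buffer. A rough sketch of the partitioning idea, using raw pointers into one std::vector instead of the actual Tensor_t and Matrix_t constructors:

#include <cstddef>
#include <vector>

// Illustrative view over a region of a flat buffer (stand-in for a matrix or tensor
// constructed on top of a device buffer).
struct BufferView {
   float *data;
   size_t size;
};

int main()
{
   // Assumed batch geometry, for illustration only.
   size_t batchSize = 32, nInputFeatures = 16, nOutputFeatures = 4;

   size_t inputSize  = batchSize * nInputFeatures;
   size_t outputSize = batchSize * nOutputFeatures;
   size_t weightSize = batchSize; // one weight per event

   // One flat allocation holding input, output and weight regions back to back.
   std::vector<float> deviceBuffer(inputSize + outputSize + weightSize, 0.0f);

   BufferView inputView{deviceBuffer.data(), inputSize};
   BufferView outputView{deviceBuffer.data() + inputSize, outputSize};
   BufferView weightView{deviceBuffer.data() + inputSize + outputSize, weightSize};

   // Writing through a view touches the shared allocation; no data is copied.
   weightView.data[0] = 1.0f;
   return deviceBuffer[inputSize + outputSize] == 1.0f ? 0 : 1;
}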
 
const std::vector<TMatrixT<Double_t>> &inputTensor = std::get<0>(fData);

if (fBatchDepth == 1) {
   for (size_t i = 0; i < fBatchHeight; i++) {
      for (size_t j = 0; j < fBatchWidth; j++) {

} else {
   for (size_t i = 0; i < fBatchDepth; i++) {
      for (size_t j = 0; j < fBatchHeight; j++) {
         for (size_t k = 0; k < fBatchWidth; k++) {
            size_t bufferIndex = i * fBatchHeight * fBatchWidth + k * fBatchHeight + j;
 
 
size_t n = outputMatrix.GetNcols();
for (size_t i = 0; i < fBatchSize; i++) {
   for (size_t j = 0; j < n; j++) {
 
 
for (size_t i = 0; i < fBatchSize; i++) {
 
 
Tensor_t inputTensor( std::get<0>(DeviceBuffers), { fBatchHeight, fBatchWidth, fBatchSize } );
Matrix_t outputMatrix(std::get<1>(DeviceBuffers), fBatchSize, fNOutputFeatures);
Matrix_t weightMatrix(std::get<2>(DeviceBuffers), fBatchSize, fNOutputFeatures);
 
if (fBatchDepth == 1 && fBatchHeight == fBatchSize) {
   for (size_t i = 0; i < fBatchHeight; i++) {
      for (size_t j = 0; j < fBatchWidth; j++) {

} else if (fBatchDepth == fBatchSize) {
   for (size_t i = 0; i < fBatchDepth; i++) {
      for (size_t j = 0; j < fBatchHeight; j++) {
         for (size_t k = 0; k < fBatchWidth; k++) {
            size_t bufferIndex = i * fBatchHeight * fBatchWidth + k * fBatchHeight + j;
            buffer[bufferIndex] = event->GetValue(j * fBatchWidth + k);

} else {
   Error("TTensorDataLoader", "Inconsistency between batch depth and batch size");
 
 
size_t n = buffer.GetSize() / fBatchSize;
for (size_t i = 0; i < fBatchSize; i++) {
   for (size_t j = 0; j < n; j++) {
      if (event->GetNTargets() == 0) {
         if (j == event->GetClass()) {
 
 
for (size_t i = 0; i < fBatchSize; i++) {
   buffer[i] = event->GetWeight();
 
 
Tensor_t inputTensor( std::get<0>(DeviceBuffers), { fBatchHeight, fBatchWidth, fBatchSize } );
Matrix_t outputMatrix(std::get<1>(DeviceBuffers), fBatchSize, fNOutputFeatures);
Matrix_t weightMatrix(std::get<2>(DeviceBuffers), fBatchSize, fNOutputFeatures);
 
if (fBatchDepth == 1 && fBatchHeight == fBatchSize) {
   for (size_t i = 0; i < fBatchHeight; i++) {
      for (size_t j = 0; j < fBatchWidth; j++) {

} else if (fBatchDepth == fBatchSize) {
   for (size_t i = 0; i < fBatchDepth; i++) {
      for (size_t j = 0; j < fBatchHeight; j++) {
         for (size_t k = 0; k < fBatchWidth; k++) {
            size_t bufferIndex = i * fBatchHeight * fBatchWidth + k * fBatchHeight + j;
            buffer[bufferIndex] = event->GetValue(j * fBatchWidth + k);

} else {
   Error("TTensorDataLoader", "Inconsistency between batch depth and batch size");
 
 
size_t n = buffer.GetSize() / fBatchSize;
for (size_t i = 0; i < fBatchSize; i++) {
   for (size_t j = 0; j < n; j++) {
      if (event->GetNTargets() == 0) {
         if (j == event->GetClass()) {
 
 
for (size_t i = 0; i < fBatchSize; i++) {
   buffer[i] = event->GetWeight();
 
 
Tensor_t inputTensor( std::get<0>(DeviceBuffers), { fBatchHeight, fBatchWidth, fBatchSize } );
Matrix_t outputMatrix(std::get<1>(DeviceBuffers), fBatchSize, fNOutputFeatures);
Matrix_t weightMatrix(std::get<2>(DeviceBuffers), fBatchSize, fNOutputFeatures);
 
size_t size(const MatrixT &matrix)
Retrieve the size of a square matrix.
 
void Error(const char *location, const char *msgfmt,...)
Use this function in case an error occurred.
 
TCpuBuffer GetSubBuffer(size_t offset, size_t start) const
Return a sub-buffer of size start, starting at element offset.
 
void CopyTo(TCpuBuffer &) const
Copy data to another buffer.
 
void CopyFrom(const TCpuBuffer &)
Copy data from another buffer.
 
std::shared_ptr< AFloat * > fBuffer
 
struct TMVA::DNN::TCpuBuffer::TDestructor fDestructor
 
void operator()(AFloat **pointer)