#include "TMVA/DNN/Architectures/Cpu/DataLoader.h"
#include "TMVA/Event.h"

#include <algorithm>
#include <random>

namespace TMVA {
namespace DNN {
// TCpuBatchIterator
//______________________________________________________________________________
template <typename AData, typename AReal>
TCpuBatchIterator<AData, AReal>::TCpuBatchIterator(
    TCpuDataLoader<AData, AReal> &dataLoader,
    size_t batchIndex)
   : fDataLoader(dataLoader), fBatchIndex(batchIndex)
{
   // Nothing to do here.
}
//______________________________________________________________________________
template <typename AData, typename AReal>
TCpuBatch<AReal> TCpuBatchIterator<AData, AReal>::operator*()
{
   // Dereferencing yields the batch this iterator currently points to.
   return fDataLoader.GetBatch(fBatchIndex);
}
//______________________________________________________________________________
template <typename AData, typename AReal>
TCpuBatchIterator<AData, AReal> &TCpuBatchIterator<AData, AReal>::operator++()
{
   fBatchIndex++;
   return *this;
}
//______________________________________________________________________________
template <typename AData, typename AReal>
bool TCpuBatchIterator<AData, AReal>::operator!=(const TCpuBatchIterator &other)
{
   return fBatchIndex != other.GetBatchIndex();
}
//______________________________________________________________________________
template <typename AData, typename AReal>
bool TCpuBatchIterator<AData, AReal>::operator==(const TCpuBatchIterator &other)
{
   return fBatchIndex == other.GetBatchIndex();
}
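// Note: the two comparison operators above compare batch indices only; they
// assume both iterators refer to the same data loader, which holds for the
// begin()/end() pairs produced further below.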
// TCpuDataLoader
//______________________________________________________________________________
template <typename AData, typename AReal>
TCpuDataLoader<AData, AReal>::TCpuDataLoader(const AData &input,
                                             size_t nsamples,
                                             size_t batchSize,
                                             size_t ninputFeatures,
                                             size_t noutputFeatures,
                                             size_t bufferSize)
   : fInput(input), fNSamples(nsamples), fBatchSize(batchSize),
     fBufferSize(bufferSize), fNInputFeatures(ninputFeatures),
     fNOutputFeatures(noutputFeatures), fNBatches(nsamples / batchSize),
     fInputMatrices(), fOutputMatrices(), fSampleIndices()
{
   // Pre-allocate the matrix pairs that buffer the batches currently in flight.
   fInputMatrices.reserve(fBufferSize);
   fOutputMatrices.reserve(fBufferSize);
   for (size_t i = 0; i < fBufferSize; i++) {
      fInputMatrices.emplace_back(fBatchSize, fNInputFeatures);
      fOutputMatrices.emplace_back(fBatchSize, fNOutputFeatures);
   }

   // One index per sample (hence reserve fNSamples); shuffled in begin().
   fSampleIndices.reserve(fNSamples);
   for (size_t i = 0; i < fNSamples; i++) {
      fSampleIndices.emplace_back(i);
   }
}
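// The loader thus keeps fBufferSize pairs of fBatchSize x fNInputFeatures and
// fBatchSize x fNOutputFeatures matrices resident at any time. Since fNBatches
// is the integer quotient fNSamples / fBatchSize, trailing samples that do not
// fill a complete batch are never served.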
//______________________________________________________________________________
template <typename AData, typename AReal>
inline void TCpuDataLoader<AData, AReal>::CopyData(size_t batchIndex)
{
   // Index of the first sample of the first batch to copy. The initialization
   // and the by-reference capture of sampleIndex below are a reconstruction;
   // the corresponding original lines are not recoverable from this extract.
   size_t sampleIndex = batchIndex * fBatchSize;

   // Copies one batch into its circular buffer slot and advances sampleIndex
   // by one batch.
   auto copy = [this, &sampleIndex](UInt_t workerID)
   {
      CopyBatch(this->fInputMatrices[workerID % this->fBufferSize],
                this->fOutputMatrices[workerID % this->fBufferSize],
                this->fInput,
                this->fSampleIndices.begin() + sampleIndex,
                this->fSampleIndices.begin() + sampleIndex + this->fBatchSize);
      sampleIndex += this->fBatchSize;
   };

   size_t end   = std::min(batchIndex + fBufferSize, fNBatches);
   size_t start = batchIndex;
   // The original dispatched these copies through ROOT's executor facilities;
   // the exact call is not recoverable here, so a plain loop stands in.
   for (size_t i = start; i < end; i++) {
      copy(i);
   }
}
//______________________________________________________________________________
template <typename AData, typename AReal>
TCpuBatch<AReal> TCpuDataLoader<AData, AReal>::GetBatch(size_t batchIndex)
{
   size_t fBufferIndex = batchIndex % fBufferSize;
   // Refill the whole buffer whenever its first slot is requested.
   if (fBufferIndex == 0) {
      CopyData(batchIndex);
   }
   return TCpuBatch<AReal>(fInputMatrices[fBufferIndex],
                           fOutputMatrices[fBufferIndex]);
}
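// Note on the buffering scheme: batch batchIndex occupies buffer slot
// batchIndex % fBufferSize, and the buffer is only refilled when slot 0 is
// requested, so batches are expected to be consumed in increasing order, as
// the batch iterator above does.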
//______________________________________________________________________________
template <typename AData, typename AReal>
auto TCpuDataLoader<AData, AReal>::begin()
   -> BatchIterator_t
{
   // Reshuffle the sample indices at the start of each epoch. A
   // default-constructed engine reproduces the same shuffle every time.
   std::shuffle(fSampleIndices.begin(), fSampleIndices.end(),
                std::default_random_engine{});
   return BatchIterator_t(*this, 0);
}
//______________________________________________________________________________
template <typename AData, typename AReal>
auto TCpuDataLoader<AData, AReal>::end()
   -> BatchIterator_t
{
   return BatchIterator_t(*this, fNBatches);
}
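// Usage sketch (not part of the original file): with begin()/end() defined
// above, one epoch over the data becomes a range-based for loop. The concrete
// loader type, the size variables and the Consume() call below are
// illustrative assumptions only.
//
//    TCpuDataLoader<MatrixInput_t, Double_t> loader(input, nSamples, batchSize,
//                                                   nInputFeatures,
//                                                   nOutputFeatures, bufferSize);
//    for (auto batch : loader) {
//       Consume(batch);   // batch is a TCpuBatch<Double_t>: one batchSize-row
//    }                    // input/output matrix pair per iteration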
//______________________________________________________________________________
// Specializations of CopyBatch. The parameter lines for the input data and the
// index range are reconstructed from the call site in CopyData above.
template <>
void TCpuDataLoader<MatrixInput_t, Double_t>::CopyBatch(Matrix_t &inputMatrix,
                                                        Matrix_t &outputMatrix,
                                                        const MatrixInput_t &input,
                                                        IndexIterator_t indexBegin,
                                                        IndexIterator_t indexEnd)
{
   auto &in  = std::get<0>(input);
   auto &out = std::get<1>(input);

   size_t batchIndex = 0;
   for (IndexIterator_t i = indexBegin; i != indexEnd; i++) {
      size_t index = *i;
      for (size_t j = 0; j < (size_t) in.GetNcols(); j++) {
         inputMatrix(batchIndex, j) = in(index, j);
      }
      for (size_t j = 0; j < (size_t) out.GetNcols(); j++) {
         outputMatrix(batchIndex, j) = out(index, j);
      }
      batchIndex++;
   }
}
//______________________________________________________________________________
template <>
void TCpuDataLoader<TMVAInput_t, Double_t>::CopyBatch(Matrix_t &inputMatrix,
                                                      Matrix_t &outputMatrix,
                                                      const TMVAInput_t &input,
                                                      IndexIterator_t indexBegin,
                                                      IndexIterator_t indexEnd)
{
   size_t batchIndex = 0;
   for (IndexIterator_t i = indexBegin; i != indexEnd; i++) {
      size_t index = *i;
      Event *event = input.at(index);
      for (size_t j = 0; j < event->GetNVariables(); j++) {
         inputMatrix(batchIndex, j) = event->GetValue(j);
      }
      if (event->GetNTargets() > 0) {
         // Regression: copy the event targets.
         for (size_t j = 0; j < event->GetNTargets(); j++) {
            outputMatrix(batchIndex, j) = event->GetTarget(j);
         }
      } else {
         // Classification: encode membership of class 0 (signal) as 1.0.
         outputMatrix(batchIndex, 0) = (event->GetClass() == 0) ? 1.0 : 0.0;
      }
      batchIndex++;
   }
}
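// For events without regression targets, only output column 0 is written, with
// a one/zero encoding of class index 0; multi-class outputs are not handled by
// this copy routine.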
// Explicit instantiations for the supported input types.
template class TCpuDataLoader<MatrixInput_t, Double_t>;
template class TCpuDataLoader<TMVAInput_t, Double_t>;
template class TCpuBatchIterator<MatrixInput_t, Double_t>;
template class TCpuBatchIterator<TMVAInput_t, Double_t>;
template class TCpuBatch<Double_t>;

} // namespace DNN
} // namespace TMVA