Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
RPageSinkBuf.cxx
Go to the documentation of this file.
1/// \file RPageSinkBuf.cxx
2/// \ingroup NTuple ROOT7
3/// \author Jakob Blomer <jblomer@cern.ch>
4/// \author Max Orok <maxwellorok@gmail.com>
5/// \author Javier Lopez-Gomez <javier.lopez.gomez@cern.ch>
6/// \date 2021-03-17
7/// \warning This is part of the ROOT 7 prototype! It will change without notice. It might trigger earthquakes. Feedback
8/// is welcome!
9
10/*************************************************************************
11 * Copyright (C) 1995-2021, Rene Brun and Fons Rademakers. *
12 * All rights reserved. *
13 * *
14 * For the licensing terms see $ROOTSYS/LICENSE. *
15 * For the list of contributors see $ROOTSYS/README/CREDITS. *
16 *************************************************************************/
17
18#include <ROOT/RNTupleModel.hxx>
20#include <ROOT/RNTupleZip.hxx>
21#include <ROOT/RPageSinkBuf.hxx>
22
23#include <algorithm>
24#include <memory>
25
27{
28 fBufferedPages.clear();
29 // Each RSealedPage points to the same region as `fBuf` for some element in `fBufferedPages`; thus, no further
30 // clean-up is required
31 fSealedPages.clear();
32}
33
35 : RPageSink(inner->GetNTupleName(), inner->GetWriteOptions()), fInnerSink(std::move(inner))
36{
37 fMetrics = Detail::RNTupleMetrics("RPageSinkBuf");
38 fCounters = std::make_unique<RCounters>(RCounters{
39 *fMetrics.MakeCounter<Detail::RNTuplePlainCounter *>("ParallelZip", "", "compressing pages in parallel"),
40 *fMetrics.MakeCounter<Detail::RNTuplePlainCounter *>("timeWallCriticalSection", "ns",
41 "wall clock time spent in critical sections"),
43 "timeCpuCriticalSection", "ns", "CPU time spent in critical section")});
44 fMetrics.ObserveMetrics(fInnerSink->GetMetrics());
45}
46
48{
49 // Wait for unterminated tasks, if any, as they may still hold a reference to `this`.
50 // This cannot be moved to the base class destructor, given non-static members have been destroyed by the time the
51 // base class destructor is invoked.
52 WaitForAllTasks();
53}
54
57{
58 return ColumnHandle_t{fNColumns++, &column};
59}
60
61void ROOT::Experimental::Internal::RPageSinkBuf::ConnectFields(const std::vector<RFieldBase *> &fields,
62 NTupleSize_t firstEntry)
63{
64 auto connectField = [&](RFieldBase &f) {
65 // Field Zero would have id 0.
66 ++fNFields;
67 f.SetOnDiskId(fNFields);
68 CallConnectPageSinkOnField(f, *this, firstEntry); // issues in turn calls to `AddColumn()`
69 };
70 for (auto *f : fields) {
71 connectField(*f);
72 for (auto &descendant : *f) {
73 connectField(descendant);
74 }
75 }
76 fBufferedColumns.resize(fNColumns);
77}
78
80{
81 return fInnerSink->GetDescriptor();
82}
83
85{
86 ConnectFields(Internal::GetFieldZeroOfModel(model).GetSubFields(), 0U);
87
88 fInnerModel = model.Clone();
89 fInnerSink->Init(*fInnerModel);
90}
91
93 NTupleSize_t firstEntry)
94{
95 ConnectFields(changeset.fAddedFields, firstEntry);
96
97 // The buffered page sink maintains a copy of the RNTupleModel for the inner sink; replicate the changes there
98 // TODO(jalopezg): we should be able, in general, to simplify the buffered sink.
99 auto cloneAddField = [&](const RFieldBase *field) {
100 auto cloned = field->Clone(field->GetFieldName());
101 auto p = &(*cloned);
102 fInnerModel->AddField(std::move(cloned));
103 return p;
104 };
105 auto cloneAddProjectedField = [&](RFieldBase *field) {
106 auto cloned = field->Clone(field->GetFieldName());
107 auto p = &(*cloned);
108 auto &projectedFields = Internal::GetProjectedFieldsOfModel(changeset.fModel);
110 fieldMap[p] = &fInnerModel->GetConstField(projectedFields.GetSourceField(field)->GetQualifiedFieldName());
111 auto targetIt = cloned->begin();
112 for (auto &f : *field)
113 fieldMap[&(*targetIt++)] =
114 &fInnerModel->GetConstField(projectedFields.GetSourceField(&f)->GetQualifiedFieldName());
115 Internal::GetProjectedFieldsOfModel(*fInnerModel).Add(std::move(cloned), fieldMap);
116 return p;
117 };
118 RNTupleModelChangeset innerChangeset{*fInnerModel};
119 fInnerModel->Unfreeze();
120 std::transform(changeset.fAddedFields.cbegin(), changeset.fAddedFields.cend(),
121 std::back_inserter(innerChangeset.fAddedFields), cloneAddField);
122 std::transform(changeset.fAddedProjectedFields.cbegin(), changeset.fAddedProjectedFields.cend(),
123 std::back_inserter(innerChangeset.fAddedProjectedFields), cloneAddProjectedField);
124 fInnerModel->Freeze();
125 fInnerSink->UpdateSchema(innerChangeset, firstEntry);
126}
127
129{
130 RPageSink::RSinkGuard g(fInnerSink->GetSinkGuard());
131 Detail::RNTuplePlainTimer timer(fCounters->fTimeWallCriticalSection, fCounters->fTimeCpuCriticalSection);
132 fInnerSink->UpdateExtraTypeInfo(extraTypeInfo);
133}
134
136{
137 fSuppressedColumns.emplace_back(columnHandle);
138}
139
141{
142 auto colId = columnHandle.fPhysicalId;
143 const auto &element = *columnHandle.fColumn->GetElement();
144
145 // Safety: References are guaranteed to be valid until the element is destroyed. In other words, all buffered page
146 // elements are valid until DropBufferedPages().
147 auto &zipItem = fBufferedColumns.at(colId).BufferPage(columnHandle);
148 std::size_t maxSealedPageBytes = page.GetNBytes() + GetWriteOptions().GetEnablePageChecksums() * kNBytesPageChecksum;
149 // Do not allocate the buffer yet, in case of IMT we only need it once the task is started.
150 auto &sealedPage = fBufferedColumns.at(colId).RegisterSealedPage();
151
152 auto allocateBuf = [&zipItem, maxSealedPageBytes]() {
153 zipItem.fBuf = std::make_unique<unsigned char[]>(maxSealedPageBytes);
154 R__ASSERT(zipItem.fBuf);
155 };
156 auto shrinkSealedPage = [&zipItem, maxSealedPageBytes, &sealedPage]() {
157 // If the sealed page is smaller than the maximum size (with compression), allocate what is needed and copy the
158 // sealed page content to save memory.
159 auto sealedBufferSize = sealedPage.GetBufferSize();
160 if (sealedBufferSize < maxSealedPageBytes) {
161 auto buf = std::make_unique<unsigned char[]>(sealedBufferSize);
162 memcpy(buf.get(), sealedPage.GetBuffer(), sealedBufferSize);
163 zipItem.fBuf = std::move(buf);
164 sealedPage.SetBuffer(zipItem.fBuf.get());
165 }
166 };
167
168 if (!fTaskScheduler) {
169 allocateBuf();
170 // Seal the page right now, avoiding the allocation and copy, but making sure that the page buffer is not aliased.
171 RSealPageConfig config;
172 config.fPage = &page;
173 config.fElement = &element;
174 config.fCompressionSetting = GetWriteOptions().GetCompression();
175 config.fWriteChecksum = GetWriteOptions().GetEnablePageChecksums();
176 config.fAllowAlias = false;
177 config.fBuffer = zipItem.fBuf.get();
178 sealedPage = SealPage(config);
179 shrinkSealedPage();
180 zipItem.fSealedPage = &sealedPage;
181 return;
182 }
183
184 // TODO avoid frequent (de)allocations by holding on to allocated buffers in RColumnBuf
185 zipItem.fPage = fPageAllocator->NewPage(columnHandle.fPhysicalId, page.GetElementSize(), page.GetNElements());
186 // make sure the page is aware of how many elements it will have
187 zipItem.fPage.GrowUnchecked(page.GetNElements());
188 memcpy(zipItem.fPage.GetBuffer(), page.GetBuffer(), page.GetNBytes());
189
190 fCounters->fParallelZip.SetValue(1);
191 // Thread safety: Each thread works on a distinct zipItem which owns its
192 // compression buffer.
193 fTaskScheduler->AddTask([this, &zipItem, &sealedPage, &element, allocateBuf, shrinkSealedPage] {
194 allocateBuf();
195 RSealPageConfig config;
196 config.fPage = &zipItem.fPage;
197 config.fElement = &element;
198 config.fCompressionSetting = GetWriteOptions().GetCompression();
199 config.fWriteChecksum = GetWriteOptions().GetEnablePageChecksums();
200 // Make sure the page buffer is not aliased so that we can free the uncompressed page.
201 config.fAllowAlias = false;
202 config.fBuffer = zipItem.fBuf.get();
203 sealedPage = SealPage(config);
204 shrinkSealedPage();
205 zipItem.fSealedPage = &sealedPage;
206 // Release the uncompressed page. This works because the "page allocator must be thread-safe."
207 zipItem.fPage = RPage();
208 });
209}
210
212 const RSealedPage & /*sealedPage*/)
213{
214 throw RException(R__FAIL("should never commit sealed pages to RPageSinkBuf"));
215}
216
void ROOT::Experimental::Internal::RPageSinkBuf::CommitSealedPageV(std::span<RPageStorage::RSealedPageGroup> /*ranges*/)
{
   // The buffered sink accepts only unsealed pages; it seals them itself before forwarding
   // them to the inner sink, so committing already-sealed pages here is a usage error.
   throw RException(R__FAIL("should never commit sealed pages to RPageSinkBuf"));
}
221
222// We implement both StageCluster() and CommitCluster() because we can call CommitCluster() on the inner sink more
223// efficiently in a single critical section. For parallel writing, it also guarantees that we produce a fully sequential
224// file.
225void ROOT::Experimental::Internal::RPageSinkBuf::FlushClusterImpl(std::function<void(void)> FlushClusterFn)
226{
227 WaitForAllTasks();
228
229 std::vector<RSealedPageGroup> toCommit;
230 toCommit.reserve(fBufferedColumns.size());
231 for (auto &bufColumn : fBufferedColumns) {
232 R__ASSERT(bufColumn.HasSealedPagesOnly());
233 const auto &sealedPages = bufColumn.GetSealedPages();
234 toCommit.emplace_back(bufColumn.GetHandle().fPhysicalId, sealedPages.cbegin(), sealedPages.cend());
235 }
236
237 {
238 RPageSink::RSinkGuard g(fInnerSink->GetSinkGuard());
239 Detail::RNTuplePlainTimer timer(fCounters->fTimeWallCriticalSection, fCounters->fTimeCpuCriticalSection);
240 fInnerSink->CommitSealedPageV(toCommit);
241
242 for (auto handle : fSuppressedColumns)
243 fInnerSink->CommitSuppressedColumn(handle);
244 fSuppressedColumns.clear();
245
246 FlushClusterFn();
247 }
248
249 for (auto &bufColumn : fBufferedColumns)
250 bufColumn.DropBufferedPages();
251}
252
254{
255 std::uint64_t nbytes;
256 FlushClusterImpl([&] { nbytes = fInnerSink->CommitCluster(nNewEntries); });
257 return nbytes;
258}
259
262{
264 FlushClusterImpl([&] { stagedCluster = fInnerSink->StageCluster(nNewEntries); });
265 return stagedCluster;
266}
267
269{
270 RPageSink::RSinkGuard g(fInnerSink->GetSinkGuard());
271 Detail::RNTuplePlainTimer timer(fCounters->fTimeWallCriticalSection, fCounters->fTimeCpuCriticalSection);
272 fInnerSink->CommitStagedClusters(clusters);
273}
274
276{
277 RPageSink::RSinkGuard g(fInnerSink->GetSinkGuard());
278 Detail::RNTuplePlainTimer timer(fCounters->fTimeWallCriticalSection, fCounters->fTimeCpuCriticalSection);
279 fInnerSink->CommitClusterGroup();
280}
281
283{
284 RPageSink::RSinkGuard g(fInnerSink->GetSinkGuard());
285 Detail::RNTuplePlainTimer timer(fCounters->fTimeWallCriticalSection, fCounters->fTimeCpuCriticalSection);
286 fInnerSink->CommitDataset();
287}
288
291{
292 return fInnerSink->ReservePage(columnHandle, nElements);
293}
#define R__FAIL(msg)
Short-hand to return an RResult<T> in an error state; the RError is implicitly converted into RResult...
Definition RError.hxx:290
#define f(i)
Definition RSha256.hxx:104
#define g(i)
Definition RSha256.hxx:105
#define R__ASSERT(e)
Checks condition e and reports a fatal error if it's false.
Definition TError.h:125
winID h TVirtualViewer3D TVirtualGLPainter p
A collection of Counter objects with a name, a unit, and a description.
void ObserveMetrics(RNTupleMetrics &observee)
CounterPtrT MakeCounter(const std::string &name, Args &&... args)
A non thread-safe integral performance counter.
An either thread-safe or non thread safe counter for CPU ticks.
Record wall time and CPU time between construction and destruction.
A column is a storage-backed array of a simple, fixed-size type, from which pages can be mapped into ...
Definition RColumn.hxx:40
RColumnElementBase * GetElement() const
Definition RColumn.hxx:329
RPageStorage::SealedPageSequence_t fSealedPages
Pages that have been already sealed by a concurrent task.
std::deque< RPageZipItem > fBufferedPages
Using a deque guarantees that element iterators are never invalidated by appends to the end of the it...
std::uint64_t CommitCluster(NTupleSize_t nNewEntries) final
Finalize the current cluster and create a new one for the following data.
void CommitStagedClusters(std::span< RStagedCluster > clusters) final
Commit staged clusters, logically appending them to the ntuple descriptor.
ColumnHandle_t AddColumn(DescriptorId_t fieldId, RColumn &column) final
Register a new column.
RStagedCluster StageCluster(NTupleSize_t nNewEntries) final
Stage the current cluster and create a new one for the following data.
std::unique_ptr< RPageSink > fInnerSink
The inner sink, responsible for actually performing I/O.
RPage ReservePage(ColumnHandle_t columnHandle, std::size_t nElements) final
Get a new, empty page for the given column that can be filled with up to nElements; nElements must be...
void FlushClusterImpl(std::function< void(void)> FlushClusterFn)
void CommitSealedPageV(std::span< RPageStorage::RSealedPageGroup > ranges) final
Write a vector of preprocessed pages to storage. The corresponding columns must have been added befor...
void UpdateSchema(const RNTupleModelChangeset &changeset, NTupleSize_t firstEntry) final
Incorporate incremental changes to the model into the ntuple descriptor.
RPageSinkBuf(std::unique_ptr< RPageSink > inner)
const RNTupleDescriptor & GetDescriptor() const final
Return the RNTupleDescriptor being constructed.
std::unique_ptr< RCounters > fCounters
void InitImpl(RNTupleModel &model) final
void CommitPage(ColumnHandle_t columnHandle, const RPage &page) final
Write a page to the storage. The column must have been added before.
void ConnectFields(const std::vector< RFieldBase * > &fields, NTupleSize_t firstEntry)
void CommitSealedPage(DescriptorId_t physicalColumnId, const RSealedPage &sealedPage) final
Write a preprocessed page to storage. The column must have been added before.
void CommitClusterGroup() final
Write out the page locations (page list envelope) for all the committed clusters since the last call ...
void UpdateExtraTypeInfo(const RExtraTypeInfoDescriptor &extraTypeInfo) final
Adds an extra type information record to schema.
void CommitSuppressedColumn(ColumnHandle_t columnHandle) final
Commits a suppressed column for the current cluster.
An RAII wrapper used to synchronize a page sink. See GetSinkGuard().
Abstract interface to write data into an ntuple.
const RNTupleWriteOptions & GetWriteOptions() const
Returns the sink's write options.
const std::string & GetNTupleName() const
Returns the NTuple name.
A page is a slice of a column that is mapped into memory.
Definition RPage.hxx:46
std::size_t GetNBytes() const
The space taken by column elements in the buffer.
Definition RPage.hxx:122
std::uint32_t GetElementSize() const
Definition RPage.hxx:130
std::uint32_t GetNElements() const
Definition RPage.hxx:131
std::unordered_map< const RFieldBase *, const RFieldBase * > FieldMap_t
The map keys are the projected target fields, the map values are the backing source fields Note that ...
RResult< void > Add(std::unique_ptr< RFieldBase > field, const FieldMap_t &fieldMap)
Adds a new projected field.
Base class for all ROOT issued exceptions.
Definition RError.hxx:78
Field-specific extra type information from the header / extension header.
A field translates read and write calls from/to underlying columns to/from tree values.
The on-storage meta-data of an ntuple.
The RNTupleModel encapsulates the schema of an ntuple.
std::unique_ptr< RNTupleModel > Clone() const
virtual TObject * Clone(const char *newname="") const
Make a clone of an object using the Streamer facility.
Definition TObject.cxx:229
RProjectedFields & GetProjectedFieldsOfModel(RNTupleModel &model)
void CallConnectPageSinkOnField(RFieldBase &, RPageSink &, NTupleSize_t firstEntry=0)
Definition RField.cxx:406
RFieldZero & GetFieldZeroOfModel(RNTupleModel &model)
std::uint64_t NTupleSize_t
Integer type long enough to hold the maximum number of entries in a column.
std::uint64_t DescriptorId_t
Distinguishes elements of the same type within a descriptor, e.g. different fields.
The incremental changes to a RNTupleModel
std::vector< RFieldBase * > fAddedProjectedFields
Points to the projected fields in fModel that were added as part of an updater transaction.
std::vector< RFieldBase * > fAddedFields
Points to the fields in fModel that were added as part of an updater transaction.
I/O performance counters that get registered in fMetrics.
const RColumnElementBase * fElement
Corresponds to the page's elements, for size calculation etc.
void * fBuffer
Location for sealed output. The memory buffer has to be large enough.
bool fAllowAlias
If false, the output buffer must not point to the input page buffer, which would otherwise be an opti...
int fCompressionSetting
Compression algorithm and level to apply.
bool fWriteChecksum
Adds an 8-byte little-endian xxhash3 checksum to the page payload.
Cluster that was staged, but not yet logically appended to the RNTuple.
A sealed page contains the bytes of a page as written to storage (packed & compressed).