Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
RPagePool.cxx
Go to the documentation of this file.
1/// \file RPagePool.cxx
2/// \ingroup NTuple ROOT7
3/// \author Jakob Blomer <jblomer@cern.ch>
4/// \date 2018-10-04
5/// \warning This is part of the ROOT 7 prototype! It will change without notice. It might trigger earthquakes. Feedback
6/// is welcome!
7
8/*************************************************************************
9 * Copyright (C) 1995-2019, Rene Brun and Fons Rademakers. *
10 * All rights reserved. *
11 * *
12 * For the licensing terms see $ROOTSYS/LICENSE. *
13 * For the list of contributors see $ROOTSYS/README/CREDITS. *
14 *************************************************************************/
15
16#include <ROOT/RPagePool.hxx>
17#include <ROOT/RColumn.hxx>
18
19#include <TError.h>
20
21#include <algorithm>
22#include <cstdlib>
23#include <utility>
24
26ROOT::Experimental::Internal::RPagePool::AddPage(RPage page, const RKey &key, std::int64_t initialRefCounter)
27{
28 assert(fLookupByBuffer.count(page.GetBuffer()) == 0);
29
30 const auto entryIndex = fEntries.size();
31
32 auto itrPageSet = fLookupByKey.find(key);
33 if (itrPageSet != fLookupByKey.end()) {
34 auto [itrEntryIdx, isNew] = itrPageSet->second.emplace(RPagePosition(page), entryIndex);
35 if (!isNew) {
36 assert(itrEntryIdx->second < fEntries.size());
37 // We require that pages cover pairwise distinct element ranges of the column
38 assert(fEntries[itrEntryIdx->second].fPage.GetGlobalRangeLast() == page.GetGlobalRangeLast());
39 fEntries[itrEntryIdx->second].fRefCounter += initialRefCounter;
40 return fEntries[itrEntryIdx->second];
41 }
42 } else {
43 fLookupByKey.emplace(key, std::map<RPagePosition, std::size_t>{{RPagePosition(page), entryIndex}});
44 }
45
46 fLookupByBuffer[page.GetBuffer()] = entryIndex;
47
48 return fEntries.emplace_back(REntry{std::move(page), key, initialRefCounter});
49}
50
52{
53 std::lock_guard<std::mutex> lockGuard(fLock);
54 return RPageRef(AddPage(std::move(page), key, 1).fPage, this);
55}
56
58{
59 std::lock_guard<std::mutex> lockGuard(fLock);
60 const auto &entry = AddPage(std::move(page), key, 0);
61 if (entry.fRefCounter == 0)
62 fUnusedPages[entry.fPage.GetClusterInfo().GetId()].emplace(entry.fPage.GetBuffer());
63}
64
66 decltype(fLookupByBuffer)::iterator lookupByBufferItr)
67{
68 fLookupByBuffer.erase(lookupByBufferItr);
69
70 auto itrPageSet = fLookupByKey.find(fEntries[entryIdx].fKey);
71 assert(itrPageSet != fLookupByKey.end());
72 itrPageSet->second.erase(RPagePosition(fEntries[entryIdx].fPage));
73 if (itrPageSet->second.empty())
74 fLookupByKey.erase(itrPageSet);
75
76 const auto N = fEntries.size();
77 assert(entryIdx < N);
78 if (entryIdx != (N - 1)) {
79 fLookupByBuffer[fEntries[N - 1].fPage.GetBuffer()] = entryIdx;
80 itrPageSet = fLookupByKey.find(fEntries[N - 1].fKey);
81 assert(itrPageSet != fLookupByKey.end());
82 auto itrEntryIdx = itrPageSet->second.find(RPagePosition(fEntries[N - 1].fPage));
83 assert(itrEntryIdx != itrPageSet->second.end());
84 itrEntryIdx->second = entryIdx;
85 fEntries[entryIdx] = std::move(fEntries[N - 1]);
86 }
87
88 fEntries.resize(N - 1);
89}
90
92{
93 if (page.IsNull()) return;
94 std::lock_guard<std::mutex> lockGuard(fLock);
95
96 auto itrLookup = fLookupByBuffer.find(page.GetBuffer());
97 assert(itrLookup != fLookupByBuffer.end());
98 const auto idx = itrLookup->second;
99
100 assert(fEntries[idx].fRefCounter >= 1);
101 if (--fEntries[idx].fRefCounter == 0) {
102 ErasePage(idx, itrLookup);
103 }
104}
105
107{
108 auto itr = fUnusedPages.find(page.GetClusterInfo().GetId());
109 assert(itr != fUnusedPages.end());
110 itr->second.erase(page.GetBuffer());
111 if (itr->second.empty())
112 fUnusedPages.erase(itr);
113}
114
117{
118 std::lock_guard<std::mutex> lockGuard(fLock);
119 auto itrPageSet = fLookupByKey.find(key);
120 if (itrPageSet == fLookupByKey.end())
121 return RPageRef();
122 assert(!itrPageSet->second.empty());
123
124 auto itrEntryIdx = itrPageSet->second.upper_bound(RPagePosition(globalIndex));
125 if (itrEntryIdx == itrPageSet->second.begin())
126 return RPageRef();
127
128 --itrEntryIdx;
129 if (fEntries[itrEntryIdx->second].fPage.Contains(globalIndex)) {
130 if (fEntries[itrEntryIdx->second].fRefCounter == 0)
131 RemoveFromUnusedPages(fEntries[itrEntryIdx->second].fPage);
132 fEntries[itrEntryIdx->second].fRefCounter++;
133 return RPageRef(fEntries[itrEntryIdx->second].fPage, this);
134 }
135 return RPageRef();
136}
137
140{
141 std::lock_guard<std::mutex> lockGuard(fLock);
142 auto itrPageSet = fLookupByKey.find(key);
143 if (itrPageSet == fLookupByKey.end())
144 return RPageRef();
145 assert(!itrPageSet->second.empty());
146
147 auto itrEntryIdx = itrPageSet->second.upper_bound(RPagePosition(clusterIndex));
148 if (itrEntryIdx == itrPageSet->second.begin())
149 return RPageRef();
150
151 --itrEntryIdx;
152 if (fEntries[itrEntryIdx->second].fPage.Contains(clusterIndex)) {
153 if (fEntries[itrEntryIdx->second].fRefCounter == 0)
154 RemoveFromUnusedPages(fEntries[itrEntryIdx->second].fPage);
155 fEntries[itrEntryIdx->second].fRefCounter++;
156 return RPageRef(fEntries[itrEntryIdx->second].fPage, this);
157 }
158 return RPageRef();
159}
160
162{
163 std::lock_guard<std::mutex> lockGuard(fLock);
164 auto itr = fUnusedPages.find(clusterId);
165 if (itr == fUnusedPages.end())
166 return;
167
168 for (auto pageBuffer : itr->second) {
169 const auto itrLookupByBuffer = fLookupByBuffer.find(pageBuffer);
170 assert(itrLookupByBuffer != fLookupByBuffer.end());
171 const auto entryIdx = itrLookupByBuffer->second;
172 assert(fEntries[entryIdx].fRefCounter == 0);
173 ErasePage(entryIdx, itrLookupByBuffer);
174 }
175
176 fUnusedPages.erase(itr);
177}
#define N
REntry & AddPage(RPage page, const RKey &key, std::int64_t initialRefCounter)
Add a new page to the fLookupByBuffer and fLookupByKey data structures.
Definition RPagePool.cxx:26
void PreloadPage(RPage page, RKey key)
Like RegisterPage() but the reference counter is initialized to 0.
Definition RPagePool.cxx:57
std::unordered_map< void *, std::size_t > fLookupByBuffer
Used in ReleasePage() to find the page index in fPages.
std::vector< REntry > fEntries
All cached pages in the page pool.
void ReleasePage(const RPage &page)
Give back a page to the pool and decrease the reference counter.
Definition RPagePool.cxx:91
void Evict(DescriptorId_t clusterId)
Removes unused pages (pages with reference counter 0) from the page pool.
void ErasePage(std::size_t entryIdx, decltype(fLookupByBuffer)::iterator lookupByBufferItr)
Called both by ReleasePage() and by Evict() to remove an unused page from the pool.
Definition RPagePool.cxx:65
std::unordered_map< RKey, std::map< RPagePosition, std::size_t >, RKeyHasher > fLookupByKey
Used in GetPage() to find the right page in fEntries.
void RemoveFromUnusedPages(const RPage &page)
Called by GetPage(), when the reference counter increases from zero to one.
RPageRef GetPage(RKey key, NTupleSize_t globalIndex)
Tries to find the page corresponding to column and index in the cache.
RPageRef RegisterPage(RPage page, RKey key)
Adds a new page to the pool.
Definition RPagePool.cxx:51
Reference to a page stored in the page pool.
A page is a slice of a column that is mapped into memory.
Definition RPage.hxx:47
const RClusterInfo & GetClusterInfo() const
Definition RPage.hxx:129
NTupleSize_t GetGlobalRangeLast() const
Definition RPage.hxx:126
Addresses a column element or field item relative to a particular cluster, instead of a global NTuple...
std::uint64_t NTupleSize_t
Integer type long enough to hold the maximum number of entries in a column.
std::uint64_t DescriptorId_t
Distinguishes elements of the same type within a descriptor, e.g. different fields.
Every page in the page pool is annotated with a search key and a reference counter.
Definition RPagePool.hxx:78
Used in fLookupByKey to store both the absolute and the cluster-local page index of the referenced pa...
Definition RPagePool.hxx:87