30 const auto entryIndex =
fEntries.size();
34 auto [itrEntryIdx, isNew] = itrPageSet->second.emplace(
RPagePosition(page), entryIndex);
36 assert(itrEntryIdx->second <
fEntries.size());
39 fEntries[itrEntryIdx->second].fRefCounter += initialRefCounter;
40 return fEntries[itrEntryIdx->second];
48 return fEntries.emplace_back(
REntry{std::move(page), key, initialRefCounter});
53 std::lock_guard<std::mutex> lockGuard(fLock);
54 return RPageRef(AddPage(std::move(page), key, 1).fPage,
this);
59 std::lock_guard<std::mutex> lockGuard(fLock);
60 const auto &entry = AddPage(std::move(page), key, 0);
61 if (entry.fRefCounter == 0)
62 fUnusedPages[entry.fPage.GetClusterInfo().GetId()].emplace(entry.fPage.GetBuffer());
66 decltype(fLookupByBuffer)::iterator lookupByBufferItr)
68 fLookupByBuffer.erase(lookupByBufferItr);
70 auto itrPageSet = fLookupByKey.find(fEntries[entryIdx].fKey);
71 assert(itrPageSet != fLookupByKey.end());
72 itrPageSet->second.erase(
RPagePosition(fEntries[entryIdx].fPage));
73 if (itrPageSet->second.empty())
74 fLookupByKey.erase(itrPageSet);
76 const auto N = fEntries.size();
78 if (entryIdx != (
N - 1)) {
79 fLookupByBuffer[fEntries[
N - 1].fPage.GetBuffer()] = entryIdx;
80 itrPageSet = fLookupByKey.find(fEntries[
N - 1].fKey);
81 assert(itrPageSet != fLookupByKey.end());
82 auto itrEntryIdx = itrPageSet->second.find(
RPagePosition(fEntries[
N - 1].fPage));
83 assert(itrEntryIdx != itrPageSet->second.end());
84 itrEntryIdx->second = entryIdx;
85 fEntries[entryIdx] = std::move(fEntries[
N - 1]);
88 fEntries.resize(
N - 1);
94 std::lock_guard<std::mutex> lockGuard(fLock);
96 auto itrLookup = fLookupByBuffer.find(page.
GetBuffer());
97 assert(itrLookup != fLookupByBuffer.end());
98 const auto idx = itrLookup->second;
100 assert(fEntries[idx].fRefCounter >= 1);
101 if (--fEntries[idx].fRefCounter == 0) {
102 ErasePage(idx, itrLookup);
109 assert(itr != fUnusedPages.end());
111 if (itr->second.empty())
112 fUnusedPages.erase(itr);
118 std::lock_guard<std::mutex> lockGuard(fLock);
119 auto itrPageSet = fLookupByKey.find(key);
120 if (itrPageSet == fLookupByKey.end())
122 assert(!itrPageSet->second.empty());
124 auto itrEntryIdx = itrPageSet->second.upper_bound(
RPagePosition(globalIndex));
125 if (itrEntryIdx == itrPageSet->second.begin())
129 if (fEntries[itrEntryIdx->second].fPage.Contains(globalIndex)) {
130 if (fEntries[itrEntryIdx->second].fRefCounter == 0)
131 RemoveFromUnusedPages(fEntries[itrEntryIdx->second].fPage);
132 fEntries[itrEntryIdx->second].fRefCounter++;
133 return RPageRef(fEntries[itrEntryIdx->second].fPage,
this);
141 std::lock_guard<std::mutex> lockGuard(fLock);
142 auto itrPageSet = fLookupByKey.find(key);
143 if (itrPageSet == fLookupByKey.end())
145 assert(!itrPageSet->second.empty());
147 auto itrEntryIdx = itrPageSet->second.upper_bound(
RPagePosition(clusterIndex));
148 if (itrEntryIdx == itrPageSet->second.begin())
152 if (fEntries[itrEntryIdx->second].fPage.Contains(clusterIndex)) {
153 if (fEntries[itrEntryIdx->second].fRefCounter == 0)
154 RemoveFromUnusedPages(fEntries[itrEntryIdx->second].fPage);
155 fEntries[itrEntryIdx->second].fRefCounter++;
156 return RPageRef(fEntries[itrEntryIdx->second].fPage,
this);
163 std::lock_guard<std::mutex> lockGuard(fLock);
164 auto itr = fUnusedPages.find(clusterId);
165 if (itr == fUnusedPages.end())
168 for (
auto pageBuffer : itr->second) {
169 const auto itrLookupByBuffer = fLookupByBuffer.find(pageBuffer);
170 assert(itrLookupByBuffer != fLookupByBuffer.end());
171 const auto entryIdx = itrLookupByBuffer->second;
172 assert(fEntries[entryIdx].fRefCounter == 0);
173 ErasePage(entryIdx, itrLookupByBuffer);
176 fUnusedPages.erase(itr);
REntry & AddPage(RPage page, const RKey &key, std::int64_t initialRefCounter)
Add a new page to the fLookupByBuffer and fLookupByKey data structures.
void PreloadPage(RPage page, RKey key)
Like RegisterPage() but the reference counter is initialized to 0.
std::unordered_map< void *, std::size_t > fLookupByBuffer
Used in ReleasePage() to find the page index in fEntries.
std::vector< REntry > fEntries
All cached pages in the page pool.
void ReleasePage(const RPage &page)
Give back a page to the pool and decrease the reference counter.
void Evict(DescriptorId_t clusterId)
Removes unused pages (pages with reference counter 0) from the page pool.
void ErasePage(std::size_t entryIdx, decltype(fLookupByBuffer)::iterator lookupByBufferItr)
Called both by ReleasePage() and by Evict() to remove an unused page from the pool.
std::unordered_map< RKey, std::map< RPagePosition, std::size_t >, RKeyHasher > fLookupByKey
Used in GetPage() to find the right page in fEntries.
void RemoveFromUnusedPages(const RPage &page)
Called by GetPage(), when the reference counter increases from zero to one.
RPageRef GetPage(RKey key, NTupleSize_t globalIndex)
Tries to find the page corresponding to column and index in the cache.
RPageRef RegisterPage(RPage page, RKey key)
Adds a new page to the pool.
NTupleSize_t GetId() const
A page is a slice of a column that is mapped into memory.
const RClusterInfo & GetClusterInfo() const
NTupleSize_t GetGlobalRangeLast() const
Addresses a column element or field item relative to a particular cluster, instead of a global NTuple index.
std::uint64_t NTupleSize_t
Integer type long enough to hold the maximum number of entries in a column.
std::uint64_t DescriptorId_t
Distinguishes elements of the same type within a descriptor, e.g. different fields.
Every page in the page pool is annotated with a search key and a reference counter.
Used in fLookupByKey to store both the absolute and the cluster-local page index of the referenced page.