/tmp/bitcoin/src/support/allocators/pool.h
Line | Count | Source |
1 | | // Copyright (c) 2022-present The Bitcoin Core developers |
2 | | // Distributed under the MIT software license, see the accompanying |
3 | | // file COPYING or http://www.opensource.org/licenses/mit-license.php. |
4 | | |
5 | | #ifndef BITCOIN_SUPPORT_ALLOCATORS_POOL_H |
6 | | #define BITCOIN_SUPPORT_ALLOCATORS_POOL_H |
7 | | |
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <list>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>

#include <util/check.h>
#include <util/overflow.h>
19 | | |
20 | | /** |
21 | | * A memory resource similar to std::pmr::unsynchronized_pool_resource, but |
22 | | * optimized for node-based containers. It has the following properties: |
23 | | * |
24 | | * * Owns the allocated memory and frees it on destruction, even when deallocate |
25 | | * has not been called on the allocated blocks. |
26 | | * |
27 | | * * Consists of a number of pools, each one for a different block size. |
28 | | * Each pool holds blocks of uniform size in a freelist. |
29 | | * |
30 | | * * Exhausting memory in a freelist causes a new allocation of a fixed size chunk. |
31 | | * This chunk is used to carve out blocks. |
32 | | * |
33 | | * * Block sizes or alignments that can not be served by the pools are allocated |
34 | | * and deallocated by operator new(). |
35 | | * |
36 | | * PoolResource is not thread-safe. It is intended to be used by PoolAllocator. |
37 | | * |
38 | | * @tparam MAX_BLOCK_SIZE_BYTES Maximum size to allocate with the pool. If larger |
39 | | * sizes are requested, allocation falls back to new(). |
40 | | * |
41 | | * @tparam ALIGN_BYTES Required alignment for the allocations. |
42 | | * |
43 | | * An example: If you create a PoolResource<128, 8>(262144) and perform a bunch of |
44 | | * allocations and deallocate 2 blocks with size 8 bytes, and 3 blocks with size 16, |
45 | | * the members will look like this: |
46 | | * |
47 | | * m_free_lists m_allocated_chunks |
48 | | * ┌───┐ ┌───┐ ┌────────────-------──────┐ |
49 | | * │ │ blocks │ ├─►│ 262144 B │ |
50 | | * │ │ ┌─────┐ ┌─────┐ └─┬─┘ └────────────-------──────┘ |
51 | | * │ 1 ├─►│ 8 B ├─►│ 8 B │ │ |
52 | | * │ │ └─────┘ └─────┘ : |
53 | | * │ │ │ |
54 | | * │ │ ┌─────┐ ┌─────┐ ┌─────┐ ▼ |
55 | | * │ 2 ├─►│16 B ├─►│16 B ├─►│16 B │ ┌───┐ ┌─────────────────────────┐ |
56 | | * │ │ └─────┘ └─────┘ └─────┘ │ ├─►│ ▲ │ ▲ |
57 | | * │ │ └───┘ └──────────┬──────────────┘ │ |
58 | | * │ . │ │ m_available_memory_end |
59 | | * │ . │ m_available_memory_it |
60 | | * │ . │ |
61 | | * │ │ |
62 | | * │ │ |
63 | | * │16 │ |
64 | | * └───┘ |
65 | | * |
66 | | * Here m_free_lists[1] holds the 2 blocks of size 8 bytes, and m_free_lists[2] |
67 | | * holds the 3 blocks of size 16. The blocks came from the data stored in the |
 * m_allocated_chunks list. Each chunk is 262144 bytes. The last chunk still has
 * some memory available for the blocks, and when m_available_memory_it is at the
 * end, a new chunk will be allocated and added to the list.
71 | | */ |
template <std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES>
class PoolResource final
{
    // Compile-time sanity checks on the template parameters.
    static_assert(ALIGN_BYTES > 0, "ALIGN_BYTES must be nonzero");
    static_assert((ALIGN_BYTES & (ALIGN_BYTES - 1)) == 0, "ALIGN_BYTES must be a power of two");
77 | | |
    /**
     * In-place linked list of the allocations, used for the freelist.
     *
     * A ListNode is placement-constructed directly inside a freed block, so the
     * block itself stores the link to the next free block of the same size.
     */
    struct ListNode {
        ListNode* m_next;

        explicit ListNode(ListNode* next) : m_next(next) {}
    };
    // Freed blocks are reused as raw storage without running a destructor first.
    static_assert(std::is_trivially_destructible_v<ListNode>, "Make sure we don't need to manually call a destructor");
87 | | |
    /**
     * Internal alignment value. The larger of the requested ALIGN_BYTES and
     * alignof(ListNode), so a freelist node can always be placement-constructed
     * into any block handed out by the pool.
     */
    static constexpr std::size_t ELEM_ALIGN_BYTES = std::max(alignof(ListNode), ALIGN_BYTES);
    static_assert((ELEM_ALIGN_BYTES & (ELEM_ALIGN_BYTES - 1)) == 0, "ELEM_ALIGN_BYTES must be a power of two");
    static_assert(sizeof(ListNode) <= ELEM_ALIGN_BYTES, "Units of size ELEM_SIZE_ALIGN need to be able to store a ListNode");
    static_assert((MAX_BLOCK_SIZE_BYTES & (ELEM_ALIGN_BYTES - 1)) == 0, "MAX_BLOCK_SIZE_BYTES needs to be a multiple of the alignment.");
95 | | |
    /**
     * Size in bytes to allocate per chunk. Set once at construction.
     */
    const size_t m_chunk_size_bytes;

    /**
     * Contains all allocated pools of memory, used to free the data in the destructor.
     */
    std::list<std::byte*> m_allocated_chunks{};

    /**
     * Single linked lists of all data that came from deallocating.
     * m_free_lists[n] will serve blocks of size n*ELEM_ALIGN_BYTES.
     * Index 0 is never used: NumElemAlignBytes() always returns at least 1.
     */
    std::array<ListNode*, MAX_BLOCK_SIZE_BYTES / ELEM_ALIGN_BYTES + 1> m_free_lists{};

    /**
     * Points to the beginning of available memory for carving out allocations.
     */
    std::byte* m_available_memory_it = nullptr;

    /**
     * Points to the end of available memory for carving out allocations.
     *
     * That member variable is redundant, and is always equal to `m_allocated_chunks.back() + m_chunk_size_bytes`
     * whenever it is accessed, but `m_available_memory_end` caches this for clarity and efficiency.
     */
    std::byte* m_available_memory_end = nullptr;
124 | | |
125 | | /** |
126 | | * How many multiple of ELEM_ALIGN_BYTES are necessary to fit bytes. We use that result directly as an index |
127 | | * into m_free_lists. Round up for the special case when bytes==0. |
128 | | */ |
129 | | [[nodiscard]] static constexpr std::size_t NumElemAlignBytes(std::size_t bytes) |
130 | 134M | { |
131 | 134M | return CeilDiv(bytes, ELEM_ALIGN_BYTES) + (bytes == 0); |
132 | 134M | } PoolResource<152ul, 8ul>::NumElemAlignBytes(unsigned long) Line | Count | Source | 130 | 134M | { | 131 | 134M | return CeilDiv(bytes, ELEM_ALIGN_BYTES) + (bytes == 0); | 132 | 134M | } |
PoolResource<48ul, 8ul>::NumElemAlignBytes(unsigned long) Line | Count | Source | 130 | 20.0k | { | 131 | 20.0k | return CeilDiv(bytes, ELEM_ALIGN_BYTES) + (bytes == 0); | 132 | 20.0k | } |
PoolResource<8ul, 8ul>::NumElemAlignBytes(unsigned long) Line | Count | Source | 130 | 7 | { | 131 | 7 | return CeilDiv(bytes, ELEM_ALIGN_BYTES) + (bytes == 0); | 132 | 7 | } |
PoolResource<128ul, 8ul>::NumElemAlignBytes(unsigned long) Line | Count | Source | 130 | 764 | { | 131 | 764 | return CeilDiv(bytes, ELEM_ALIGN_BYTES) + (bytes == 0); | 132 | 764 | } |
|
133 | | |
134 | | /** |
135 | | * True when it is possible to make use of the freelist |
136 | | */ |
137 | | [[nodiscard]] static constexpr bool IsFreeListUsable(std::size_t bytes, std::size_t alignment) |
138 | 134M | { |
139 | 134M | return alignment <= ELEM_ALIGN_BYTES && bytes <= MAX_BLOCK_SIZE_BYTES; |
140 | 134M | } PoolResource<152ul, 8ul>::IsFreeListUsable(unsigned long, unsigned long) Line | Count | Source | 138 | 134M | { | 139 | 134M | return alignment <= ELEM_ALIGN_BYTES && bytes <= MAX_BLOCK_SIZE_BYTES; | 140 | 134M | } |
PoolResource<48ul, 8ul>::IsFreeListUsable(unsigned long, unsigned long) Line | Count | Source | 138 | 20.0k | { | 139 | 20.0k | return alignment <= ELEM_ALIGN_BYTES && bytes <= MAX_BLOCK_SIZE_BYTES; | 140 | 20.0k | } |
PoolResource<8ul, 8ul>::IsFreeListUsable(unsigned long, unsigned long) Line | Count | Source | 138 | 10 | { | 139 | 10 | return alignment <= ELEM_ALIGN_BYTES && bytes <= MAX_BLOCK_SIZE_BYTES; | 140 | 10 | } |
PoolResource<128ul, 8ul>::IsFreeListUsable(unsigned long, unsigned long) Line | Count | Source | 138 | 1.88k | { | 139 | 1.88k | return alignment <= ELEM_ALIGN_BYTES && bytes <= MAX_BLOCK_SIZE_BYTES; | 140 | 1.88k | } |
|
141 | | |
142 | | /** |
143 | | * Replaces node with placement constructed ListNode that points to the previous node |
144 | | */ |
145 | | void PlacementAddToList(void* p, ListNode*& node) |
146 | 66.9M | { |
147 | 66.9M | node = new (p) ListNode{node}; |
148 | 66.9M | } PoolResource<152ul, 8ul>::PlacementAddToList(void*, PoolResource<152ul, 8ul>::ListNode*&) Line | Count | Source | 146 | 66.9M | { | 147 | 66.9M | node = new (p) ListNode{node}; | 148 | 66.9M | } |
PoolResource<48ul, 8ul>::PlacementAddToList(void*, PoolResource<48ul, 8ul>::ListNode*&) Line | Count | Source | 146 | 10.2k | { | 147 | 10.2k | node = new (p) ListNode{node}; | 148 | 10.2k | } |
PoolResource<8ul, 8ul>::PlacementAddToList(void*, PoolResource<8ul, 8ul>::ListNode*&) Line | Count | Source | 146 | 3 | { | 147 | 3 | node = new (p) ListNode{node}; | 148 | 3 | } |
PoolResource<128ul, 8ul>::PlacementAddToList(void*, PoolResource<128ul, 8ul>::ListNode*&) Line | Count | Source | 146 | 389 | { | 147 | 389 | node = new (p) ListNode{node}; | 148 | 389 | } |
|
149 | | |
150 | | /** |
151 | | * Allocate one full memory chunk which will be used to carve out allocations. |
152 | | * Also puts any leftover bytes into the freelist. |
153 | | * |
154 | | * Precondition: leftover bytes are either 0 or few enough to fit into a place in the freelist |
155 | | */ |
156 | | void AllocateChunk() |
157 | 388k | { |
158 | | // if there is still any available memory left, put it into the freelist. |
159 | 388k | size_t remaining_available_bytes = m_available_memory_end - m_available_memory_it; |
160 | 388k | if (0 != remaining_available_bytes) { |
161 | 8.68k | ASAN_UNPOISON_MEMORY_REGION(m_available_memory_it, sizeof(ListNode)); |
162 | 8.68k | PlacementAddToList(m_available_memory_it, m_free_lists[remaining_available_bytes / ELEM_ALIGN_BYTES]); |
163 | 8.68k | ASAN_POISON_MEMORY_REGION(m_available_memory_it, sizeof(ListNode)); |
164 | 8.68k | } |
165 | | |
166 | 388k | void* storage = ::operator new (m_chunk_size_bytes, std::align_val_t{ELEM_ALIGN_BYTES}); |
167 | 388k | m_available_memory_it = new (storage) std::byte[m_chunk_size_bytes]; |
168 | 388k | m_available_memory_end = m_available_memory_it + m_chunk_size_bytes; |
169 | 388k | ASAN_POISON_MEMORY_REGION(m_available_memory_it, m_chunk_size_bytes); |
170 | 388k | m_allocated_chunks.emplace_back(m_available_memory_it); |
171 | 388k | } PoolResource<152ul, 8ul>::AllocateChunk() Line | Count | Source | 157 | 388k | { | 158 | | // if there is still any available memory left, put it into the freelist. | 159 | 388k | size_t remaining_available_bytes = m_available_memory_end - m_available_memory_it; | 160 | 388k | if (0 != remaining_available_bytes) { | 161 | 8.44k | ASAN_UNPOISON_MEMORY_REGION(m_available_memory_it, sizeof(ListNode)); | 162 | 8.44k | PlacementAddToList(m_available_memory_it, m_free_lists[remaining_available_bytes / ELEM_ALIGN_BYTES]); | 163 | 8.44k | ASAN_POISON_MEMORY_REGION(m_available_memory_it, sizeof(ListNode)); | 164 | 8.44k | } | 165 | | | 166 | 388k | void* storage = ::operator new (m_chunk_size_bytes, std::align_val_t{ELEM_ALIGN_BYTES}); | 167 | 388k | m_available_memory_it = new (storage) std::byte[m_chunk_size_bytes]; | 168 | 388k | m_available_memory_end = m_available_memory_it + m_chunk_size_bytes; | 169 | 388k | ASAN_POISON_MEMORY_REGION(m_available_memory_it, m_chunk_size_bytes); | 170 | 388k | m_allocated_chunks.emplace_back(m_available_memory_it); | 171 | 388k | } |
PoolResource<8ul, 8ul>::AllocateChunk() Line | Count | Source | 157 | 1 | { | 158 | | // if there is still any available memory left, put it into the freelist. | 159 | 1 | size_t remaining_available_bytes = m_available_memory_end - m_available_memory_it; | 160 | 1 | if (0 != remaining_available_bytes) { | 161 | 0 | ASAN_UNPOISON_MEMORY_REGION(m_available_memory_it, sizeof(ListNode)); | 162 | 0 | PlacementAddToList(m_available_memory_it, m_free_lists[remaining_available_bytes / ELEM_ALIGN_BYTES]); | 163 | 0 | ASAN_POISON_MEMORY_REGION(m_available_memory_it, sizeof(ListNode)); | 164 | 0 | } | 165 | | | 166 | 1 | void* storage = ::operator new (m_chunk_size_bytes, std::align_val_t{ELEM_ALIGN_BYTES}); | 167 | 1 | m_available_memory_it = new (storage) std::byte[m_chunk_size_bytes]; | 168 | 1 | m_available_memory_end = m_available_memory_it + m_chunk_size_bytes; | 169 | 1 | ASAN_POISON_MEMORY_REGION(m_available_memory_it, m_chunk_size_bytes); | 170 | 1 | m_allocated_chunks.emplace_back(m_available_memory_it); | 171 | 1 | } |
PoolResource<128ul, 8ul>::AllocateChunk() Line | Count | Source | 157 | 10 | { | 158 | | // if there is still any available memory left, put it into the freelist. | 159 | 10 | size_t remaining_available_bytes = m_available_memory_end - m_available_memory_it; | 160 | 10 | if (0 != remaining_available_bytes) { | 161 | 8 | ASAN_UNPOISON_MEMORY_REGION(m_available_memory_it, sizeof(ListNode)); | 162 | 8 | PlacementAddToList(m_available_memory_it, m_free_lists[remaining_available_bytes / ELEM_ALIGN_BYTES]); | 163 | 8 | ASAN_POISON_MEMORY_REGION(m_available_memory_it, sizeof(ListNode)); | 164 | 8 | } | 165 | | | 166 | 10 | void* storage = ::operator new (m_chunk_size_bytes, std::align_val_t{ELEM_ALIGN_BYTES}); | 167 | 10 | m_available_memory_it = new (storage) std::byte[m_chunk_size_bytes]; | 168 | 10 | m_available_memory_end = m_available_memory_it + m_chunk_size_bytes; | 169 | 10 | ASAN_POISON_MEMORY_REGION(m_available_memory_it, m_chunk_size_bytes); | 170 | 10 | m_allocated_chunks.emplace_back(m_available_memory_it); | 171 | 10 | } |
PoolResource<48ul, 8ul>::AllocateChunk() Line | Count | Source | 157 | 239 | { | 158 | | // if there is still any available memory left, put it into the freelist. | 159 | 239 | size_t remaining_available_bytes = m_available_memory_end - m_available_memory_it; | 160 | 239 | if (0 != remaining_available_bytes) { | 161 | 238 | ASAN_UNPOISON_MEMORY_REGION(m_available_memory_it, sizeof(ListNode)); | 162 | 238 | PlacementAddToList(m_available_memory_it, m_free_lists[remaining_available_bytes / ELEM_ALIGN_BYTES]); | 163 | 238 | ASAN_POISON_MEMORY_REGION(m_available_memory_it, sizeof(ListNode)); | 164 | 238 | } | 165 | | | 166 | 239 | void* storage = ::operator new (m_chunk_size_bytes, std::align_val_t{ELEM_ALIGN_BYTES}); | 167 | 239 | m_available_memory_it = new (storage) std::byte[m_chunk_size_bytes]; | 168 | 239 | m_available_memory_end = m_available_memory_it + m_chunk_size_bytes; | 169 | 239 | ASAN_POISON_MEMORY_REGION(m_available_memory_it, m_chunk_size_bytes); | 170 | 239 | m_allocated_chunks.emplace_back(m_available_memory_it); | 171 | 239 | } |
|
172 | | |
    /**
     * Access to internals for testing purpose only
     */
    friend class PoolResourceTester;
177 | | |
public:
    /**
     * Construct a new PoolResource object which allocates the first chunk.
     * chunk_size_bytes will be rounded up to next multiple of ELEM_ALIGN_BYTES.
     */
    explicit PoolResource(std::size_t chunk_size_bytes)
        : m_chunk_size_bytes(NumElemAlignBytes(chunk_size_bytes) * ELEM_ALIGN_BYTES)
    {
        // A chunk must be able to hold the largest poolable block in one piece.
        assert(m_chunk_size_bytes >= MAX_BLOCK_SIZE_BYTES);
        AllocateChunk();
    }
189 | | |
    /**
     * Construct a new PoolResource object, defaults to 2^18=262144 bytes chunk size.
     */
    PoolResource() : PoolResource(262144) {}
194 | | |
    /**
     * Disable copy & move semantics, these are not supported for the resource.
     * NOTE(review): supporting them would presumably require transferring chunk
     * ownership and freelists — deliberately unsupported here.
     */
    PoolResource(const PoolResource&) = delete;
    PoolResource& operator=(const PoolResource&) = delete;
    PoolResource(PoolResource&&) = delete;
    PoolResource& operator=(PoolResource&&) = delete;
202 | | |
203 | | /** |
204 | | * Deallocates all memory allocated associated with the memory resource. |
205 | | */ |
206 | | ~PoolResource() |
207 | 376k | { |
208 | 388k | for (std::byte* chunk : m_allocated_chunks) { |
209 | 388k | std::destroy(chunk, chunk + m_chunk_size_bytes); |
210 | 388k | ::operator delete ((void*)chunk, std::align_val_t{ELEM_ALIGN_BYTES}); |
211 | 388k | ASAN_UNPOISON_MEMORY_REGION(chunk, m_chunk_size_bytes); |
212 | 388k | } |
213 | 376k | } PoolResource<152ul, 8ul>::~PoolResource() Line | Count | Source | 207 | 376k | { | 208 | 388k | for (std::byte* chunk : m_allocated_chunks) { | 209 | 388k | std::destroy(chunk, chunk + m_chunk_size_bytes); | 210 | 388k | ::operator delete ((void*)chunk, std::align_val_t{ELEM_ALIGN_BYTES}); | 211 | 388k | ASAN_UNPOISON_MEMORY_REGION(chunk, m_chunk_size_bytes); | 212 | 388k | } | 213 | 376k | } |
PoolResource<8ul, 8ul>::~PoolResource() Line | Count | Source | 207 | 1 | { | 208 | 1 | for (std::byte* chunk : m_allocated_chunks) { | 209 | 1 | std::destroy(chunk, chunk + m_chunk_size_bytes); | 210 | 1 | ::operator delete ((void*)chunk, std::align_val_t{ELEM_ALIGN_BYTES}); | 211 | 1 | ASAN_UNPOISON_MEMORY_REGION(chunk, m_chunk_size_bytes); | 212 | 1 | } | 213 | 1 | } |
PoolResource<128ul, 8ul>::~PoolResource() Line | Count | Source | 207 | 2 | { | 208 | 10 | for (std::byte* chunk : m_allocated_chunks) { | 209 | 10 | std::destroy(chunk, chunk + m_chunk_size_bytes); | 210 | 10 | ::operator delete ((void*)chunk, std::align_val_t{ELEM_ALIGN_BYTES}); | 211 | 10 | ASAN_UNPOISON_MEMORY_REGION(chunk, m_chunk_size_bytes); | 212 | 10 | } | 213 | 2 | } |
PoolResource<48ul, 8ul>::~PoolResource() Line | Count | Source | 207 | 1 | { | 208 | 239 | for (std::byte* chunk : m_allocated_chunks) { | 209 | 239 | std::destroy(chunk, chunk + m_chunk_size_bytes); | 210 | 239 | ::operator delete ((void*)chunk, std::align_val_t{ELEM_ALIGN_BYTES}); | 211 | 239 | ASAN_UNPOISON_MEMORY_REGION(chunk, m_chunk_size_bytes); | 212 | 239 | } | 213 | 1 | } |
|
214 | | |
    /**
     * Allocates a block of bytes. If possible the freelist is used, otherwise allocation
     * is forwarded to ::operator new().
     */
    void* Allocate(std::size_t bytes, std::size_t alignment)
    {
        if (IsFreeListUsable(bytes, alignment)) {
            const std::size_t num_alignments = NumElemAlignBytes(bytes);
            if (nullptr != m_free_lists[num_alignments]) {
                // we've already got data in the pool's freelist, unlink one element and return the pointer
                // to the unlinked memory. Since ListNode is trivially destructible we can just treat it as
                // uninitialized memory. Briefly unpoison only the node header to read the
                // next pointer, then hand the requested `bytes` back to the caller unpoisoned.
                ASAN_UNPOISON_MEMORY_REGION(m_free_lists[num_alignments], sizeof(ListNode));
                auto* next{m_free_lists[num_alignments]->m_next};
                ASAN_POISON_MEMORY_REGION(m_free_lists[num_alignments], sizeof(ListNode));
                ASAN_UNPOISON_MEMORY_REGION(m_free_lists[num_alignments], bytes);
                return std::exchange(m_free_lists[num_alignments], next);
            }

            // freelist is empty: get one allocation from allocated chunk memory.
            const std::ptrdiff_t round_bytes = static_cast<std::ptrdiff_t>(num_alignments * ELEM_ALIGN_BYTES);
            if (round_bytes > m_available_memory_end - m_available_memory_it) {
                // slow path, only happens when a new chunk needs to be allocated
                AllocateChunk();
            }

            // Make sure we use the right amount of bytes for that freelist (might be rounded up),
            // so the block can later be returned to the matching freelist bucket.
            ASAN_UNPOISON_MEMORY_REGION(m_available_memory_it, round_bytes);
            return std::exchange(m_available_memory_it, m_available_memory_it + round_bytes);
        }

        // Can't use the pool => use operator new()
        return ::operator new (bytes, std::align_val_t{alignment});
    }
249 | | |
    /**
     * Returns a block to the freelists, or deletes the block when it did not come from the chunks.
     */
    void Deallocate(void* p, std::size_t bytes, std::size_t alignment) noexcept
    {
        if (IsFreeListUsable(bytes, alignment)) {
            const std::size_t num_alignments = NumElemAlignBytes(bytes);
            // put the memory block into the linked list. We can placement construct the ListNode
            // into the memory since we can be sure the alignment is correct.
            ASAN_UNPOISON_MEMORY_REGION(p, sizeof(ListNode));
            PlacementAddToList(p, m_free_lists[num_alignments]);
            // Re-poison the returned block (at least the node header) so any
            // use-after-free by the caller is caught under ASan.
            ASAN_POISON_MEMORY_REGION(p, std::max(bytes, sizeof(ListNode)));
        } else {
            // Can't use the pool => forward deallocation to ::operator delete().
            ::operator delete (p, std::align_val_t{alignment});
        }
    }
267 | | |
268 | | /** |
269 | | * Number of allocated chunks |
270 | | */ |
271 | | [[nodiscard]] std::size_t NumAllocatedChunks() const |
272 | 1.95M | { |
273 | 1.95M | return m_allocated_chunks.size(); |
274 | 1.95M | } PoolResource<152ul, 8ul>::NumAllocatedChunks() const Line | Count | Source | 272 | 1.95M | { | 273 | 1.95M | return m_allocated_chunks.size(); | 274 | 1.95M | } |
PoolResource<48ul, 8ul>::NumAllocatedChunks() const Line | Count | Source | 272 | 5 | { | 273 | 5 | return m_allocated_chunks.size(); | 274 | 5 | } |
|
275 | | |
276 | | /** |
277 | | * Size in bytes to allocate per chunk, currently hardcoded to a fixed size. |
278 | | */ |
279 | | [[nodiscard]] size_t ChunkSizeBytes() const |
280 | 979k | { |
281 | 979k | return m_chunk_size_bytes; |
282 | 979k | } PoolResource<152ul, 8ul>::ChunkSizeBytes() const Line | Count | Source | 280 | 979k | { | 281 | 979k | return m_chunk_size_bytes; | 282 | 979k | } |
PoolResource<8ul, 8ul>::ChunkSizeBytes() const Line | Count | Source | 280 | 9 | { | 281 | 9 | return m_chunk_size_bytes; | 282 | 9 | } |
PoolResource<128ul, 8ul>::ChunkSizeBytes() const Line | Count | Source | 280 | 10 | { | 281 | 10 | return m_chunk_size_bytes; | 282 | 10 | } |
PoolResource<48ul, 8ul>::ChunkSizeBytes() const Line | Count | Source | 280 | 243 | { | 281 | 243 | return m_chunk_size_bytes; | 282 | 243 | } |
|
283 | | }; |
284 | | |
285 | | |
286 | | /** |
287 | | * Forwards all allocations/deallocations to the PoolResource. |
288 | | */ |
289 | | template <class T, std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES = alignof(T)> |
290 | | class PoolAllocator |
291 | | { |
292 | | PoolResource<MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>* m_resource; |
293 | | |
294 | | template <typename U, std::size_t M, std::size_t A> |
295 | | friend class PoolAllocator; |
296 | | |
297 | | public: |
298 | | using value_type = T; |
299 | | using ResourceType = PoolResource<MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>; |
300 | | |
301 | | /** |
302 | | * Not explicit so we can easily construct it with the correct resource |
303 | | */ |
304 | | PoolAllocator(ResourceType* resource) noexcept |
305 | 376k | : m_resource(resource) |
306 | 376k | { |
307 | 376k | } PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 152ul, 8ul>::PoolAllocator(PoolResource<152ul, 8ul>*) Line | Count | Source | 305 | 376k | : m_resource(resource) | 306 | 376k | { | 307 | 376k | } |
PoolAllocator<std::pair<long const, long>, 48ul, 8ul>::PoolAllocator(PoolResource<48ul, 8ul>*) Line | Count | Source | 305 | 1 | : m_resource(resource) | 306 | 1 | { | 307 | 1 | } |
|
308 | | |
309 | | PoolAllocator(const PoolAllocator& other) noexcept = default; |
310 | | PoolAllocator& operator=(const PoolAllocator& other) noexcept = default; |
311 | | |
312 | | template <class U> |
313 | | PoolAllocator(const PoolAllocator<U, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& other) noexcept |
314 | 1.87M | : m_resource(other.resource()) |
315 | 1.87M | { |
316 | 1.87M | } PoolAllocator<std::__detail::_Hash_node_base*, 152ul, 8ul>::PoolAllocator<std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>>(PoolAllocator<std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>, 152ul, 8ul> const&) Line | Count | Source | 314 | 518k | : m_resource(other.resource()) | 315 | 518k | { | 316 | 518k | } |
PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 152ul, 8ul>::PoolAllocator<std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>>(PoolAllocator<std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>, 152ul, 8ul> const&) Line | Count | Source | 314 | 979k | : m_resource(other.resource()) | 315 | 979k | { | 316 | 979k | } |
PoolAllocator<std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>, 152ul, 8ul>::PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>>(PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 152ul, 8ul> const&) Line | Count | Source | 314 | 376k | : m_resource(other.resource()) | 315 | 376k | { | 316 | 376k | } |
PoolAllocator<std::__detail::_Hash_node_base*, 48ul, 8ul>::PoolAllocator<std::__detail::_Hash_node<std::pair<long const, long>, false>>(PoolAllocator<std::__detail::_Hash_node<std::pair<long const, long>, false>, 48ul, 8ul> const&) Line | Count | Source | 314 | 20 | : m_resource(other.resource()) | 315 | 20 | { | 316 | 20 | } |
PoolAllocator<std::__detail::_Hash_node<std::pair<long const, long>, false>, 48ul, 8ul>::PoolAllocator<std::pair<long const, long>>(PoolAllocator<std::pair<long const, long>, 48ul, 8ul> const&) Line | Count | Source | 314 | 1 | : m_resource(other.resource()) | 315 | 1 | { | 316 | 1 | } |
PoolAllocator<std::pair<long const, long>, 48ul, 8ul>::PoolAllocator<std::__detail::_Hash_node<std::pair<long const, long>, false>>(PoolAllocator<std::__detail::_Hash_node<std::pair<long const, long>, false>, 48ul, 8ul> const&) Line | Count | Source | 314 | 2 | : m_resource(other.resource()) | 315 | 2 | { | 316 | 2 | } |
|
317 | | |
318 | | /** |
319 | | * The rebind struct here is mandatory because we use non type template arguments for |
320 | | * PoolAllocator. See https://en.cppreference.com/w/cpp/named_req/Allocator#cite_note-2 |
321 | | */ |
322 | | template <typename U> |
323 | | struct rebind { |
324 | | using other = PoolAllocator<U, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>; |
325 | | }; |
326 | | |
327 | | /** |
328 | | * Forwards each call to the resource. |
329 | | */ |
330 | | T* allocate(size_t n) |
331 | 67.1M | { |
332 | 67.1M | return static_cast<T*>(m_resource->Allocate(n * sizeof(T), alignof(T))); |
333 | 67.1M | } PoolAllocator<std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>, 152ul, 8ul>::allocate(unsigned long) Line | Count | Source | 331 | 66.8M | { | 332 | 66.8M | return static_cast<T*>(m_resource->Allocate(n * sizeof(T), alignof(T))); | 333 | 66.8M | } |
PoolAllocator<std::__detail::_Hash_node_base*, 152ul, 8ul>::allocate(unsigned long) Line | Count | Source | 331 | 259k | { | 332 | 259k | return static_cast<T*>(m_resource->Allocate(n * sizeof(T), alignof(T))); | 333 | 259k | } |
PoolAllocator<std::__detail::_Hash_node_base*, 48ul, 8ul>::allocate(unsigned long) Line | Count | Source | 331 | 10 | { | 332 | 10 | return static_cast<T*>(m_resource->Allocate(n * sizeof(T), alignof(T))); | 333 | 10 | } |
PoolAllocator<std::__detail::_Hash_node<std::pair<long const, long>, false>, 48ul, 8ul>::allocate(unsigned long) Line | Count | Source | 331 | 10.0k | { | 332 | 10.0k | return static_cast<T*>(m_resource->Allocate(n * sizeof(T), alignof(T))); | 333 | 10.0k | } |
|
334 | | |
335 | | /** |
336 | | * Forwards each call to the resource. |
337 | | */ |
338 | | void deallocate(T* p, size_t n) noexcept |
339 | 67.1M | { |
340 | 67.1M | m_resource->Deallocate(p, n * sizeof(T), alignof(T)); |
341 | 67.1M | } PoolAllocator<std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>, 152ul, 8ul>::deallocate(std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>*, unsigned long) Line | Count | Source | 339 | 66.8M | { | 340 | 66.8M | m_resource->Deallocate(p, n * sizeof(T), alignof(T)); | 341 | 66.8M | } |
PoolAllocator<std::__detail::_Hash_node_base*, 152ul, 8ul>::deallocate(std::__detail::_Hash_node_base**, unsigned long) Line | Count | Source | 339 | 259k | { | 340 | 259k | m_resource->Deallocate(p, n * sizeof(T), alignof(T)); | 341 | 259k | } |
PoolAllocator<std::__detail::_Hash_node<std::pair<long const, long>, false>, 48ul, 8ul>::deallocate(std::__detail::_Hash_node<std::pair<long const, long>, false>*, unsigned long) Line | Count | Source | 339 | 10.0k | { | 340 | 10.0k | m_resource->Deallocate(p, n * sizeof(T), alignof(T)); | 341 | 10.0k | } |
PoolAllocator<std::__detail::_Hash_node_base*, 48ul, 8ul>::deallocate(std::__detail::_Hash_node_base**, unsigned long) Line | Count | Source | 339 | 10 | { | 340 | 10 | m_resource->Deallocate(p, n * sizeof(T), alignof(T)); | 341 | 10 | } |
|
342 | | |
343 | | ResourceType* resource() const noexcept |
344 | 2.85M | { |
345 | 2.85M | return m_resource; |
346 | 2.85M | } PoolAllocator<std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>, 152ul, 8ul>::resource() const Line | Count | Source | 344 | 1.49M | { | 345 | 1.49M | return m_resource; | 346 | 1.49M | } |
PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 152ul, 8ul>::resource() const Line | Count | Source | 344 | 1.35M | { | 345 | 1.35M | return m_resource; | 346 | 1.35M | } |
PoolAllocator<std::__detail::_Hash_node<std::pair<long const, long>, false>, 48ul, 8ul>::resource() const Line | Count | Source | 344 | 22 | { | 345 | 22 | return m_resource; | 346 | 22 | } |
PoolAllocator<std::pair<long const, long>, 48ul, 8ul>::resource() const Line | Count | Source | 344 | 3 | { | 345 | 3 | return m_resource; | 346 | 3 | } |
|
347 | | }; |
348 | | |
349 | | template <class T1, class T2, std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES> |
350 | | bool operator==(const PoolAllocator<T1, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& a, |
351 | | const PoolAllocator<T2, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& b) noexcept |
352 | | { |
353 | | return a.resource() == b.resource(); |
354 | | } |
355 | | |
356 | | #endif // BITCOIN_SUPPORT_ALLOCATORS_POOL_H |