Coverage Report

Created: 2026-04-29 19:21

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/tmp/bitcoin/src/node/txorphanage.cpp
Line
Count
Source
1
// Copyright (c) 2021-present The Bitcoin Core developers
2
// Distributed under the MIT software license, see the accompanying
3
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5
#include <node/txorphanage.h>
6
7
#include <consensus/validation.h>
8
#include <logging.h>
9
#include <policy/policy.h>
10
#include <primitives/transaction.h>
11
#include <util/feefrac.h>
12
#include <util/time.h>
13
#include <util/hasher.h>
14
15
#include <boost/multi_index/indexed_by.hpp>
16
#include <boost/multi_index/ordered_index.hpp>
17
#include <boost/multi_index/tag.hpp>
18
#include <boost/multi_index_container.hpp>
19
20
#include <cassert>
21
#include <cmath>
22
#include <unordered_map>
23
24
namespace node {
25
/** Minimum NodeId for lower_bound lookups (in practice, NodeIds start at 0). */
26
static constexpr NodeId MIN_PEER{std::numeric_limits<NodeId>::min()};
27
/** Maximum NodeId for upper_bound lookups. */
28
static constexpr NodeId MAX_PEER{std::numeric_limits<NodeId>::max()};
29
class TxOrphanageImpl final : public TxOrphanage {
30
    // Type alias for sequence numbers
31
    using SequenceNumber = uint64_t;
32
    /** Global sequence number, increment each time an announcement is added. */
33
    SequenceNumber m_current_sequence{0};
34
35
    /** One orphan announcement. Each announcement (i.e. combination of wtxid, nodeid) is unique. There may be multiple
36
     * announcements for the same tx, and multiple transactions with the same txid but different wtxid are possible. */
37
    struct Announcement
38
    {
39
        const CTransactionRef m_tx;
40
        /** Which peer announced this tx */
41
        const NodeId m_announcer;
42
        /** What order this transaction entered the orphanage. */
43
        const SequenceNumber m_entry_sequence;
44
        /** Whether this tx should be reconsidered. Always starts out false. A peer's workset is the collection of all
45
         * announcements with m_reconsider=true. */
46
        bool m_reconsider{false};
47
48
        Announcement(const CTransactionRef& tx, NodeId peer, SequenceNumber seq) :
49
968
            m_tx{tx}, m_announcer{peer}, m_entry_sequence{seq}
50
968
        { }
51
52
        /** Get an approximation for "memory usage". The total memory is a function of the memory used to store the
53
         * transaction itself, each entry in m_orphans, and each entry in m_outpoint_to_orphan_wtxids. We use weight because
54
         * it is often higher than the actual memory usage of the transaction. This metric conveniently encompasses
55
         * m_outpoint_to_orphan_wtxids usage since input data does not get the witness discount, and makes it easier to
56
         * reason about each peer's limits using well-understood transaction attributes. */
57
4.29k
        TxOrphanage::Usage GetMemUsage()  const {
58
4.29k
            return GetTransactionWeight(*m_tx);
59
4.29k
        }
60
61
        /** Get an approximation of how much this transaction contributes to latency in EraseForBlock and EraseForPeer.
62
         * The computation time is a function of the number of entries in m_orphans (thus 1 per announcement) and the
63
         * number of entries in m_outpoint_to_orphan_wtxids (thus an additional 1 for every 10 inputs). Transactions with a
64
         * small number of inputs (9 or fewer) are counted as 1 to make it easier to reason about each peer's limits in
65
         * terms of "normal" transactions. */
66
4.29k
        TxOrphanage::Count GetLatencyScore() const {
67
4.29k
            return 1 + (m_tx->vin.size() / 10);
68
4.29k
        }
69
    };
70
71
    // Index by wtxid, then peer
72
    struct ByWtxid {};
73
    using ByWtxidView = std::tuple<Wtxid, NodeId>;
74
    struct WtxidExtractor
75
    {
76
        using result_type = ByWtxidView;
77
        result_type operator()(const Announcement& ann) const
78
42.7k
        {
79
42.7k
            return ByWtxidView{ann.m_tx->GetWitnessHash(), ann.m_announcer};
80
42.7k
        }
81
    };
82
83
    // Sort by peer, then by whether it is ready to reconsider, then by recency.
84
    struct ByPeer {};
85
    using ByPeerView = std::tuple<NodeId, bool, SequenceNumber>;
86
    struct ByPeerViewExtractor {
87
        using result_type = ByPeerView;
88
        result_type operator()(const Announcement& ann) const
89
192k
        {
90
192k
            return ByPeerView{ann.m_announcer, ann.m_reconsider, ann.m_entry_sequence};
91
192k
        }
92
    };
93
94
    using AnnouncementMap = boost::multi_index::multi_index_container<
95
        Announcement,
96
        boost::multi_index::indexed_by<
97
            boost::multi_index::ordered_unique<boost::multi_index::tag<ByWtxid>, WtxidExtractor>,
98
            boost::multi_index::ordered_unique<boost::multi_index::tag<ByPeer>, ByPeerViewExtractor>
99
        >
100
    >;
101
    template<typename Tag>
102
    using Iter = typename AnnouncementMap::index<Tag>::type::iterator;
103
    AnnouncementMap m_orphans;
104
105
    const TxOrphanage::Count m_max_global_latency_score{DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE};
106
    const TxOrphanage::Usage m_reserved_usage_per_peer{DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER};
107
108
    /** Number of unique orphans by wtxid. Less than or equal to the number of entries in m_orphans. */
109
    TxOrphanage::Count m_unique_orphans{0};
110
111
    /** Memory used by orphans (see Announcement::GetMemUsage()), deduplicated by wtxid. */
112
    TxOrphanage::Usage m_unique_orphan_usage{0};
113
114
    /** The sum of each unique transaction's latency scores including the inputs only (see Announcement::GetLatencyScore
115
     * but subtract 1 for the announcements themselves). The total orphanage's latency score is given by this value +
116
     * the number of entries in m_orphans. */
117
    TxOrphanage::Count m_unique_rounded_input_scores{0};
118
119
    /** Index from the parents' outputs to wtxids that exist in m_orphans. Used to find children of
120
     * a transaction that can be reconsidered and to remove entries that conflict with a block.*/
121
    std::unordered_map<COutPoint, std::set<Wtxid>, SaltedOutpointHasher> m_outpoint_to_orphan_wtxids;
122
123
    /** Set of Wtxids for which (exactly) one announcement with m_reconsider=true exists. */
124
    std::set<Wtxid> m_reconsiderable_wtxids;
125
126
    struct PeerDoSInfo {
127
        TxOrphanage::Usage m_total_usage{0};
128
        TxOrphanage::Count m_count_announcements{0};
129
        TxOrphanage::Count m_total_latency_score{0};
130
        bool operator==(const PeerDoSInfo& other) const
131
17
        {
132
17
            return m_total_usage == other.m_total_usage &&
133
17
                   m_count_announcements == other.m_count_announcements &&
134
17
                   m_total_latency_score == other.m_total_latency_score;
135
17
        }
136
        void Add(const Announcement& ann)
137
965
        {
138
965
            m_total_usage += ann.GetMemUsage();
139
965
            m_total_latency_score += ann.GetLatencyScore();
140
965
            m_count_announcements += 1;
141
965
        }
142
        bool Subtract(const Announcement& ann)
143
712
        {
144
712
            Assume(m_total_usage >= ann.GetMemUsage());
145
712
            Assume(m_total_latency_score >= ann.GetLatencyScore());
146
712
            Assume(m_count_announcements >= 1);
147
148
712
            m_total_usage -= ann.GetMemUsage();
149
712
            m_total_latency_score -= ann.GetLatencyScore();
150
712
            m_count_announcements -= 1;
151
712
            return m_count_announcements == 0;
152
712
        }
153
        /** There are 2 DoS scores:
154
        * - Latency score (ratio of total latency score / max allowed latency score)
155
        * - Memory score (ratio of total memory usage / max allowed memory usage).
156
        *
157
        * If the peer is using more than the allowed for either resource, its DoS score is > 1.
158
        * A peer having a DoS score > 1 does not necessarily mean that something is wrong, since we
159
        * do not trim unless the orphanage exceeds global limits, but it means that this peer will
160
        * be selected for trimming sooner. If the global latency score or global memory usage
161
        * limits are exceeded, it must be that there is a peer whose DoS score > 1. */
162
        FeeFrac GetDosScore(TxOrphanage::Count max_peer_latency_score, TxOrphanage::Usage max_peer_memory) const
163
1.68k
        {
164
1.68k
            assert(max_peer_latency_score > 0);
165
1.68k
            assert(max_peer_memory > 0);
166
1.68k
            const FeeFrac latency_score(m_total_latency_score, max_peer_latency_score);
167
1.68k
            const FeeFrac mem_score(m_total_usage, max_peer_memory);
168
1.68k
            return std::max<FeeFrac>(latency_score, mem_score);
169
1.68k
        }
170
    };
171
    /** Store per-peer statistics. Used to determine each peer's DoS score. The size of this map is used to determine the
172
     * number of peers and thus global {latency score, memory} limits. */
173
    std::unordered_map<NodeId, PeerDoSInfo> m_peer_orphanage_info;
174
175
    /** Erase from m_orphans and update m_peer_orphanage_info. */
176
    template<typename Tag>
177
    void Erase(Iter<Tag> it);
178
179
    /** Erase by wtxid. */
180
    bool EraseTxInternal(const Wtxid& wtxid);
181
182
    /** Check if there is exactly one announcement with the same wtxid as it. */
183
    bool IsUnique(Iter<ByWtxid> it) const;
184
185
    /** Check if the orphanage needs trimming. */
186
    bool NeedsTrim() const;
187
188
    /** Limit the orphanage to MaxGlobalLatencyScore and MaxGlobalUsage. */
189
    void LimitOrphans();
190
191
public:
192
1.24k
    TxOrphanageImpl() = default;
193
    TxOrphanageImpl(Count max_global_latency_score, Usage reserved_peer_usage) :
194
6
        m_max_global_latency_score{max_global_latency_score},
195
6
        m_reserved_usage_per_peer{reserved_peer_usage}
196
6
    {}
197
1.24k
    ~TxOrphanageImpl() noexcept override = default;
198
199
    TxOrphanage::Count CountAnnouncements() const override;
200
    TxOrphanage::Count CountUniqueOrphans() const override;
201
    TxOrphanage::Count AnnouncementsFromPeer(NodeId peer) const override;
202
    TxOrphanage::Count LatencyScoreFromPeer(NodeId peer) const override;
203
    TxOrphanage::Usage UsageByPeer(NodeId peer) const override;
204
205
    TxOrphanage::Count MaxGlobalLatencyScore() const override;
206
    TxOrphanage::Count TotalLatencyScore() const override;
207
    TxOrphanage::Usage ReservedPeerUsage() const override;
208
209
    /** Maximum allowed (deduplicated) latency score for all transactions (see Announcement::GetLatencyScore()). Dynamic
210
     * based on number of peers. Each peer has an equal amount, but the global maximum latency score stays constant. The
211
     * number of peers times MaxPeerLatencyScore() (rounded) adds up to MaxGlobalLatencyScore().  As long as every peer's
212
     * m_total_latency_score / MaxPeerLatencyScore() < 1, MaxGlobalLatencyScore() is not exceeded. */
213
    TxOrphanage::Count MaxPeerLatencyScore() const override;
214
215
    /** Maximum allowed (deduplicated) memory usage for all transactions (see Announcement::GetMemUsage()). Dynamic based
216
     * on number of peers. More peers means more allowed memory usage. The number of peers times ReservedPeerUsage()
217
     * adds up to MaxGlobalUsage(). As long as every peer's m_total_usage / ReservedPeerUsage() < 1, MaxGlobalUsage() is
218
     * not exceeded. */
219
    TxOrphanage::Usage MaxGlobalUsage() const override;
220
221
    bool AddTx(const CTransactionRef& tx, NodeId peer) override;
222
    bool AddAnnouncer(const Wtxid& wtxid, NodeId peer) override;
223
    CTransactionRef GetTx(const Wtxid& wtxid) const override;
224
    bool HaveTx(const Wtxid& wtxid) const override;
225
    bool HaveTxFromPeer(const Wtxid& wtxid, NodeId peer) const override;
226
    CTransactionRef GetTxToReconsider(NodeId peer) override;
227
    bool EraseTx(const Wtxid& wtxid) override;
228
    void EraseForPeer(NodeId peer) override;
229
    void EraseForBlock(const CBlock& block) override;
230
    std::vector<std::pair<Wtxid, NodeId>> AddChildrenToWorkSet(const CTransaction& tx, FastRandomContext& rng) override;
231
    bool HaveTxToReconsider(NodeId peer) override;
232
    std::vector<CTransactionRef> GetChildrenFromSamePeer(const CTransactionRef& parent, NodeId nodeid) const override;
233
    std::vector<OrphanInfo> GetOrphanTransactions() const override;
234
    TxOrphanage::Usage TotalOrphanUsage() const override;
235
    void SanityCheck() const override;
236
};
237
238
template<typename Tag>
239
void TxOrphanageImpl::Erase(Iter<Tag> it)
240
712
{
241
    // Update m_peer_orphanage_info and clean up entries if they point to an empty struct.
242
    // This means peers that are not storing any orphans do not have an entry in
243
    // m_peer_orphanage_info (they can be added back later if they announce another orphan) and
244
    // ensures disconnected peers are not tracked forever.
245
712
    auto peer_it = m_peer_orphanage_info.find(it->m_announcer);
246
712
    Assume(peer_it != m_peer_orphanage_info.end());
247
712
    if (peer_it->second.Subtract(*it)) m_peer_orphanage_info.erase(peer_it);
248
249
712
    if (IsUnique(m_orphans.project<ByWtxid>(it))) {
250
671
        m_unique_orphans -= 1;
251
671
        m_unique_rounded_input_scores -= it->GetLatencyScore() - 1;
252
671
        m_unique_orphan_usage -= it->GetMemUsage();
253
254
        // Remove references in m_outpoint_to_orphan_wtxids
255
671
        const auto& wtxid{it->m_tx->GetWitnessHash()};
256
697
        for (const auto& input : it->m_tx->vin) {
257
697
            auto it_prev = m_outpoint_to_orphan_wtxids.find(input.prevout);
258
697
            if (it_prev != m_outpoint_to_orphan_wtxids.end()) {
259
697
                it_prev->second.erase(wtxid);
260
                // Clean up keys if they point to an empty set.
261
697
                if (it_prev->second.empty()) {
262
690
                    m_outpoint_to_orphan_wtxids.erase(it_prev);
263
690
                }
264
697
            }
265
697
        }
266
671
    }
267
268
    // If this was the (unique) reconsiderable announcement for its wtxid, then the wtxid won't
269
    // have any reconsiderable announcements left after erasing.
270
712
    if (it->m_reconsider) m_reconsiderable_wtxids.erase(it->m_tx->GetWitnessHash());
271
272
712
    m_orphans.get<Tag>().erase(it);
273
712
}
void node::TxOrphanageImpl::Erase<node::TxOrphanageImpl::ByWtxid>(boost::multi_index::multi_index_container<node::TxOrphanageImpl::Announcement, boost::multi_index::indexed_by<boost::multi_index::ordered_unique<boost::multi_index::tag<node::TxOrphanageImpl::ByWtxid, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na>, node::TxOrphanageImpl::WtxidExtractor, mpl_::na>, boost::multi_index::ordered_unique<boost::multi_index::tag<node::TxOrphanageImpl::ByPeer, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na>, node::TxOrphanageImpl::ByPeerViewExtractor, mpl_::na>, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na>, std::allocator<node::TxOrphanageImpl::Announcement>>::index<node::TxOrphanageImpl::ByWtxid>::type::iterator)
Line
Count
Source
240
145
{
241
    // Update m_peer_orphanage_info and clean up entries if they point to an empty struct.
242
    // This means peers that are not storing any orphans do not have an entry in
243
    // m_peer_orphanage_info (they can be added back later if they announce another orphan) and
244
    // ensures disconnected peers are not tracked forever.
245
145
    auto peer_it = m_peer_orphanage_info.find(it->m_announcer);
246
145
    Assume(peer_it != m_peer_orphanage_info.end());
247
145
    if (peer_it->second.Subtract(*it)) m_peer_orphanage_info.erase(peer_it);
248
249
145
    if (IsUnique(m_orphans.project<ByWtxid>(it))) {
250
143
        m_unique_orphans -= 1;
251
143
        m_unique_rounded_input_scores -= it->GetLatencyScore() - 1;
252
143
        m_unique_orphan_usage -= it->GetMemUsage();
253
254
        // Remove references in m_outpoint_to_orphan_wtxids
255
143
        const auto& wtxid{it->m_tx->GetWitnessHash()};
256
159
        for (const auto& input : it->m_tx->vin) {
257
159
            auto it_prev = m_outpoint_to_orphan_wtxids.find(input.prevout);
258
159
            if (it_prev != m_outpoint_to_orphan_wtxids.end()) {
259
159
                it_prev->second.erase(wtxid);
260
                // Clean up keys if they point to an empty set.
261
159
                if (it_prev->second.empty()) {
262
155
                    m_outpoint_to_orphan_wtxids.erase(it_prev);
263
155
                }
264
159
            }
265
159
        }
266
143
    }
267
268
    // If this was the (unique) reconsiderable announcement for its wtxid, then the wtxid won't
269
    // have any reconsiderable announcements left after erasing.
270
145
    if (it->m_reconsider) m_reconsiderable_wtxids.erase(it->m_tx->GetWitnessHash());
271
272
145
    m_orphans.get<Tag>().erase(it);
273
145
}
void node::TxOrphanageImpl::Erase<node::TxOrphanageImpl::ByPeer>(boost::multi_index::multi_index_container<node::TxOrphanageImpl::Announcement, boost::multi_index::indexed_by<boost::multi_index::ordered_unique<boost::multi_index::tag<node::TxOrphanageImpl::ByWtxid, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na>, node::TxOrphanageImpl::WtxidExtractor, mpl_::na>, boost::multi_index::ordered_unique<boost::multi_index::tag<node::TxOrphanageImpl::ByPeer, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na>, node::TxOrphanageImpl::ByPeerViewExtractor, mpl_::na>, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na>, std::allocator<node::TxOrphanageImpl::Announcement>>::index<node::TxOrphanageImpl::ByPeer>::type::iterator)
Line
Count
Source
240
567
{
241
    // Update m_peer_orphanage_info and clean up entries if they point to an empty struct.
242
    // This means peers that are not storing any orphans do not have an entry in
243
    // m_peer_orphanage_info (they can be added back later if they announce another orphan) and
244
    // ensures disconnected peers are not tracked forever.
245
567
    auto peer_it = m_peer_orphanage_info.find(it->m_announcer);
246
567
    Assume(peer_it != m_peer_orphanage_info.end());
247
567
    if (peer_it->second.Subtract(*it)) m_peer_orphanage_info.erase(peer_it);
248
249
567
    if (IsUnique(m_orphans.project<ByWtxid>(it))) {
250
528
        m_unique_orphans -= 1;
251
528
        m_unique_rounded_input_scores -= it->GetLatencyScore() - 1;
252
528
        m_unique_orphan_usage -= it->GetMemUsage();
253
254
        // Remove references in m_outpoint_to_orphan_wtxids
255
528
        const auto& wtxid{it->m_tx->GetWitnessHash()};
256
538
        for (const auto& input : it->m_tx->vin) {
257
538
            auto it_prev = m_outpoint_to_orphan_wtxids.find(input.prevout);
258
538
            if (it_prev != m_outpoint_to_orphan_wtxids.end()) {
259
538
                it_prev->second.erase(wtxid);
260
                // Clean up keys if they point to an empty set.
261
538
                if (it_prev->second.empty()) {
262
535
                    m_outpoint_to_orphan_wtxids.erase(it_prev);
263
535
                }
264
538
            }
265
538
        }
266
528
    }
267
268
    // If this was the (unique) reconsiderable announcement for its wtxid, then the wtxid won't
269
    // have any reconsiderable announcements left after erasing.
270
567
    if (it->m_reconsider) m_reconsiderable_wtxids.erase(it->m_tx->GetWitnessHash());
271
272
567
    m_orphans.get<Tag>().erase(it);
273
567
}
274
275
bool TxOrphanageImpl::IsUnique(Iter<ByWtxid> it) const
276
1.67k
{
277
    // Iterators ByWtxid are sorted by wtxid, so check if neighboring elements have the same wtxid.
278
1.67k
    auto& index = m_orphans.get<ByWtxid>();
279
1.67k
    if (it == index.end()) return false;
280
1.67k
    if (std::next(it) != index.end() && std::next(it)->m_tx->GetWitnessHash() == it->m_tx->GetWitnessHash()) return false;
281
1.63k
    if (it != index.begin() && std::prev(it)->m_tx->GetWitnessHash() == it->m_tx->GetWitnessHash()) return false;
282
1.57k
    return true;
283
1.63k
}
284
285
TxOrphanage::Usage TxOrphanageImpl::UsageByPeer(NodeId peer) const
286
1.62k
{
287
1.62k
    auto it = m_peer_orphanage_info.find(peer);
288
1.62k
    return it == m_peer_orphanage_info.end() ? 0 : it->second.m_total_usage;
289
1.62k
}
290
291
34
TxOrphanage::Count TxOrphanageImpl::CountAnnouncements() const { return m_orphans.size(); }
292
293
14.7k
TxOrphanage::Usage TxOrphanageImpl::TotalOrphanUsage() const { return m_unique_orphan_usage; }
294
295
1.12k
TxOrphanage::Count TxOrphanageImpl::CountUniqueOrphans() const { return m_unique_orphans; }
296
297
73
TxOrphanage::Count TxOrphanageImpl::AnnouncementsFromPeer(NodeId peer) const {
298
73
    auto it = m_peer_orphanage_info.find(peer);
299
73
    return it == m_peer_orphanage_info.end() ? 0 : it->second.m_count_announcements;
300
73
}
301
302
2
TxOrphanage::Count TxOrphanageImpl::LatencyScoreFromPeer(NodeId peer) const {
303
2
    auto it = m_peer_orphanage_info.find(peer);
304
2
    return it == m_peer_orphanage_info.end() ? 0 : it->second.m_total_latency_score;
305
2
}
306
307
bool TxOrphanageImpl::AddTx(const CTransactionRef& tx, NodeId peer)
308
973
{
309
973
    const auto& wtxid{tx->GetWitnessHash()};
310
973
    const auto& txid{tx->GetHash()};
311
312
    // Ignore transactions above max standard size to avoid a send-big-orphans memory exhaustion attack.
313
973
    TxOrphanage::Usage sz = GetTransactionWeight(*tx);
314
973
    if (sz > MAX_STANDARD_TX_WEIGHT) {
315
11
        LogDebug(BCLog::TXPACKAGES, "ignoring large orphan tx (size: %u, txid: %s, wtxid: %s)\n", sz, txid.ToString(), wtxid.ToString());
316
11
        return false;
317
11
    }
318
319
    // We will return false if the tx already exists under a different peer.
320
962
    const bool brand_new{!HaveTx(wtxid)};
321
322
962
    auto [iter, inserted] = m_orphans.get<ByWtxid>().emplace(tx, peer, m_current_sequence);
323
    // If the announcement (same wtxid, same peer) already exists, emplacement fails. Return false.
324
962
    if (!inserted) return false;
325
326
960
    ++m_current_sequence;
327
960
    auto& peer_info = m_peer_orphanage_info.try_emplace(peer).first->second;
328
960
    peer_info.Add(*iter);
329
330
    // Add links in m_outpoint_to_orphan_wtxids
331
960
    if (brand_new) {
332
1.59k
        for (const auto& input : tx->vin) {
333
1.59k
            auto& wtxids_for_prevout = m_outpoint_to_orphan_wtxids.try_emplace(input.prevout).first->second;
334
1.59k
            wtxids_for_prevout.emplace(wtxid);
335
1.59k
        }
336
337
906
        m_unique_orphans += 1;
338
906
        m_unique_orphan_usage += iter->GetMemUsage();
339
906
        m_unique_rounded_input_scores += iter->GetLatencyScore() - 1;
340
341
906
        LogDebug(BCLog::TXPACKAGES, "stored orphan tx %s (wtxid=%s), weight: %u (mapsz %u outsz %u)\n",
342
906
                    txid.ToString(), wtxid.ToString(), sz, m_orphans.size(), m_outpoint_to_orphan_wtxids.size());
343
906
        Assume(IsUnique(iter));
344
906
    } else {
345
54
        LogDebug(BCLog::TXPACKAGES, "added peer=%d as announcer of orphan tx %s (wtxid=%s)\n",
346
54
                    peer, txid.ToString(), wtxid.ToString());
347
54
        Assume(!IsUnique(iter));
348
54
    }
349
350
    // DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789)
351
960
    LimitOrphans();
352
960
    return brand_new;
353
962
}
354
355
bool TxOrphanageImpl::AddAnnouncer(const Wtxid& wtxid, NodeId peer)
356
6
{
357
6
    auto& index_by_wtxid = m_orphans.get<ByWtxid>();
358
6
    auto it = index_by_wtxid.lower_bound(ByWtxidView{wtxid, MIN_PEER});
359
360
    // Do nothing if this transaction isn't already present. We can't create an entry if we don't
361
    // have the tx data.
362
6
    if (it == index_by_wtxid.end()) return false;
363
6
    if (it->m_tx->GetWitnessHash() != wtxid) return false;
364
365
    // Add another announcement, copying the CTransactionRef from one that already exists.
366
6
    const auto& ptx = it->m_tx;
367
6
    auto [iter, inserted] = index_by_wtxid.emplace(ptx, peer, m_current_sequence);
368
    // If the announcement (same wtxid, same peer) already exists, emplacement fails. Return false.
369
6
    if (!inserted) return false;
370
371
5
    ++m_current_sequence;
372
5
    auto& peer_info = m_peer_orphanage_info.try_emplace(peer).first->second;
373
5
    peer_info.Add(*iter);
374
375
5
    const auto& txid = ptx->GetHash();
376
5
    LogDebug(BCLog::TXPACKAGES, "added peer=%d as announcer of orphan tx %s (wtxid=%s)\n",
377
5
                peer, txid.ToString(), wtxid.ToString());
378
379
5
    Assume(!IsUnique(iter));
380
381
    // DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789)
382
5
    LimitOrphans();
383
5
    return true;
384
6
}
385
386
bool TxOrphanageImpl::EraseTxInternal(const Wtxid& wtxid)
387
12.7k
{
388
12.7k
    auto& index_by_wtxid = m_orphans.get<ByWtxid>();
389
390
12.7k
    auto it = index_by_wtxid.lower_bound(ByWtxidView{wtxid, MIN_PEER});
391
12.7k
    if (it == index_by_wtxid.end() || it->m_tx->GetWitnessHash() != wtxid) return false;
392
393
143
    auto it_end = index_by_wtxid.upper_bound(ByWtxidView{wtxid, MAX_PEER});
394
143
    unsigned int num_ann{0};
395
143
    const auto txid = it->m_tx->GetHash();
396
288
    while (it != it_end) {
397
145
        Assume(it->m_tx->GetWitnessHash() == wtxid);
398
145
        Erase<ByWtxid>(it++);
399
145
        num_ann += 1;
400
145
    }
401
143
    LogDebug(BCLog::TXPACKAGES, "removed orphan tx %s (wtxid=%s) (%u announcements)\n", txid.ToString(), wtxid.ToString(), num_ann);
402
403
143
    return true;
404
12.7k
}
405
406
bool TxOrphanageImpl::EraseTx(const Wtxid& wtxid)
407
12.6k
{
408
12.6k
    const auto ret = EraseTxInternal(wtxid);
409
410
    // Deletions can cause the orphanage's MaxGlobalUsage to decrease, so we may need to trim here.
411
12.6k
    LimitOrphans();
412
413
12.6k
    return ret;
414
12.6k
}
415
416
/** Erase all entries by this peer. */
417
void TxOrphanageImpl::EraseForPeer(NodeId peer)
418
1.62k
{
419
1.62k
    auto& index_by_peer = m_orphans.get<ByPeer>();
420
1.62k
    auto it = index_by_peer.lower_bound(ByPeerView{peer, false, 0});
421
1.62k
    if (it == index_by_peer.end() || it->m_announcer != peer) return;
422
423
85
    unsigned int num_ann{0};
424
507
    while (it != index_by_peer.end() && it->m_announcer == peer) {
425
        // Delete item, cleaning up m_outpoint_to_orphan_wtxids iff this entry is unique by wtxid.
426
422
        Erase<ByPeer>(it++);
427
422
        num_ann += 1;
428
422
    }
429
85
    Assume(!m_peer_orphanage_info.contains(peer));
430
431
85
    if (num_ann > 0) LogDebug(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) from peer=%d\n", num_ann, peer);
432
433
    // Deletions can cause the orphanage's MaxGlobalUsage to decrease, so we may need to trim here.
434
85
    LimitOrphans();
435
85
}
436
437
/** If the data structure needs trimming, evicts announcements by selecting the DoSiest peer and evicting its oldest
 * announcement (sorting non-reconsiderable orphans first, to give reconsiderable orphans a greater chance of being
 * processed). Does nothing if no global limits are exceeded.  This eviction strategy effectively "reserves" an
 * amount of announcements and space for each peer. The reserved amount is protected from eviction even if there
 * are peers spamming the orphanage.
 */
void TxOrphanageImpl::LimitOrphans()
{
    if (!NeedsTrim()) return;

    // Snapshot the unique-orphan count; only used to log how many transactions were removed.
    const auto original_unique_txns{CountUniqueOrphans()};

    // Even though it's possible for MaxPeerLatencyScore to increase within this call to LimitOrphans
    // (e.g. if a peer's orphans are removed entirely, changing the number of peers), use consistent limits throughout.
    const auto max_lat{MaxPeerLatencyScore()};
    const auto max_mem{ReservedPeerUsage()};

    // We have exceeded the global limit(s). Now, identify who is using too much and evict their orphans.
    // Create a heap of pairs (NodeId, DoS score), sorted by descending DoS score.
    std::vector<std::pair<NodeId, FeeFrac>> heap_peer_dos;
    heap_peer_dos.reserve(m_peer_orphanage_info.size());
    for (const auto& [nodeid, entry] : m_peer_orphanage_info) {
        // Performance optimization: only consider peers with a DoS score > 1.
        // NOTE: FeeFrac's operator>> is a strict ratio comparison, i.e. dos_score > 1/1.
        const auto dos_score = entry.GetDosScore(max_lat, max_mem);
        if (dos_score >> FeeFrac{1, 1}) {
            heap_peer_dos.emplace_back(nodeid, dos_score);
        }
    }
    static constexpr auto compare_score = [](const auto& left, const auto& right) {
        if (left.second != right.second) return left.second < right.second;
        // Tiebreak by considering the more recent peer (higher NodeId) to be worse.
        return left.first < right.first;
    };
    std::make_heap(heap_peer_dos.begin(), heap_peer_dos.end(), compare_score);

    unsigned int num_erased{0};
    // This outer loop finds the peer with the highest DoS score, which is a fraction of memory and latency scores
    // over the respective allowances. We continue until the orphanage is within global limits. That means some peers
    // might still have a DoS score > 1 at the end.
    // Note: if ratios are the same, FeeFrac tiebreaks by denominator. In practice, since the latency denominator (number of
    // announcements and inputs) is always lower, this means that a peer with only high latency scores will be targeted
    // before a peer using a lot of memory, even if they have the same ratios.
    do {
        Assume(!heap_peer_dos.empty());
        // This is a max-heap, so the worst peer is at the front. pop_heap()
        // moves it to the back, and the next worst peer is moved to the front.
        std::pop_heap(heap_peer_dos.begin(), heap_peer_dos.end(), compare_score);
        const auto [worst_peer, dos_score] = std::move(heap_peer_dos.back());
        heap_peer_dos.pop_back();

        // If needs trim, then at least one peer has a DoS score higher than 1.
        Assume(dos_score >> (FeeFrac{1, 1}));

        auto it_worst_peer = m_peer_orphanage_info.find(worst_peer);

        // This inner loop trims until this peer is no longer the DoSiest one or has a score within 1. The score 1 is
        // just a conservative fallback: once the last peer goes below ratio 1, NeedsTrim() will return false anyway.
        // We evict the oldest announcement(s) from this peer, sorting non-reconsiderable before reconsiderable.
        // The number of inner loop iterations is bounded by the total number of announcements.
        // dos_threshold is the next-worst peer's score (or 1/1 if none): trimming below it means this peer is no
        // longer the DoSiest.
        const auto& dos_threshold = heap_peer_dos.empty() ? FeeFrac{1, 1} : heap_peer_dos.front().second;
        auto it_ann = m_orphans.get<ByPeer>().lower_bound(ByPeerView{worst_peer, false, 0});
        unsigned int num_erased_this_round{0};
        unsigned int starting_num_ann{it_worst_peer->second.m_count_announcements};
        while (NeedsTrim()) {
            if (!Assume(it_ann != m_orphans.get<ByPeer>().end())) break;
            if (!Assume(it_ann->m_announcer == worst_peer)) break;

            // Post-increment keeps a valid iterator to this peer's next announcement across the erase.
            Erase<ByPeer>(it_ann++);
            num_erased += 1;
            num_erased_this_round += 1;

            // If we erased the last orphan from this peer, it_worst_peer will be invalidated.
            it_worst_peer = m_peer_orphanage_info.find(worst_peer);
            if (it_worst_peer == m_peer_orphanage_info.end() || it_worst_peer->second.GetDosScore(max_lat, max_mem) <= dos_threshold) break;
        }
        LogDebug(BCLog::TXPACKAGES, "peer=%d orphanage overflow, removed %u of %u announcements\n", worst_peer, num_erased_this_round, starting_num_ann);

        if (!NeedsTrim()) break;

        // Unless this peer is empty, put it back in the heap so we continue to consider evicting its orphans.
        // We may select this peer for evictions again if there are multiple DoSy peers.
        if (it_worst_peer != m_peer_orphanage_info.end() && it_worst_peer->second.m_count_announcements > 0) {
            heap_peer_dos.emplace_back(worst_peer, it_worst_peer->second.GetDosScore(max_lat, max_mem));
            std::push_heap(heap_peer_dos.begin(), heap_peer_dos.end(), compare_score);
        }
    } while (true);

    const auto remaining_unique_orphans{CountUniqueOrphans()};
    LogDebug(BCLog::TXPACKAGES, "orphanage overflow, removed %u tx (%u announcements)\n", original_unique_txns - remaining_unique_orphans, num_erased);
}
527
528
/** For each output of tx, find orphans that spend it. For each such orphan not already scheduled
 * for reconsideration, flag one randomly-chosen announcement as reconsiderable and return the
 * (wtxid, announcer) pairs selected. */
std::vector<std::pair<Wtxid, NodeId>> TxOrphanageImpl::AddChildrenToWorkSet(const CTransaction& tx, FastRandomContext& rng)
{
    std::vector<std::pair<Wtxid, NodeId>> ret;
    auto& index_by_wtxid = m_orphans.get<ByWtxid>();
    for (unsigned int i = 0; i < tx.vout.size(); i++) {
        const auto it_by_prev = m_outpoint_to_orphan_wtxids.find(COutPoint(tx.GetHash(), i));
        if (it_by_prev != m_outpoint_to_orphan_wtxids.end()) {
            for (const auto& wtxid : it_by_prev->second) {
                // If a reconsiderable announcement for this wtxid already exists, skip it.
                if (m_reconsiderable_wtxids.contains(wtxid)) continue;

                // Belt and suspenders, each entry in m_outpoint_to_orphan_wtxids should always have at least 1 announcement.
                auto it = index_by_wtxid.lower_bound(ByWtxidView{wtxid, MIN_PEER});
                if (!Assume(it != index_by_wtxid.end() && it->m_tx->GetWitnessHash() == wtxid)) continue;

                // Select a random peer to assign orphan processing, reducing wasted work if the orphan is still missing
                // inputs. However, we don't want to create an issue in which the assigned peer can purposefully stop us
                // from processing the orphan by disconnecting.
                // All announcements of this wtxid are contiguous in the ByWtxid index: [it, it_end).
                auto it_end = index_by_wtxid.upper_bound(ByWtxidView{wtxid, MAX_PEER});
                const auto num_announcers{std::distance(it, it_end)};
                if (!Assume(num_announcers > 0)) continue;
                std::advance(it, rng.randrange(num_announcers));

                if (!Assume(it->m_tx->GetWitnessHash() == wtxid)) break;

                // Mark this orphan as ready to be reconsidered.
                static constexpr auto mark_reconsidered_modifier = [](auto& ann) { ann.m_reconsider = true; };
                Assume(!it->m_reconsider);
                index_by_wtxid.modify(it, mark_reconsidered_modifier);
                ret.emplace_back(wtxid, it->m_announcer);
                // Record the wtxid so at most one announcement per wtxid carries m_reconsider.
                m_reconsiderable_wtxids.insert(wtxid);

                LogDebug(BCLog::TXPACKAGES, "added %s (wtxid=%s) to peer %d workset\n",
                            it->m_tx->GetHash().ToString(), it->m_tx->GetWitnessHash().ToString(), it->m_announcer);
            }
        }
    }
    return ret;
}
567
568
bool TxOrphanageImpl::HaveTx(const Wtxid& wtxid) const
569
69.0k
{
570
69.0k
    auto it_lower = m_orphans.get<ByWtxid>().lower_bound(ByWtxidView{wtxid, MIN_PEER});
571
69.0k
    return it_lower != m_orphans.get<ByWtxid>().end() && it_lower->m_tx->GetWitnessHash() == wtxid;
572
69.0k
}
573
574
CTransactionRef TxOrphanageImpl::GetTx(const Wtxid& wtxid) const
{
    // Locate the first announcement for this wtxid (announcements with the same wtxid
    // are contiguous in the ByWtxid index); return its tx if the wtxid matches.
    const auto& index = m_orphans.get<ByWtxid>();
    const auto it = index.lower_bound(ByWtxidView{wtxid, MIN_PEER});
    if (it == index.end()) return nullptr;
    if (it->m_tx->GetWitnessHash() != wtxid) return nullptr;
    return it->m_tx;
}
580
581
bool TxOrphanageImpl::HaveTxFromPeer(const Wtxid& wtxid, NodeId peer) const
582
701
{
583
701
    return m_orphans.get<ByWtxid>().count(ByWtxidView{wtxid, peer}) > 0;
584
701
}
585
586
/** If there is a tx that can be reconsidered, return it and set it back to
587
 * non-reconsiderable. Otherwise, return a nullptr. */
588
CTransactionRef TxOrphanageImpl::GetTxToReconsider(NodeId peer)
589
377k
{
590
377k
    auto it = m_orphans.get<ByPeer>().lower_bound(ByPeerView{peer, true, 0});
591
377k
    if (it != m_orphans.get<ByPeer>().end() && it->m_announcer == peer && it->m_reconsider) {
592
        // Flip m_reconsider. Even if this transaction stays in orphanage, it shouldn't be
593
        // reconsidered again until there is a new reason to do so.
594
50
        static constexpr auto mark_reconsidered_modifier = [](auto& ann) { ann.m_reconsider = false; };
595
50
        m_orphans.get<ByPeer>().modify(it, mark_reconsidered_modifier);
596
        // As there is exactly one m_reconsider announcement per reconsiderable wtxids, flipping
597
        // the m_reconsider flag means the wtxid is no longer reconsiderable.
598
50
        m_reconsiderable_wtxids.erase(it->m_tx->GetWitnessHash());
599
50
        return it->m_tx;
600
50
    }
601
377k
    return nullptr;
602
377k
}
603
604
/** Return whether there is a tx that can be reconsidered. */
605
bool TxOrphanageImpl::HaveTxToReconsider(NodeId peer)
606
160k
{
607
160k
    auto it = m_orphans.get<ByPeer>().lower_bound(ByPeerView{peer, true, 0});
608
160k
    return it != m_orphans.get<ByPeer>().end() && it->m_announcer == peer && it->m_reconsider;
609
160k
}
610
611
/** Erase every orphan that spends an outpoint also spent by a transaction in this block,
 * i.e. orphans included in or conflicted by the block. */
void TxOrphanageImpl::EraseForBlock(const CBlock& block)
{
    if (m_orphans.empty()) return;

    std::set<Wtxid> wtxids_to_erase;
    for (const CTransactionRef& ptx : block.vtx) {
        const CTransaction& block_tx = *ptx;

        // Which orphan pool entries must we evict?
        for (const auto& input : block_tx.vin) {
            auto it_prev = m_outpoint_to_orphan_wtxids.find(input.prevout);
            if (it_prev != m_outpoint_to_orphan_wtxids.end()) {
                // Copy all wtxids to wtxids_to_erase.
                std::copy(it_prev->second.cbegin(), it_prev->second.cend(), std::inserter(wtxids_to_erase, wtxids_to_erase.end()));
            }
        }
    }

    unsigned int num_erased{0};
    for (const auto& wtxid : wtxids_to_erase) {
        // Don't use EraseTx here because it calls LimitOrphans and announcements deleted in that call are not reflected
        // in its return result. Waiting until the end to do LimitOrphans helps save repeated computation and allows us
        // to check that num_erased is what we expect.
        num_erased += EraseTxInternal(wtxid) ? 1 : 0;
    }

    if (num_erased != 0) {
        LogDebug(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) included or conflicted by block\n", num_erased);
    }
    // Every collected wtxid should have been present (and thus erased) exactly once.
    Assume(wtxids_to_erase.size() == num_erased);

    // Deletions can cause the orphanage's MaxGlobalUsage to decrease, so we may need to trim here.
    LimitOrphans();
}
645
646
std::vector<CTransactionRef> TxOrphanageImpl::GetChildrenFromSamePeer(const CTransactionRef& parent, NodeId peer) const
{
    std::vector<CTransactionRef> result;
    const auto& parent_txid{parent->GetHash()};

    // Walk this peer's announcements from newest to oldest so that more recent transactions
    // are added first, which helps avoid work when one of the orphans replaced an earlier
    // one. Since only this peer's entries are visited, one peer's announcement order does
    // not bias how we process other peers' orphans.
    const auto& index_by_peer = m_orphans.get<ByPeer>();
    auto it = index_by_peer.upper_bound(ByPeerView{peer, true, std::numeric_limits<uint64_t>::max()});
    const auto it_begin = index_by_peer.lower_bound(ByPeerView{peer, false, 0});

    while (it != it_begin) {
        --it;
        if (!Assume(it->m_announcer == peer)) break;
        // Does this orphan spend an output of parent?
        bool spends_parent{false};
        for (const auto& input : it->m_tx->vin) {
            if (input.prevout.hash == parent_txid) {
                spends_parent = true;
                break;
            }
        }
        if (spends_parent) result.emplace_back(it->m_tx);
    }
    return result;
}
672
673
/** Return one OrphanInfo per unique orphan, each carrying the set of peers that announced it. */
std::vector<TxOrphanage::OrphanInfo> TxOrphanageImpl::GetOrphanTransactions() const
{
    std::vector<TxOrphanage::OrphanInfo> result;
    result.reserve(m_unique_orphans);

    // Announcements with identical wtxid are adjacent in the ByWtxid index, so a single pass
    // can group each unique orphan's announcers.
    auto& index_by_wtxid = m_orphans.get<ByWtxid>();
    auto it = index_by_wtxid.begin();
    std::set<NodeId> this_orphan_announcers;
    while (it != index_by_wtxid.end()) {
        this_orphan_announcers.insert(it->m_announcer);
        // If this is the last entry, or the next entry has a different wtxid, build a OrphanInfo.
        if (std::next(it) == index_by_wtxid.end() || std::next(it)->m_tx->GetWitnessHash() != it->m_tx->GetWitnessHash()) {
            result.emplace_back(it->m_tx, std::move(this_orphan_announcers));
            // The moved-from set is valid-but-unspecified; clear() resets it for the next group.
            this_orphan_announcers.clear();
        }

        ++it;
    }
    Assume(m_unique_orphans == result.size());

    return result;
}
695
696
void TxOrphanageImpl::SanityCheck() const
697
10
{
698
10
    std::unordered_map<NodeId, PeerDoSInfo> reconstructed_peer_info;
699
10
    std::map<Wtxid, std::pair<TxOrphanage::Usage, TxOrphanage::Count>> unique_wtxids_to_scores;
700
10
    std::set<COutPoint> all_outpoints;
701
10
    std::set<Wtxid> reconstructed_reconsiderable_wtxids;
702
703
176
    for (auto it = m_orphans.begin(); it != m_orphans.end(); ++it) {
704
1.02k
        for (const auto& input : it->m_tx->vin) {
705
1.02k
            all_outpoints.insert(input.prevout);
706
1.02k
        }
707
166
        unique_wtxids_to_scores.emplace(it->m_tx->GetWitnessHash(), std::make_pair(it->GetMemUsage(), it->GetLatencyScore() - 1));
708
709
166
        auto& peer_info = reconstructed_peer_info[it->m_announcer];
710
166
        peer_info.m_total_usage += it->GetMemUsage();
711
166
        peer_info.m_count_announcements += 1;
712
166
        peer_info.m_total_latency_score += it->GetLatencyScore();
713
714
166
        if (it->m_reconsider) {
715
1
            auto [_, added] = reconstructed_reconsiderable_wtxids.insert(it->m_tx->GetWitnessHash());
716
            // Check that there is only ever 1 announcement per wtxid with m_reconsider set.
717
1
            assert(added);
718
1
        }
719
166
    }
720
10
    assert(reconstructed_peer_info.size() == m_peer_orphanage_info.size());
721
722
    // Recalculated per-peer stats are identical to m_peer_orphanage_info
723
10
    assert(reconstructed_peer_info == m_peer_orphanage_info);
724
725
    // Recalculated set of reconsiderable wtxids must match.
726
10
    assert(m_reconsiderable_wtxids == reconstructed_reconsiderable_wtxids);
727
728
    // All outpoints exist in m_outpoint_to_orphan_wtxids, all keys in m_outpoint_to_orphan_wtxids correspond to some
729
    // orphan, and all wtxids referenced in m_outpoint_to_orphan_wtxids are also in m_orphans.
730
    // This ensures m_outpoint_to_orphan_wtxids is cleaned up.
731
10
    assert(all_outpoints.size() == m_outpoint_to_orphan_wtxids.size());
732
234
    for (const auto& [outpoint, wtxid_set] : m_outpoint_to_orphan_wtxids) {
733
234
        assert(all_outpoints.contains(outpoint));
734
765
        for (const auto& wtxid : wtxid_set) {
735
765
            assert(unique_wtxids_to_scores.contains(wtxid));
736
765
        }
737
234
    }
738
739
    // Cached m_unique_orphans value is correct.
740
10
    assert(m_orphans.size() >= m_unique_orphans);
741
10
    assert(m_orphans.size() <= m_peer_orphanage_info.size() * m_unique_orphans);
742
10
    assert(unique_wtxids_to_scores.size() == m_unique_orphans);
743
744
10
    const auto calculated_dedup_usage = std::accumulate(unique_wtxids_to_scores.begin(), unique_wtxids_to_scores.end(),
745
151
        TxOrphanage::Usage{0}, [](TxOrphanage::Usage sum, const auto pair) { return sum + pair.second.first; });
746
10
    assert(calculated_dedup_usage == m_unique_orphan_usage);
747
748
    // Global usage is deduplicated, should be less than or equal to the sum of all per-peer usages.
749
10
    const auto summed_peer_usage = std::accumulate(m_peer_orphanage_info.begin(), m_peer_orphanage_info.end(),
750
17
        TxOrphanage::Usage{0}, [](TxOrphanage::Usage sum, const auto pair) { return sum + pair.second.m_total_usage; });
751
10
    assert(summed_peer_usage >= m_unique_orphan_usage);
752
753
    // Cached m_unique_rounded_input_scores value is correct.
754
10
    const auto calculated_total_latency_score = std::accumulate(unique_wtxids_to_scores.begin(), unique_wtxids_to_scores.end(),
755
151
        TxOrphanage::Count{0}, [](TxOrphanage::Count sum, const auto pair) { return sum + pair.second.second; });
756
10
    assert(calculated_total_latency_score == m_unique_rounded_input_scores);
757
758
    // Global latency score is deduplicated, should be less than or equal to the sum of all per-peer latency scores.
759
10
    const auto summed_peer_latency_score = std::accumulate(m_peer_orphanage_info.begin(), m_peer_orphanage_info.end(),
760
17
        TxOrphanage::Count{0}, [](TxOrphanage::Count sum, const auto pair) { return sum + pair.second.m_total_latency_score; });
761
10
    assert(summed_peer_latency_score >= m_unique_rounded_input_scores + m_orphans.size());
762
763
10
    assert(!NeedsTrim());
764
10
}
765
766
14.0k
// Configured ceiling on the orphanage-wide latency score.
TxOrphanage::Count TxOrphanageImpl::MaxGlobalLatencyScore() const { return m_max_global_latency_score; }
767
14.0k
// Current global latency score: the deduplicated rounded input scores plus 1 per announcement.
TxOrphanage::Count TxOrphanageImpl::TotalLatencyScore() const { return m_unique_rounded_input_scores + m_orphans.size(); }
768
143
// Configured memory usage reserved for each peer.
TxOrphanage::Usage TxOrphanageImpl::ReservedPeerUsage() const { return m_reserved_usage_per_peer; }
769
153
// Per-peer latency allowance: an equal share of the global limit (clamped to at least 1 peer to avoid division by zero).
TxOrphanage::Count TxOrphanageImpl::MaxPeerLatencyScore() const { return m_max_global_latency_score / std::max<unsigned int>(m_peer_orphanage_info.size(), 1); }
770
13.9k
// Global memory budget: the per-peer reservation times the number of peers (at least 1), so it shrinks as peers leave.
TxOrphanage::Usage TxOrphanageImpl::MaxGlobalUsage() const { return m_reserved_usage_per_peer * std::max<int64_t>(m_peer_orphanage_info.size(), 1); }
771
772
bool TxOrphanageImpl::NeedsTrim() const
773
14.0k
{
774
14.0k
    return TotalLatencyScore() > MaxGlobalLatencyScore() || TotalOrphanUsage() > MaxGlobalUsage();
775
14.0k
}
776
/** Factory: construct a TxOrphanage with default limits. */
std::unique_ptr<TxOrphanage> MakeTxOrphanage() noexcept
{
    return std::make_unique<TxOrphanageImpl>();
}
780
/** Factory: construct a TxOrphanage with a custom global latency-score limit and per-peer reserved usage. */
std::unique_ptr<TxOrphanage> MakeTxOrphanage(TxOrphanage::Count max_global_latency_score, TxOrphanage::Usage reserved_peer_usage) noexcept
{
    return std::make_unique<TxOrphanageImpl>(max_global_latency_score, reserved_peer_usage);
}
784
} // namespace node