Coverage Report

Created: 2026-04-29 19:21

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/tmp/bitcoin/src/index/base.cpp
Line
Count
Source
1
// Copyright (c) 2017-present The Bitcoin Core developers
2
// Distributed under the MIT software license, see the accompanying
3
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5
#include <index/base.h>
6
7
#include <chain.h>
8
#include <common/args.h>
9
#include <dbwrapper.h>
10
#include <interfaces/chain.h>
11
#include <interfaces/types.h>
12
#include <kernel/types.h>
13
#include <node/abort.h>
14
#include <node/blockstorage.h>
15
#include <node/context.h>
16
#include <node/database_args.h>
17
#include <node/interface_ui.h>
18
#include <primitives/block.h>
19
#include <sync.h>
20
#include <tinyformat.h>
21
#include <uint256.h>
22
#include <undo.h>
23
#include <util/check.h>
24
#include <util/fs.h>
25
#include <util/log.h>
26
#include <util/string.h>
27
#include <util/thread.h>
28
#include <util/threadinterrupt.h>
29
#include <util/time.h>
30
#include <util/translation.h>
31
#include <validation.h>
32
#include <validationinterface.h>
33
34
#include <compare>
35
#include <cstdint>
36
#include <functional>
37
#include <memory>
38
#include <optional>
39
#include <stdexcept>
40
#include <string>
41
#include <thread>
42
#include <utility>
43
#include <vector>
44
45
using kernel::ChainstateRole;
46
47
constexpr uint8_t DB_BEST_BLOCK{'B'};
48
49
constexpr auto SYNC_LOG_INTERVAL{30s};
50
constexpr auto SYNC_LOCATOR_WRITE_INTERVAL{30s};
51
52
template <typename... Args>
53
void BaseIndex::FatalErrorf(util::ConstevalFormatString<sizeof...(Args)> fmt, const Args&... args)
54
0
{
55
0
    auto message = tfm::format(fmt, args...);
56
0
    node::AbortNode(m_chain->context()->shutdown_request, m_chain->context()->exit_status, Untranslated(message), m_chain->context()->warnings.get());
57
0
}
Unexecuted instantiation: void BaseIndex::FatalErrorf<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>(util::ConstevalFormatString<sizeof...(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>)>, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&)
Unexecuted instantiation: void BaseIndex::FatalErrorf<int>(util::ConstevalFormatString<sizeof...(int)>, int const&)
58
59
CBlockLocator GetLocator(interfaces::Chain& chain, const uint256& block_hash)
60
284
{
61
284
    CBlockLocator locator;
62
284
    bool found = chain.findBlock(block_hash, interfaces::FoundBlock().locator(locator));
63
284
    assert(found);
64
284
    assert(!locator.IsNull());
65
284
    return locator;
66
284
}
67
68
BaseIndex::DB::DB(const fs::path& path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) :
69
147
    CDBWrapper{DBParams{
70
147
        .path = path,
71
147
        .cache_bytes = n_cache_size,
72
147
        .memory_only = f_memory,
73
147
        .wipe_data = f_wipe,
74
147
        .obfuscate = f_obfuscate,
75
147
        .options = [] { DBOptions options; node::ReadDatabaseArgs(gArgs, options); return options; }()}}
76
147
{}
77
78
CBlockLocator BaseIndex::DB::ReadBestBlock() const
79
145
{
80
145
    CBlockLocator locator;
81
82
145
    bool success = Read(DB_BEST_BLOCK, locator);
83
145
    if (!success) {
84
52
        locator.SetNull();
85
52
    }
86
87
145
    return locator;
88
145
}
89
90
void BaseIndex::DB::WriteBestBlock(CDBBatch& batch, const CBlockLocator& locator)
91
284
{
92
284
    batch.Write(DB_BEST_BLOCK, locator);
93
284
}
94
95
BaseIndex::BaseIndex(std::unique_ptr<interfaces::Chain> chain, std::string name)
96
147
    : m_chain{std::move(chain)}, m_name{std::move(name)} {}
97
98
BaseIndex::~BaseIndex()
99
147
{
100
147
    Interrupt();
101
147
    Stop();
102
147
}
103
104
bool BaseIndex::Init()
105
145
{
106
145
    AssertLockNotHeld(cs_main);
107
108
    // May need reset if index is being restarted.
109
145
    m_interrupt.reset();
110
111
    // m_chainstate member gives indexing code access to node internals. It is
112
    // removed in followup https://github.com/bitcoin/bitcoin/pull/24230
113
145
    m_chainstate = WITH_LOCK(::cs_main,
114
145
                             return &m_chain->context()->chainman->ValidatedChainstate());
115
    // Register with the validation interface before setting the 'm_synced' flag, so that
116
    // callbacks are not missed once m_synced is true.
117
145
    m_chain->context()->validation_signals->RegisterValidationInterface(this);
118
119
145
    const auto locator{GetDB().ReadBestBlock()};
120
121
145
    LOCK(cs_main);
122
145
    CChain& index_chain = m_chainstate->m_chain;
123
124
145
    if (locator.IsNull()) {
125
52
        SetBestBlockIndex(nullptr);
126
93
    } else {
127
        // Setting the best block to the locator's top block. If it is not part of the
128
        // best chain, we will rewind to the fork point during index sync
129
93
        const CBlockIndex* locator_index{m_chainstate->m_blockman.LookupBlockIndex(locator.vHave.at(0))};
130
93
        if (!locator_index) {
131
0
            return InitError(Untranslated(strprintf("best block of %s not found. Please rebuild the index.", GetName())));
132
0
        }
133
93
        SetBestBlockIndex(locator_index);
134
93
    }
135
136
    // Child init
137
145
    const CBlockIndex* start_block = m_best_block_index.load();
138
145
    if (!CustomInit(start_block ? std::make_optional(interfaces::BlockRef{start_block->GetBlockHash(), start_block->nHeight}) : std::nullopt)) {
139
0
        return false;
140
0
    }
141
142
    // Note: this will latch to true immediately if the user starts up with an empty
143
    // datadir and an index enabled. If this is the case, indexation will happen solely
144
    // via `BlockConnected` signals until, possibly, the next restart.
145
145
    m_synced = start_block == index_chain.Tip();
146
145
    m_init = true;
147
145
    return true;
148
145
}
149
150
static const CBlockIndex* NextSyncBlock(const CBlockIndex* const pindex_prev, CChain& chain) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
151
7.73k
{
152
7.73k
    AssertLockHeld(cs_main);
153
154
7.73k
    if (!pindex_prev) {
155
34
        return chain.Genesis();
156
34
    }
157
158
7.69k
    if (const auto* pindex{chain.Next(*pindex_prev)}) {
159
7.60k
        return pindex;
160
7.60k
    }
161
162
    // If there is no next block, we might be synced
163
96
    if (pindex_prev == chain.Tip()) {
164
94
        return nullptr;
165
94
    }
166
167
    // Since block is not in the chain, return the next block in the chain AFTER the last common ancestor.
168
    // Caller will be responsible for rewinding back to the common ancestor.
169
2
    const auto* fork{chain.FindFork(*pindex_prev)};
170
    // Common ancestor must exist (genesis).
171
2
    return chain.Next(*Assert(fork));
172
96
}
173
174
bool BaseIndex::ProcessBlock(const CBlockIndex* pindex, const CBlock* block_data)
175
15.9k
{
176
15.9k
    interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex, block_data);
177
178
15.9k
    CBlock block;
179
15.9k
    if (!block_data) { // disk lookup if block data wasn't provided
180
7.63k
        if (!m_chainstate->m_blockman.ReadBlock(block, *pindex)) {
181
0
            FatalErrorf("Failed to read block %s from disk",
182
0
                        pindex->GetBlockHash().ToString());
183
0
            return false;
184
0
        }
185
7.63k
        block_info.data = &block;
186
7.63k
    }
187
188
15.9k
    CBlockUndo block_undo;
189
15.9k
    if (CustomOptions().connect_undo_data) {
190
11.4k
        if (pindex->nHeight > 0 && !m_chainstate->m_blockman.ReadBlockUndo(block_undo, *pindex)) {
191
0
            FatalErrorf("Failed to read undo block data %s from disk",
192
0
                        pindex->GetBlockHash().ToString());
193
0
            return false;
194
0
        }
195
11.4k
        block_info.undo_data = &block_undo;
196
11.4k
    }
197
198
15.9k
    if (!CustomAppend(block_info)) {
199
0
        FatalErrorf("Failed to write block %s to index database",
200
0
                    pindex->GetBlockHash().ToString());
201
0
        return false;
202
0
    }
203
204
15.9k
    return true;
205
15.9k
}
206
207
void BaseIndex::Sync()
208
145
{
209
145
    const CBlockIndex* pindex = m_best_block_index.load();
210
145
    if (!m_synced) {
211
94
        auto last_log_time{NodeClock::now()};
212
94
        auto last_locator_write_time{last_log_time};
213
7.73k
        while (true) {
214
7.73k
            if (m_interrupt) {
215
47
                LogInfo("%s: m_interrupt set; exiting ThreadSync", GetName());
216
217
47
                SetBestBlockIndex(pindex);
218
                // No need to handle errors in Commit. If it fails, the error will already be
219
                // logged. The best way to recover is to continue, as index cannot be corrupted by
220
                // a missed commit to disk for an advanced index state.
221
47
                Commit();
222
47
                return;
223
47
            }
224
225
7.68k
            const CBlockIndex* pindex_next = WITH_LOCK(cs_main, return NextSyncBlock(pindex, m_chainstate->m_chain));
226
            // If pindex_next is null, it means pindex is the chain tip, so
227
            // commit data indexed so far.
228
7.68k
            if (!pindex_next) {
229
47
                SetBestBlockIndex(pindex);
230
                // No need to handle errors in Commit. See rationale above.
231
47
                Commit();
232
233
                // If pindex is still the chain tip after committing, exit the
234
                // sync loop. It is important for cs_main to be locked while
235
                // setting m_synced = true, otherwise a new block could be
236
                // attached while m_synced is still false, and it would not be
237
                // indexed.
238
47
                LOCK(::cs_main);
239
47
                pindex_next = NextSyncBlock(pindex, m_chainstate->m_chain);
240
47
                if (!pindex_next) {
241
47
                    m_synced = true;
242
47
                    break;
243
47
                }
244
47
            }
245
7.63k
            if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) {
246
0
                FatalErrorf("Failed to rewind %s to a previous chain tip", GetName());
247
0
                return;
248
0
            }
249
7.63k
            pindex = pindex_next;
250
251
252
7.63k
            if (!ProcessBlock(pindex)) return; // error logged internally
253
254
7.63k
            auto current_time{NodeClock::now()};
255
7.63k
            if (current_time - last_log_time >= SYNC_LOG_INTERVAL) {
256
3
                LogInfo("Syncing %s with block chain from height %d", GetName(), pindex->nHeight);
257
3
                last_log_time = current_time;
258
3
            }
259
260
7.63k
            if (current_time - last_locator_write_time >= SYNC_LOCATOR_WRITE_INTERVAL) {
261
3
                SetBestBlockIndex(pindex);
262
3
                last_locator_write_time = current_time;
263
                // No need to handle errors in Commit. See rationale above.
264
3
                Commit();
265
3
            }
266
7.63k
        }
267
94
    }
268
269
98
    if (pindex) {
270
97
        LogInfo("%s is enabled at height %d", GetName(), pindex->nHeight);
271
97
    } else {
272
1
        LogInfo("%s is enabled", GetName());
273
1
    }
274
98
}
275
276
bool BaseIndex::Commit()
277
286
{
278
    // Don't commit anything if we haven't indexed any block yet
279
    // (this could happen if init is interrupted).
280
286
    bool ok = m_best_block_index != nullptr;
281
286
    if (ok) {
282
284
        CDBBatch batch(GetDB());
283
284
        ok = CustomCommit(batch);
284
284
        if (ok) {
285
284
            GetDB().WriteBestBlock(batch, GetLocator(*m_chain, m_best_block_index.load()->GetBlockHash()));
286
284
            GetDB().WriteBatch(batch);
287
284
        }
288
284
    }
289
286
    if (!ok) {
290
2
        LogError("Failed to commit latest %s state", GetName());
291
2
        return false;
292
2
    }
293
284
    return true;
294
286
}
295
296
bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_tip)
297
17
{
298
17
    assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);
299
300
17
    CBlock block;
301
17
    CBlockUndo block_undo;
302
303
357
    for (const CBlockIndex* iter_tip = current_tip; iter_tip != new_tip; iter_tip = iter_tip->pprev) {
304
340
        interfaces::BlockInfo block_info = kernel::MakeBlockInfo(iter_tip);
305
340
        if (CustomOptions().disconnect_data) {
306
125
            if (!m_chainstate->m_blockman.ReadBlock(block, *iter_tip)) {
307
0
                LogError("Failed to read block %s from disk",
308
0
                         iter_tip->GetBlockHash().ToString());
309
0
                return false;
310
0
            }
311
125
            block_info.data = &block;
312
125
        }
313
340
        if (CustomOptions().disconnect_undo_data && iter_tip->nHeight > 0) {
314
121
            if (!m_chainstate->m_blockman.ReadBlockUndo(block_undo, *iter_tip)) {
315
0
                return false;
316
0
            }
317
121
            block_info.undo_data = &block_undo;
318
121
        }
319
340
        if (!CustomRemove(block_info)) {
320
0
            return false;
321
0
        }
322
340
    }
323
324
    // Don't commit here - the committed index state must never be ahead of the
325
    // flushed chainstate, otherwise unclean restarts would lead to index corruption.
326
    // Pruning has a minimum of 288 blocks-to-keep and getting the index
327
    // out of sync may be possible but is the user's fault.
328
    // In case we reorg beyond the pruned depth, ReadBlock would
329
    // throw and lead to a graceful shutdown
330
17
    SetBestBlockIndex(new_tip);
331
17
    return true;
332
17
}
333
334
void BaseIndex::BlockConnected(const ChainstateRole& role, const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex)
335
11.0k
{
336
    // Ignore events from not fully validated chains to avoid out-of-order indexing.
337
    //
338
    // TODO at some point we could parameterize whether a particular index can be
339
    // built out of order, but for now just do the conservative simple thing.
340
11.0k
    if (!role.validated) {
341
800
        return;
342
800
    }
343
344
    // Ignore BlockConnected signals until we have fully indexed the chain.
345
10.2k
    if (!m_synced) {
346
1.91k
        return;
347
1.91k
    }
348
349
8.35k
    const CBlockIndex* best_block_index = m_best_block_index.load();
350
8.35k
    if (!best_block_index) {
351
16
        if (pindex->nHeight != 0) {
352
0
            FatalErrorf("First block connected is not the genesis block (height=%d)",
353
0
                       pindex->nHeight);
354
0
            return;
355
0
        }
356
8.34k
    } else {
357
        // Ensure block connects to an ancestor of the current best block. This should be the case
358
        // most of the time, but may not be immediately after the sync thread catches up and sets
359
        // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are
360
        // in the ValidationInterface queue backlog even after the sync thread has caught up to the
361
        // new chain tip. In this unlikely event, log a warning and let the queue clear.
362
8.34k
        if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) {
363
0
            LogWarning("Block %s does not connect to an ancestor of "
364
0
                      "known best chain (tip=%s); not updating index",
365
0
                      pindex->GetBlockHash().ToString(),
366
0
                      best_block_index->GetBlockHash().ToString());
367
0
            return;
368
0
        }
369
8.34k
        if (best_block_index != pindex->pprev && !Rewind(best_block_index, pindex->pprev)) {
370
0
            FatalErrorf("Failed to rewind %s to a previous chain tip",
371
0
                       GetName());
372
0
            return;
373
0
        }
374
8.34k
    }
375
376
    // Dispatch block to child class; errors are logged internally and abort the node.
377
8.35k
    if (ProcessBlock(pindex, block.get())) {
378
        // Setting the best block index is intentionally the last step of this
379
        // function, so BlockUntilSyncedToCurrentChain callers waiting for the
380
        // best block index to be updated can rely on the block being fully
381
        // processed, and the index object being safe to delete.
382
8.35k
        SetBestBlockIndex(pindex);
383
8.35k
    }
384
8.35k
}
385
386
void BaseIndex::ChainStateFlushed(const ChainstateRole& role, const CBlockLocator& locator)
387
278
{
388
    // Ignore events from not fully validated chains to avoid out-of-order indexing.
389
278
    if (!role.validated) {
390
29
        return;
391
29
    }
392
393
249
    if (!m_synced) {
394
49
        return;
395
49
    }
396
397
200
    const uint256& locator_tip_hash = locator.vHave.front();
398
200
    const CBlockIndex* locator_tip_index;
399
200
    {
400
200
        LOCK(cs_main);
401
200
        locator_tip_index = m_chainstate->m_blockman.LookupBlockIndex(locator_tip_hash);
402
200
    }
403
404
200
    if (!locator_tip_index) {
405
0
        FatalErrorf("First block (hash=%s) in locator was not found",
406
0
                   locator_tip_hash.ToString());
407
0
        return;
408
0
    }
409
410
    // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail
411
    // immediately after the sync thread catches up and sets m_synced. Consider the case where
412
    // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue
413
    // backlog even after the sync thread has caught up to the new chain tip. In this unlikely
414
    // event, log a warning and let the queue clear.
415
200
    const CBlockIndex* best_block_index = m_best_block_index.load();
416
200
    if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) {
417
11
        LogWarning("Locator contains block (hash=%s) not on known best "
418
11
                  "chain (tip=%s); not writing index locator",
419
11
                  locator_tip_hash.ToString(),
420
11
                  best_block_index->GetBlockHash().ToString());
421
11
        return;
422
11
    }
423
424
    // No need to handle errors in Commit. If it fails, the error will already be logged. The
425
    // best way to recover is to continue, as index cannot be corrupted by a missed commit to disk
426
    // for an advanced index state.
427
189
    Commit();
428
189
}
429
430
bool BaseIndex::BlockUntilSyncedToCurrentChain() const
431
125
{
432
125
    AssertLockNotHeld(cs_main);
433
434
125
    if (!m_synced) {
435
4
        return false;
436
4
    }
437
438
121
    {
439
        // Skip the queue-draining stuff if we know we're caught up with
440
        // m_chain.Tip().
441
121
        LOCK(cs_main);
442
121
        const CBlockIndex* chain_tip = m_chainstate->m_chain.Tip();
443
121
        const CBlockIndex* best_block_index = m_best_block_index.load();
444
121
        if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
445
120
            return true;
446
120
        }
447
121
    }
448
449
1
    LogInfo("%s is catching up on block notifications", GetName());
450
1
    m_chain->context()->validation_signals->SyncWithValidationInterfaceQueue();
451
1
    return true;
452
121
}
453
454
void BaseIndex::Interrupt()
455
286
{
456
286
    m_interrupt();
457
286
}
458
459
bool BaseIndex::StartBackgroundSync()
460
140
{
461
140
    if (!m_init) throw std::logic_error("Error: Cannot start a non-initialized index");
462
463
140
    m_thread_sync = std::thread(&util::TraceThread, GetName(), [this] { Sync(); });
464
140
    return true;
465
140
}
466
467
void BaseIndex::Stop()
468
292
{
469
292
    if (m_chain->context()->validation_signals) {
470
289
        m_chain->context()->validation_signals->UnregisterValidationInterface(this);
471
289
    }
472
473
292
    if (m_thread_sync.joinable()) {
474
140
        m_thread_sync.join();
475
140
    }
476
292
}
477
478
IndexSummary BaseIndex::GetSummary() const
479
287
{
480
287
    IndexSummary summary{};
481
287
    summary.name = GetName();
482
287
    summary.synced = m_synced;
483
287
    if (const auto& pindex = m_best_block_index.load()) {
484
238
        summary.best_block_height = pindex->nHeight;
485
238
        summary.best_block_hash = pindex->GetBlockHash();
486
238
    } else {
487
49
        summary.best_block_height = 0;
488
49
        summary.best_block_hash = m_chain->getBlockHash(0);
489
49
    }
490
287
    return summary;
491
287
}
492
493
void BaseIndex::SetBestBlockIndex(const CBlockIndex* block)
494
8.61k
{
495
8.61k
    assert(!m_chainstate->m_blockman.IsPruneMode() || AllowPrune());
496
497
8.61k
    if (AllowPrune() && block) {
498
6.81k
        node::PruneLockInfo prune_lock;
499
6.81k
        prune_lock.height_first = block->nHeight;
500
6.81k
        WITH_LOCK(::cs_main, m_chainstate->m_blockman.UpdatePruneLock(GetName(), prune_lock));
501
6.81k
    }
502
503
    // Intentionally set m_best_block_index as the last step in this function,
504
    // after updating prune locks above, and after making any other references
505
    // to *this, so the BlockUntilSyncedToCurrentChain function (which checks
506
    // m_best_block_index as an optimization) can be used to wait for the last
507
    // BlockConnected notification and safely assume that prune locks are
508
    // updated and that the index object is safe to delete.
509
8.61k
    m_best_block_index = block;
510
8.61k
}