Coverage Report

Created: 2026-04-29 19:21

Legend: L = next uncovered line, R = next uncovered region, B = next uncovered branch
/tmp/bitcoin/src/scheduler.cpp
Line
Count
Source
1
// Copyright (c) 2015-present The Bitcoin Core developers
2
// Distributed under the MIT software license, see the accompanying
3
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5
#include <scheduler.h>
6
7
#include <sync.h>
8
#include <util/time.h>
9
10
#include <cassert>
11
#include <functional>
12
#include <utility>
13
14
1.27k
CScheduler::CScheduler() = default;
15
16
CScheduler::~CScheduler()
17
1.27k
{
18
1.27k
    assert(nThreadsServicingQueue == 0);
19
1.27k
    if (stopWhenEmpty) assert(taskQueue.empty());
20
1.27k
}
21
22
23
void CScheduler::serviceQueue()
24
1.28k
{
25
1.28k
    WAIT_LOCK(newTaskMutex, lock);
26
1.28k
    ++nThreadsServicingQueue;
27
28
    // newTaskMutex is locked throughout this loop EXCEPT
29
    // when the thread is waiting or when the user's function
30
    // is called.
31
443k
    while (!shouldStop()) {
32
441k
        try {
33
489k
            while (!shouldStop() && taskQueue.empty()) {
34
                // Wait until there is something to do.
35
47.4k
                newTaskScheduled.wait(lock);
36
47.4k
            }
37
38
            // Wait until either there is a new task, or until
39
            // the time of the first item on the queue:
40
41
656k
            while (!shouldStop() && !taskQueue.empty()) {
42
654k
                std::chrono::steady_clock::time_point timeToWaitFor = taskQueue.begin()->first;
43
654k
                if (newTaskScheduled.wait_until(lock, timeToWaitFor) == std::cv_status::timeout) {
44
440k
                    break; // Exit loop after timeout, it means we reached the time of the event
45
440k
                }
46
654k
            }
47
48
            // If there are multiple threads, the queue can empty while we're waiting (another
49
            // thread may service the task we were waiting on).
50
441k
            if (shouldStop() || taskQueue.empty())
51
1.26k
                continue;
52
53
440k
            Function f = taskQueue.begin()->second;
54
440k
            taskQueue.erase(taskQueue.begin());
55
56
440k
            {
57
                // Unlock before calling f, so it can reschedule itself or another task
58
                // without deadlocking:
59
440k
                REVERSE_LOCK(lock, newTaskMutex);
60
440k
                f();
61
440k
            }
62
440k
        } catch (...) {
63
0
            --nThreadsServicingQueue;
64
0
            throw;
65
0
        }
66
441k
    }
67
1.28k
    --nThreadsServicingQueue;
68
1.28k
    newTaskScheduled.notify_one();
69
1.28k
}
70
71
void CScheduler::schedule(CScheduler::Function f, std::chrono::steady_clock::time_point t)
72
450k
{
73
450k
    {
74
450k
        LOCK(newTaskMutex);
75
450k
        taskQueue.insert(std::make_pair(t, f));
76
450k
    }
77
450k
    newTaskScheduled.notify_one();
78
450k
}
79
80
void CScheduler::MockForward(std::chrono::seconds delta_seconds)
81
22
{
82
22
    assert(delta_seconds > 0s && delta_seconds <= 1h);
83
84
22
    {
85
22
        LOCK(newTaskMutex);
86
87
        // use temp_queue to maintain updated schedule
88
22
        std::multimap<std::chrono::steady_clock::time_point, Function> temp_queue;
89
90
161
        for (const auto& element : taskQueue) {
91
161
            temp_queue.emplace_hint(temp_queue.cend(), element.first - delta_seconds, element.second);
92
161
        }
93
94
        // point taskQueue to temp_queue
95
22
        taskQueue = std::move(temp_queue);
96
22
    }
97
98
    // notify that the taskQueue needs to be processed
99
22
    newTaskScheduled.notify_one();
100
22
}
101
102
static void Repeat(CScheduler& s, CScheduler::Function f, std::chrono::milliseconds delta)
103
254
{
104
254
    f();
105
254
    s.scheduleFromNow([=, &s] { Repeat(s, f, delta); }, delta);
106
254
}
107
108
void CScheduler::scheduleEvery(CScheduler::Function f, std::chrono::milliseconds delta)
109
6.57k
{
110
6.57k
    scheduleFromNow([this, f, delta] { Repeat(*this, f, delta); }, delta);
111
6.57k
}
112
113
size_t CScheduler::getQueueInfo(std::chrono::steady_clock::time_point& first,
114
                                std::chrono::steady_clock::time_point& last) const
115
4
{
116
4
    LOCK(newTaskMutex);
117
4
    size_t result = taskQueue.size();
118
4
    if (!taskQueue.empty()) {
119
3
        first = taskQueue.begin()->first;
120
3
        last = taskQueue.rbegin()->first;
121
3
    }
122
4
    return result;
123
4
}
124
125
bool CScheduler::AreThreadsServicingQueue() const
126
1.26k
{
127
1.26k
    LOCK(newTaskMutex);
128
1.26k
    return nThreadsServicingQueue;
129
1.26k
}
130
131
132
void SerialTaskRunner::MaybeScheduleProcessQueue()
133
784k
{
134
784k
    {
135
784k
        LOCK(m_callbacks_mutex);
136
        // Try to avoid scheduling too many copies here, but if we
137
        // accidentally have two ProcessQueue's scheduled at once its
138
        // not a big deal.
139
784k
        if (m_are_callbacks_running) return;
140
700k
        if (m_callbacks_pending.empty()) return;
141
700k
    }
142
441k
    m_scheduler.schedule([this] { this->ProcessQueue(); }, std::chrono::steady_clock::now());
143
441k
}
144
145
void SerialTaskRunner::ProcessQueue()
146
440k
{
147
440k
    std::function<void()> callback;
148
440k
    {
149
440k
        LOCK(m_callbacks_mutex);
150
440k
        if (m_are_callbacks_running) return;
151
440k
        if (m_callbacks_pending.empty()) return;
152
391k
        m_are_callbacks_running = true;
153
154
391k
        callback = std::move(m_callbacks_pending.front());
155
391k
        m_callbacks_pending.pop_front();
156
391k
    }
157
158
    // RAII the setting of fCallbacksRunning and calling MaybeScheduleProcessQueue
159
    // to ensure both happen safely even if callback() throws.
160
0
    struct RAIICallbacksRunning {
161
391k
        SerialTaskRunner* instance;
162
391k
        explicit RAIICallbacksRunning(SerialTaskRunner* _instance) : instance(_instance) {}
163
391k
        ~RAIICallbacksRunning()
164
391k
        {
165
391k
            {
166
391k
                LOCK(instance->m_callbacks_mutex);
167
391k
                instance->m_are_callbacks_running = false;
168
391k
            }
169
391k
            instance->MaybeScheduleProcessQueue();
170
391k
        }
171
391k
    } raiicallbacksrunning(this);
172
173
391k
    callback();
174
391k
}
175
176
void SerialTaskRunner::insert(std::function<void()> func)
177
392k
{
178
392k
    {
179
392k
        LOCK(m_callbacks_mutex);
180
392k
        m_callbacks_pending.emplace_back(std::move(func));
181
392k
    }
182
392k
    MaybeScheduleProcessQueue();
183
392k
}
184
185
void SerialTaskRunner::flush()
186
1.26k
{
187
1.26k
    assert(!m_scheduler.AreThreadsServicingQueue());
188
1.26k
    bool should_continue = true;
189
2.60k
    while (should_continue) {
190
1.33k
        ProcessQueue();
191
1.33k
        LOCK(m_callbacks_mutex);
192
1.33k
        should_continue = !m_callbacks_pending.empty();
193
1.33k
    }
194
1.26k
}
195
196
size_t SerialTaskRunner::size()
197
128k
{
198
128k
    LOCK(m_callbacks_mutex);
199
128k
    return m_callbacks_pending.size();
200
128k
}