// time_64.cpp — ESPHome 2026.5.0-dev
// (Documentation-site navigation chrome removed; this file provides the
// out-of-line 64-bit millis() rollover tracking implementation.)
2
3#ifndef USE_NATIVE_64BIT_TIME
4
5#include "time_64.h"
6
8#ifdef ESPHOME_DEBUG_SCHEDULER
9#include "esphome/core/log.h"
10#include <cinttypes>
11#endif
12#ifdef ESPHOME_THREAD_MULTI_ATOMICS
13#include <atomic>
14#endif
15#include <limits>
16
17namespace esphome {
18
19#ifdef ESPHOME_DEBUG_SCHEDULER
static const char *const TAG = "time_64";  // Log tag for scheduler-debug messages (only compiled in when ESPHOME_DEBUG_SCHEDULER is defined)
21#endif
22
23#ifdef ESPHOME_THREAD_SINGLE
// Storage for Millis64Impl inline compute() — defined here so all TUs share one copy.
// Single-threaded platforms keep the rollover-tracking state in plain static members:
// last_millis_ is the most recently observed 32-bit millis() sample and millis_major_
// the count of 32-bit wraps (upper 16 bits of the 64-bit counter). No synchronization
// is needed with only one thread. (The inline compute() lives in time_64.h — presumably
// it mirrors the out-of-line logic below; confirm there when editing.)
uint32_t Millis64Impl::last_millis_{0};
uint16_t Millis64Impl::millis_major_{0};
27#else
28
29uint64_t Millis64Impl::compute(uint32_t now) {
30 // Half the 32-bit range - used to detect rollovers vs normal time progression
31 static constexpr uint32_t HALF_MAX_UINT32 = std::numeric_limits<uint32_t>::max() / 2;
32
33 // State variables for rollover tracking - static to persist across calls
34#ifdef ESPHOME_THREAD_MULTI_ATOMICS
35 // Mutex for rollover serialization (taken only every ~49.7 days).
36 // A spinlock would be smaller (~1 byte vs ~80-100 bytes) but is unsafe on
37 // preemptive single-core RTOS platforms due to priority inversion: a high-priority
38 // task spinning would prevent the lock holder from running to release it.
39 static Mutex lock;
40 /*
41 * Multi-threaded platforms with atomic support: last_millis needs atomic for lock-free updates.
42 * Writers publish last_millis with memory_order_release and readers use memory_order_acquire.
43 * This ensures that once a reader sees the new low word, it also observes the corresponding
44 * increment of millis_major.
45 */
46 static std::atomic<uint32_t> last_millis{0};
47 /*
48 * Upper 16 bits of the 64-bit millis counter. Incremented only while holding lock;
49 * read concurrently. Atomic (relaxed) avoids a formal data race. Ordering relative
50 * to last_millis is provided by its release store and the corresponding acquire loads.
51 */
52 static std::atomic<uint16_t> millis_major{0};
53#else /* ESPHOME_THREAD_MULTI_NO_ATOMICS */
54 static Mutex lock;
55 static uint32_t last_millis{0};
56 static uint16_t millis_major{0};
57#endif
58
59 // THREAD SAFETY NOTE:
60 // This function has two out-of-line implementations, based on the preprocessor flags:
61 // - ESPHOME_THREAD_MULTI_NO_ATOMICS - Runs on multi-threaded platforms without atomics (LibreTiny BK72xx)
62 // - ESPHOME_THREAD_MULTI_ATOMICS - Runs on multi-threaded platforms with atomics (LibreTiny RTL87xx/LN882x, etc.)
63 //
64 // The ESPHOME_THREAD_SINGLE path is inlined in time_64.h.
65 // Make sure all changes are synchronized if you edit this function.
66 //
67 // IMPORTANT: Always pass fresh millis() values to this function. The implementation
68 // handles out-of-order timestamps between threads, but minimizing time differences
69 // helps maintain accuracy.
70
71#if defined(ESPHOME_THREAD_MULTI_NO_ATOMICS)
72 // Without atomics, this implementation uses locks more aggressively:
73 // 1. Always locks when near the rollover boundary (within 10 seconds)
74 // 2. Always locks when detecting a large backwards jump
75 // 3. Updates without lock in normal forward progression (accepting minor races)
76 // This is less efficient but necessary without atomic operations.
77 uint16_t major = millis_major;
78 uint32_t last = last_millis;
79
80 // Define a safe window around the rollover point (10 seconds)
81 // This covers any reasonable scheduler delays or thread preemption
82 static constexpr uint32_t ROLLOVER_WINDOW = 10000; // 10 seconds in milliseconds
83
84 // Check if we're near the rollover boundary (close to std::numeric_limits<uint32_t>::max() or just past 0)
85 bool near_rollover = (last > (std::numeric_limits<uint32_t>::max() - ROLLOVER_WINDOW)) || (now < ROLLOVER_WINDOW);
86
87 if (near_rollover || (now < last && (last - now) > HALF_MAX_UINT32)) {
88 // Near rollover or detected a rollover - need lock for safety
89 LockGuard guard{lock};
90 // Re-read with lock held
91 last = last_millis;
92
93 if (now < last && (last - now) > HALF_MAX_UINT32) {
94 // True rollover detected (happens every ~49.7 days)
95 millis_major++;
96 major++;
97#ifdef ESPHOME_DEBUG_SCHEDULER
98 ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
99#endif /* ESPHOME_DEBUG_SCHEDULER */
100 }
101 // Update last_millis while holding lock
102 last_millis = now;
103 } else if (now > last) {
104 // Normal case: Not near rollover and time moved forward
105 // Update without lock. While this may cause minor races (microseconds of
106 // backwards time movement), they're acceptable because:
107 // 1. The scheduler operates at millisecond resolution, not microsecond
108 // 2. We've already prevented the critical rollover race condition
109 // 3. Any backwards movement is orders of magnitude smaller than scheduler delays
110 last_millis = now;
111 }
112 // If now <= last and we're not near rollover, don't update
113 // This minimizes backwards time movement
114
115 // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
116 return now + (static_cast<uint64_t>(major) << 32);
117
118#elif defined(ESPHOME_THREAD_MULTI_ATOMICS)
119 // Uses atomic operations with acquire/release semantics to ensure coherent
120 // reads of millis_major and last_millis across cores. Features:
121 // 1. Epoch-coherency retry loop to handle concurrent updates
122 // 2. Lock only taken for actual rollover detection and update
123 // 3. Lock-free CAS updates for normal forward time progression
124 // 4. Memory ordering ensures cores see consistent time values
125
126 for (;;) {
127 uint16_t major = millis_major.load(std::memory_order_acquire);
128
129 /*
130 * Acquire so that if we later decide **not** to take the lock we still
131 * observe a millis_major value coherent with the loaded last_millis.
132 * The acquire load ensures any later read of millis_major sees its
133 * corresponding increment.
134 */
135 uint32_t last = last_millis.load(std::memory_order_acquire);
136
137 // If we might be near a rollover (large backwards jump), take the lock
138 // This ensures rollover detection and last_millis update are atomic together
139 if (now < last && (last - now) > HALF_MAX_UINT32) {
140 // Potential rollover - need lock for atomic rollover detection + update
141 LockGuard guard{lock};
142 // Re-read with lock held; mutex already provides ordering
143 last = last_millis.load(std::memory_order_relaxed);
144
145 if (now < last && (last - now) > HALF_MAX_UINT32) {
146 // True rollover detected (happens every ~49.7 days)
147 millis_major.fetch_add(1, std::memory_order_relaxed);
148 major++;
149#ifdef ESPHOME_DEBUG_SCHEDULER
150 ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
151#endif /* ESPHOME_DEBUG_SCHEDULER */
152 }
153 /*
154 * Update last_millis while holding the lock to prevent races.
155 * Publish the new low-word *after* bumping millis_major (done above)
156 * so readers never see a mismatched pair.
157 */
158 last_millis.store(now, std::memory_order_release);
159 } else {
160 // Normal case: Try lock-free update, but only allow forward movement within same epoch
161 // This prevents accidentally moving backwards across a rollover boundary
162 while (now > last && (now - last) < HALF_MAX_UINT32) {
163 if (last_millis.compare_exchange_weak(last, now,
164 std::memory_order_release, // success
165 std::memory_order_relaxed)) { // failure
166 break;
167 }
168 // CAS failure means no data was published; relaxed is fine
169 // last is automatically updated by compare_exchange_weak if it fails
170 }
171 }
172 uint16_t major_end = millis_major.load(std::memory_order_relaxed);
173 if (major_end == major)
174 return now + (static_cast<uint64_t>(major) << 32);
175 }
176 // Unreachable - the loop always returns when major_end == major
177 __builtin_unreachable();
178
179#else
180#error \
181 "No platform threading model defined. One of ESPHOME_THREAD_SINGLE, ESPHOME_THREAD_MULTI_NO_ATOMICS, or ESPHOME_THREAD_MULTI_ATOMICS must be defined."
182#endif
183}
184
185#endif // !ESPHOME_THREAD_SINGLE
186
187} // namespace esphome
188
189#endif // !USE_NATIVE_64BIT_TIME