ESPHome 2026.3.0-dev
Loading...
Searching...
No Matches
time_64.cpp
Go to the documentation of this file.
2
3#ifndef USE_NATIVE_64BIT_TIME
4
5#include "time_64.h"
6
8#ifdef ESPHOME_DEBUG_SCHEDULER
9#include "esphome/core/log.h"
10#include <cinttypes>
11#endif
12#ifdef ESPHOME_THREAD_MULTI_ATOMICS
13#include <atomic>
14#endif
15#include <limits>
16
17namespace esphome {
18
19#ifdef ESPHOME_DEBUG_SCHEDULER
20static const char *const TAG = "time_64";
21#endif
22
/// Convert a rolling 32-bit millis() reading into a monotonically increasing
/// 64-bit millisecond timestamp, tracking 32-bit rollover (~every 49.7 days)
/// in a persistent epoch counter (millis_major).
///
/// The result is (static_cast<uint64_t>(millis_major) << 32) + now.
/// Thread-safety strategy depends on the platform threading model; see the
/// THREAD SAFETY NOTE below for the three implementations.
///
/// @param now a fresh millis() reading. Stale values are tolerated (the code
///            handles out-of-order timestamps between threads) but reduce accuracy.
/// @return 64-bit milliseconds since boot.
uint64_t Millis64Impl::compute(uint32_t now) {
  // Half the 32-bit range - used to detect rollovers vs normal time progression
  static constexpr uint32_t HALF_MAX_UINT32 = std::numeric_limits<uint32_t>::max() / 2;

  // State variables for rollover tracking - static to persist across calls
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
  // Mutex for rollover serialization (taken only every ~49.7 days).
  // A spinlock would be smaller (~1 byte vs ~80-100 bytes) but is unsafe on
  // preemptive single-core RTOS platforms due to priority inversion: a high-priority
  // task spinning would prevent the lock holder from running to release it.
  static Mutex lock;
  /*
   * Multi-threaded platforms with atomic support: last_millis needs atomic for lock-free updates.
   * Writers publish last_millis with memory_order_release and readers use memory_order_acquire.
   * This ensures that once a reader sees the new low word, it also observes the corresponding
   * increment of millis_major.
   */
  static std::atomic<uint32_t> last_millis{0};
  /*
   * Upper 16 bits of the 64-bit millis counter. Incremented only while holding lock;
   * read concurrently. Atomic (relaxed) avoids a formal data race. Ordering relative
   * to last_millis is provided by its release store and the corresponding acquire loads.
   */
  static std::atomic<uint16_t> millis_major{0};
#elif !defined(ESPHOME_THREAD_SINGLE) /* ESPHOME_THREAD_MULTI_NO_ATOMICS */
  // No atomics available: plain variables guarded (only near rollover) by a mutex.
  static Mutex lock;
  static uint32_t last_millis{0};
  static uint16_t millis_major{0};
#else /* ESPHOME_THREAD_SINGLE */
  // Single-threaded: plain variables, no synchronization needed.
  static uint32_t last_millis{0};
  static uint16_t millis_major{0};
#endif

  // THREAD SAFETY NOTE:
  // This function has three implementations, based on the precompiler flags
  // - ESPHOME_THREAD_SINGLE - Runs on single-threaded platforms (ESP8266, etc.)
  // - ESPHOME_THREAD_MULTI_NO_ATOMICS - Runs on multi-threaded platforms without atomics (LibreTiny BK72xx)
  // - ESPHOME_THREAD_MULTI_ATOMICS - Runs on multi-threaded platforms with atomics (LibreTiny RTL87xx/LN882x, etc.)
  //
  // Make sure all changes are synchronized if you edit this function.
  //
  // IMPORTANT: Always pass fresh millis() values to this function. The implementation
  // handles out-of-order timestamps between threads, but minimizing time differences
  // helps maintain accuracy.

#ifdef ESPHOME_THREAD_SINGLE
  // Single-core platforms have no concurrency, so this is a simple implementation
  // that just tracks 32-bit rollover (every 49.7 days) without any locking or atomics.

  uint16_t major = millis_major;
  uint32_t last = last_millis;

  // Check for rollover
  // A "true" rollover is a backwards jump larger than half the 32-bit range;
  // anything smaller is treated as out-of-order / stale input, not a wrap.
  if (now < last && (last - now) > HALF_MAX_UINT32) {
    millis_major++;
    major++;
    last_millis = now;
#ifdef ESPHOME_DEBUG_SCHEDULER
    ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
  } else if (now > last) {
    // Only update if time moved forward
    last_millis = now;
  }

  // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
  return now + (static_cast<uint64_t>(major) << 32);

#elif defined(ESPHOME_THREAD_MULTI_NO_ATOMICS)
  // Without atomics, this implementation uses locks more aggressively:
  // 1. Always locks when near the rollover boundary (within 10 seconds)
  // 2. Always locks when detecting a large backwards jump
  // 3. Updates without lock in normal forward progression (accepting minor races)
  // This is less efficient but necessary without atomic operations.
  uint16_t major = millis_major;
  uint32_t last = last_millis;

  // Define a safe window around the rollover point (10 seconds)
  // This covers any reasonable scheduler delays or thread preemption
  static constexpr uint32_t ROLLOVER_WINDOW = 10000;  // 10 seconds in milliseconds

  // Check if we're near the rollover boundary (close to std::numeric_limits<uint32_t>::max() or just past 0)
  bool near_rollover = (last > (std::numeric_limits<uint32_t>::max() - ROLLOVER_WINDOW)) || (now < ROLLOVER_WINDOW);

  if (near_rollover || (now < last && (last - now) > HALF_MAX_UINT32)) {
    // Near rollover or detected a rollover - need lock for safety
    LockGuard guard{lock};
    // Re-read with lock held
    last = last_millis;

    if (now < last && (last - now) > HALF_MAX_UINT32) {
      // True rollover detected (happens every ~49.7 days)
      millis_major++;
      major++;
#ifdef ESPHOME_DEBUG_SCHEDULER
      ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
    }
    // Update last_millis while holding lock
    last_millis = now;
  } else if (now > last) {
    // Normal case: Not near rollover and time moved forward
    // Update without lock. While this may cause minor races (microseconds of
    // backwards time movement), they're acceptable because:
    // 1. The scheduler operates at millisecond resolution, not microsecond
    // 2. We've already prevented the critical rollover race condition
    // 3. Any backwards movement is orders of magnitude smaller than scheduler delays
    last_millis = now;
  }
  // If now <= last and we're not near rollover, don't update
  // This minimizes backwards time movement

  // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
  return now + (static_cast<uint64_t>(major) << 32);

#elif defined(ESPHOME_THREAD_MULTI_ATOMICS)
  // Uses atomic operations with acquire/release semantics to ensure coherent
  // reads of millis_major and last_millis across cores. Features:
  // 1. Epoch-coherency retry loop to handle concurrent updates
  // 2. Lock only taken for actual rollover detection and update
  // 3. Lock-free CAS updates for normal forward time progression
  // 4. Memory ordering ensures cores see consistent time values

  for (;;) {
    // Snapshot the epoch BEFORE reading last_millis; re-checked at loop end
    // to detect a concurrent rollover that would make (major, now) mismatched.
    uint16_t major = millis_major.load(std::memory_order_acquire);

    /*
     * Acquire so that if we later decide **not** to take the lock we still
     * observe a millis_major value coherent with the loaded last_millis.
     * The acquire load ensures any later read of millis_major sees its
     * corresponding increment.
     */
    uint32_t last = last_millis.load(std::memory_order_acquire);

    // If we might be near a rollover (large backwards jump), take the lock
    // This ensures rollover detection and last_millis update are atomic together
    if (now < last && (last - now) > HALF_MAX_UINT32) {
      // Potential rollover - need lock for atomic rollover detection + update
      LockGuard guard{lock};
      // Re-read with lock held; mutex already provides ordering
      last = last_millis.load(std::memory_order_relaxed);

      if (now < last && (last - now) > HALF_MAX_UINT32) {
        // True rollover detected (happens every ~49.7 days)
        millis_major.fetch_add(1, std::memory_order_relaxed);
        major++;
#ifdef ESPHOME_DEBUG_SCHEDULER
        ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
      }
      /*
       * Update last_millis while holding the lock to prevent races.
       * Publish the new low-word *after* bumping millis_major (done above)
       * so readers never see a mismatched pair.
       */
      last_millis.store(now, std::memory_order_release);
    } else {
      // Normal case: Try lock-free update, but only allow forward movement within same epoch
      // This prevents accidentally moving backwards across a rollover boundary
      while (now > last && (now - last) < HALF_MAX_UINT32) {
        if (last_millis.compare_exchange_weak(last, now,
                                              std::memory_order_release,    // success
                                              std::memory_order_relaxed)) {  // failure
          break;
        }
        // CAS failure means no data was published; relaxed is fine
        // last is automatically updated by compare_exchange_weak if it fails
      }
    }
    // Epoch-coherency check: if millis_major changed while we worked, our
    // (major, now) pair may straddle a rollover - retry with fresh values.
    uint16_t major_end = millis_major.load(std::memory_order_relaxed);
    if (major_end == major)
      return now + (static_cast<uint64_t>(major) << 32);
  }
  // Unreachable - the loop always returns when major_end == major
  __builtin_unreachable();

#else
#error \
    "No platform threading model defined. One of ESPHOME_THREAD_SINGLE, ESPHOME_THREAD_MULTI_NO_ATOMICS, or ESPHOME_THREAD_MULTI_ATOMICS must be defined."
#endif
}
204
205} // namespace esphome
206
207#endif // !USE_NATIVE_64BIT_TIME
const char *const TAG
Definition spi.cpp:7
Providing packet encoding functions for exchanging data with a remote host.
Definition a01nyub.cpp:7