scheduler.h (ESPHome 2025.12.0-dev)
#pragma once

#include <vector>
#include <memory>
#include <cstring>
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
#include <atomic>
#endif

namespace esphome {

class Component;
struct RetryArgs;

// Forward declaration of retry_handler - needs to be non-static for friend declaration
void retry_handler(const std::shared_ptr<RetryArgs> &args);

class Scheduler {
  // Allow retry_handler to access protected members for internal retry mechanism
  friend void ::esphome::retry_handler(const std::shared_ptr<RetryArgs> &args);
  // Allow DelayAction to call set_timer_common_ with skip_cancel=true for parallel script delays.
  // This is needed to fix issue #10264 where parallel scripts with delays interfere with each other.
  // We use friend instead of a public API because skip_cancel is dangerous - it can cause delays
  // to accumulate and overload the scheduler if misused.
  template<typename... Ts> friend class DelayAction;

 public:
  // Public API - accepts std::string for backward compatibility
  void set_timeout(Component *component, const std::string &name, uint32_t timeout, std::function<void()> func);

  void set_timeout(Component *component, const char *name, uint32_t timeout, std::function<void()> func);

  bool cancel_timeout(Component *component, const std::string &name);
  bool cancel_timeout(Component *component, const char *name);

  void set_interval(Component *component, const std::string &name, uint32_t interval, std::function<void()> func);

  void set_interval(Component *component, const char *name, uint32_t interval, std::function<void()> func);

  bool cancel_interval(Component *component, const std::string &name);
  bool cancel_interval(Component *component, const char *name);
  void set_retry(Component *component, const std::string &name, uint32_t initial_wait_time, uint8_t max_attempts,
                 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);
  void set_retry(Component *component, const char *name, uint32_t initial_wait_time, uint8_t max_attempts,
                 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);
  bool cancel_retry(Component *component, const std::string &name);
  bool cancel_retry(Component *component, const char *name);
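
  // Illustrative usage (a sketch, not part of the original API surface): component code normally
  // reaches the scheduler through the Component::set_timeout()/set_interval()/set_retry() wrappers,
  // which forward here with `this` as the owning component. Called directly through the global
  // `App.scheduler` instance it would look roughly like this (`connect_()` is a placeholder):
  //
  //   App.scheduler.set_timeout(this, "reconnect", 5000, [this]() { this->connect_(); });
  //   App.scheduler.set_interval(this, "poll", 1000, [this]() { this->update(); });
  //   App.scheduler.cancel_timeout(this, "reconnect");  // true if a matching item was cancelled
  //
  // The const char* overloads assume the name is a string literal (or otherwise outlives the item);
  // the std::string overloads copy the name and exist mainly for backward compatibility.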

  // Calculate when the next scheduled item should run
  // @param now Fresh timestamp from millis() - must not be stale/cached
  // Returns the time in milliseconds until the next scheduled item, or nullopt if no items
  // This method performs cleanup of removed items before checking the schedule
  // IMPORTANT: This method should only be called from the main thread (loop task).
  optional<uint32_t> next_schedule_in(uint32_t now);

  // Execute all scheduled items that are ready
  // @param now Fresh timestamp from millis() - must not be stale/cached
  void call(uint32_t now);

  void process_to_add();

 protected:
  struct SchedulerItem {
    // Ordered by size to minimize padding
    Component *component;
    // Optimized name storage using tagged union
    union {
      const char *static_name;  // For string literals (no allocation)
      char *dynamic_name;       // For allocated strings
    } name_;
    uint32_t interval;
    // Split time to handle millis() rollover. The scheduler combines the 32-bit millis()
    // with a 16-bit rollover counter to create a 48-bit time space (using 32+16 bits).
    // This is intentionally limited to 48 bits, not stored as a full 64-bit value.
    // With 49.7 days per 32-bit rollover, the 16-bit counter supports
    // 49.7 days × 65536 = ~8900 years. This ensures correct scheduling
    // even when devices run for months. Split into two fields for better memory
    // alignment on 32-bit systems.
    uint32_t next_execution_low_;  // Lower 32 bits of execution time (millis value)
    std::function<void()> callback;
    uint16_t next_execution_high_;  // Upper 16 bits (millis_major counter)

#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    // Multi-threaded with atomics: use atomic for lock-free access
    // Place atomic<bool> separately since it can't be packed with bit fields
    std::atomic<bool> remove{false};

    // Bit-packed fields (3 bits used, 5 bits padding in 1 byte)
    enum Type : uint8_t { TIMEOUT, INTERVAL } type : 1;
    bool name_is_dynamic : 1;  // True if name was dynamically allocated (needs delete[])
    bool is_retry : 1;         // True if this is a retry timeout
    // 5 bits padding
#else
    // Single-threaded or multi-threaded without atomics: can pack all fields together
    // Bit-packed fields (4 bits used, 4 bits padding in 1 byte)
    enum Type : uint8_t { TIMEOUT, INTERVAL } type : 1;
    bool remove : 1;
    bool name_is_dynamic : 1;  // True if name was dynamically allocated (needs delete[])
    bool is_retry : 1;         // True if this is a retry timeout
    // 4 bits padding
#endif

    // Constructor
    SchedulerItem()
        : component(nullptr),
          interval(0),
          next_execution_low_(0),
          next_execution_high_(0),
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
          // remove is initialized in the member declaration as std::atomic<bool>{false}
          type(TIMEOUT),
          name_is_dynamic(false),
          is_retry(false) {
#else
          type(TIMEOUT),
          remove(false),
          name_is_dynamic(false),
          is_retry(false) {
#endif
      name_.static_name = nullptr;
    }

    // Destructor to clean up dynamic names
    ~SchedulerItem() { clear_dynamic_name(); }

    // Delete copy operations to prevent accidental copies
    SchedulerItem(const SchedulerItem &) = delete;
    SchedulerItem &operator=(const SchedulerItem &) = delete;

    // Delete move operations: SchedulerItem objects are only managed via unique_ptr, never moved directly
    SchedulerItem(SchedulerItem &&) = delete;
    SchedulerItem &operator=(SchedulerItem &&) = delete;

    // Helper to get the name regardless of storage type
    const char *get_name() const { return name_is_dynamic ? name_.dynamic_name : name_.static_name; }

    // Helper to clear dynamic name if allocated
    void clear_dynamic_name() {
      if (name_is_dynamic && name_.dynamic_name) {
        delete[] name_.dynamic_name;
        name_.dynamic_name = nullptr;
        name_is_dynamic = false;
      }
    }

    // Helper to set name with proper ownership
    void set_name(const char *name, bool make_copy = false) {
      // Clean up old dynamic name if any
      clear_dynamic_name();

      if (!name) {
        // nullptr case - no name provided
        name_.static_name = nullptr;
      } else if (make_copy) {
        // Make a copy for dynamic strings (including empty strings)
        size_t len = strlen(name);
        name_.dynamic_name = new char[len + 1];
        memcpy(name_.dynamic_name, name, len + 1);
        name_is_dynamic = true;
      } else {
        // Use static string directly (including empty strings)
        name_.static_name = name;
      }
    }

    static bool cmp(const std::unique_ptr<SchedulerItem> &a, const std::unique_ptr<SchedulerItem> &b);

    // Note: We use 48 bits total (32 + 16), stored in a 64-bit value for API compatibility.
    // The upper 16 bits of the 64-bit value are always zero, which is fine since
    // millis_major_ is also 16 bits and they must match.
    constexpr uint64_t get_next_execution() const {
      return (static_cast<uint64_t>(next_execution_high_) << 32) | next_execution_low_;
    }

    constexpr void set_next_execution(uint64_t value) {
      next_execution_low_ = static_cast<uint32_t>(value);
      // Cast to uint16_t intentionally truncates to lower 16 bits of the upper 32 bits.
      // This is correct because millis_major_ that creates these values is also 16 bits.
      next_execution_high_ = static_cast<uint16_t>(value >> 32);
    }
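
    // Worked example (illustrative): a 48-bit execution time of 0x0123'89ABCDEF ms is stored as
    // next_execution_low_ = 0x89ABCDEF and next_execution_high_ = 0x0123; get_next_execution()
    // then reassembles (0x0123ULL << 32) | 0x89ABCDEF == 0x0123'89ABCDEF.
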
    constexpr const char *get_type_str() const { return (type == TIMEOUT) ? "timeout" : "interval"; }
    const LogString *get_source() const { return component ? component->get_component_log_str() : LOG_STR("unknown"); }
  };

  // Common implementation for both timeout and interval
  void set_timer_common_(Component *component, SchedulerItem::Type type, bool is_static_string, const void *name_ptr,
                         uint32_t delay, std::function<void()> func, bool is_retry = false, bool skip_cancel = false);

  // Common implementation for retry
  void set_retry_common_(Component *component, bool is_static_string, const void *name_ptr, uint32_t initial_wait_time,
                         uint8_t max_attempts, std::function<RetryResult(uint8_t)> func, float backoff_increase_factor);

  uint64_t millis_64_(uint32_t now);
  // Cleanup logically deleted items from the scheduler
  // Returns the number of items remaining after cleanup
  // IMPORTANT: This method should only be called from the main thread (loop task).
  size_t cleanup_();
  void pop_raw_();

 private:
  // Helper to cancel items by name - must be called with lock held
  bool cancel_item_locked_(Component *component, const char *name, SchedulerItem::Type type, bool match_retry = false);

  // Helper to extract name as const char* from either static string or std::string
  inline const char *get_name_cstr_(bool is_static_string, const void *name_ptr) {
    return is_static_string ? static_cast<const char *>(name_ptr) : static_cast<const std::string *>(name_ptr)->c_str();
  }

  // Common implementation for cancel operations
  bool cancel_item_(Component *component, bool is_static_string, const void *name_ptr, SchedulerItem::Type type);

  // Helper to check if two scheduler item names match
  inline bool HOT names_match_(const char *name1, const char *name2) const {
    // Check pointer equality first (common for static strings), then string contents
    // The core ESPHome codebase uses static strings (const char*) for component names,
    // making pointer comparison effective. The std::string overloads exist only for
    // compatibility with external components but are rarely used in practice.
    return (name1 != nullptr && name2 != nullptr) && ((name1 == name2) || (strcmp(name1, name2) == 0));
  }
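
  // Example (illustrative): when the same string literal is passed to set_timeout() and later to
  // cancel_timeout(), both calls typically see the same pointer, so the strcmp() is skipped entirely;
  // a dynamically copied name with equal contents still matches through the strcmp() fallback.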

  // Helper function to check if item matches criteria for cancellation
  inline bool HOT matches_item_(const std::unique_ptr<SchedulerItem> &item, Component *component,
                                const char *name_cstr, SchedulerItem::Type type, bool match_retry,
                                bool skip_removed = true) const {
    if (item->component != component || item->type != type || (skip_removed && item->remove) ||
        (match_retry && !item->is_retry)) {
      return false;
    }
    return this->names_match_(item->get_name(), name_cstr);
  }

  // Helper to execute a scheduler item
  uint32_t execute_item_(SchedulerItem *item, uint32_t now);

  // Helper to check if item should be skipped
  bool should_skip_item_(SchedulerItem *item) const {
    return is_item_removed_(item) || (item->component != nullptr && item->component->is_failed());
  }

  // Helper to recycle a SchedulerItem
  void recycle_item_(std::unique_ptr<SchedulerItem> item);

  // Helper to perform full cleanup when too many items are cancelled
  void full_cleanup_removed_items_();

#ifdef ESPHOME_DEBUG_SCHEDULER
  // Helper for debug logging in set_timer_common_ - extracted to reduce code size
  void debug_log_timer_(const SchedulerItem *item, bool is_static_string, const char *name_cstr,
                        SchedulerItem::Type type, uint32_t delay, uint64_t now);
#endif /* ESPHOME_DEBUG_SCHEDULER */

#ifndef ESPHOME_THREAD_SINGLE
  // Helper to process defer queue - inline for performance in hot path
  inline void process_defer_queue_(uint32_t &now) {
    // Process defer queue first to guarantee FIFO execution order for deferred items.
    // Previously, defer() used the heap which gave undefined order for equal timestamps,
    // causing race conditions on multi-core systems (ESP32, BK7200).
    // With the defer queue:
    // - Deferred items (delay=0) go directly to defer_queue_ in set_timer_common_
    // - Items execute in exact order they were deferred (FIFO guarantee)
    // - No deferred items exist in to_add_, so processing order doesn't affect correctness
    // Single-core platforms don't use this queue and fall back to the heap-based approach.
    //
    // Note: Items cancelled via cancel_item_locked_() are marked with remove=true but still
    // processed here. They are skipped during execution by should_skip_item_().
    // This is intentional - no memory leak occurs.
    //
    // We use an index (defer_queue_front_) to track the read position instead of calling
    // erase() on every pop, which would be O(n). The queue is processed once per loop -
    // any items added during processing are left for the next loop iteration.

    // Snapshot the queue end point - only process items that existed at loop start
    // Items added during processing (by callbacks or other threads) run next loop
    // No lock needed: single consumer (main loop), stale read just means we process less this iteration
    size_t defer_queue_end = this->defer_queue_.size();

    while (this->defer_queue_front_ < defer_queue_end) {
      std::unique_ptr<SchedulerItem> item;
      {
        LockGuard lock(this->lock_);
        // SAFETY: Moving out the unique_ptr leaves a nullptr in the vector at defer_queue_front_.
        // This is intentional and safe because:
        // 1. The vector is only cleaned up by cleanup_defer_queue_locked_() at the end of this function
        // 2. Any code iterating defer_queue_ MUST check for nullptr items (see mark_matching_items_removed_
        //    and has_cancelled_timeout_in_container_ in scheduler.h)
        // 3. The lock protects concurrent access, but the nullptr remains until cleanup
        item = std::move(this->defer_queue_[this->defer_queue_front_]);
        this->defer_queue_front_++;
      }

      // Execute callback without holding lock to prevent deadlocks
      // if the callback tries to call defer() again
      if (!this->should_skip_item_(item.get())) {
        now = this->execute_item_(item.get(), now);
      }
      // Recycle the defer item after execution
      this->recycle_item_(std::move(item));
    }

    // If we've consumed all items up to the snapshot point, clean up the dead space
    // Single consumer (main loop), so no lock needed for this check
    if (this->defer_queue_front_ >= defer_queue_end) {
      LockGuard lock(this->lock_);
      this->cleanup_defer_queue_locked_();
    }
  }
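
  // Illustrative trace (a sketch of how process_defer_queue_() and cleanup_defer_queue_locked_()
  // interact): with defer_queue_ = {A, B, C} at loop start, defer_queue_end is 3 and A, B, C execute
  // in FIFO order. If B's callback defers D, D is appended but not run this pass (its index 3 is not
  // below the snapshot of 3); cleanup_defer_queue_locked_() then compacts D to the front and resets
  // defer_queue_front_ to 0, so D runs first on the next loop iteration.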

  // Helper to cleanup defer_queue_ after processing
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  inline void cleanup_defer_queue_locked_() {
    // Check if new items were added by producers during processing
    if (this->defer_queue_front_ >= this->defer_queue_.size()) {
      // Common case: no new items - clear everything
      this->defer_queue_.clear();
    } else {
      // Rare case: new items were added during processing - compact the vector
      // This only happens when:
      // 1. A deferred callback calls defer() again, or
      // 2. Another thread calls defer() while we're processing
      //
      // Move unprocessed items (added during this loop) to the front for next iteration
      //
      // SAFETY: Compacted items may include cancelled items (marked for removal via
      // cancel_item_locked_() during execution). This is safe because should_skip_item_()
      // checks is_item_removed_() before executing, so cancelled items will be skipped
      // and recycled on the next loop iteration.
      size_t remaining = this->defer_queue_.size() - this->defer_queue_front_;
      for (size_t i = 0; i < remaining; i++) {
        this->defer_queue_[i] = std::move(this->defer_queue_[this->defer_queue_front_ + i]);
      }
      this->defer_queue_.resize(remaining);
    }
    this->defer_queue_front_ = 0;
  }
#endif /* not ESPHOME_THREAD_SINGLE */

  // Helper to check if item is marked for removal (platform-specific)
  // Returns true if item should be skipped, handles platform-specific synchronization
  // For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this
  // function.
  bool is_item_removed_(SchedulerItem *item) const {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    // Multi-threaded with atomics: use atomic load for lock-free access
    return item->remove.load(std::memory_order_acquire);
#else
    // Single-threaded (ESPHOME_THREAD_SINGLE) or
    // multi-threaded without atomics (ESPHOME_THREAD_MULTI_NO_ATOMICS): direct read
    // For ESPHOME_THREAD_MULTI_NO_ATOMICS, caller MUST hold lock!
    return item->remove;
#endif
  }

  // Helper to set item removal flag (platform-specific)
  // For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this
  // function. Uses memory_order_release when setting to true (for cancellation synchronization),
  // and memory_order_relaxed when setting to false (for initialization).
  void set_item_removed_(SchedulerItem *item, bool removed) {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    // Multi-threaded with atomics: use atomic store with appropriate ordering
    // Release ordering when setting to true ensures cancellation is visible to other threads
    // Relaxed ordering when setting to false is sufficient for initialization
    item->remove.store(removed, removed ? std::memory_order_release : std::memory_order_relaxed);
#else
    // Single-threaded (ESPHOME_THREAD_SINGLE) or
    // multi-threaded without atomics (ESPHOME_THREAD_MULTI_NO_ATOMICS): direct write
    // For ESPHOME_THREAD_MULTI_NO_ATOMICS, caller MUST hold lock!
    item->remove = removed;
#endif
  }

  // Helper to mark matching items in a container as removed
  // Returns the number of items marked for removal
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  template<typename Container>
  size_t mark_matching_items_removed_(Container &container, Component *component, const char *name_cstr,
                                      SchedulerItem::Type type, bool match_retry) {
    size_t count = 0;
    for (auto &item : container) {
      // Skip nullptr items (can happen in defer_queue_ when items are being processed)
      // The defer_queue_ uses index-based processing: items are std::moved out but left in the
      // vector as nullptr until cleanup. Even though this function is called with lock held,
      // the vector can still contain nullptr items from the processing loop. This check prevents crashes.
      if (!item)
        continue;
      if (this->matches_item_(item, component, name_cstr, type, match_retry)) {
        // Mark item for removal (platform-specific)
        this->set_item_removed_(item.get(), true);
        count++;
      }
    }
    return count;
  }

  // Template helper to check if any item in a container matches our criteria
  template<typename Container>
  bool has_cancelled_timeout_in_container_(const Container &container, Component *component, const char *name_cstr,
                                           bool match_retry) const {
    for (const auto &item : container) {
      // Skip nullptr items (can happen in defer_queue_ when items are being processed)
      // The defer_queue_ uses index-based processing: items are std::moved out but left in the
      // vector as nullptr until cleanup. If this function is called during defer queue processing,
      // it will iterate over these nullptr items. This check prevents crashes.
      if (!item)
        continue;
      if (is_item_removed_(item.get()) &&
          this->matches_item_(item, component, name_cstr, SchedulerItem::TIMEOUT, match_retry,
                              /* skip_removed= */ false)) {
        return true;
      }
    }
    return false;
  }

  Mutex lock_;
  std::vector<std::unique_ptr<SchedulerItem>> items_;
  std::vector<std::unique_ptr<SchedulerItem>> to_add_;
#ifndef ESPHOME_THREAD_SINGLE
  // Single-core platforms don't need the defer queue and save ~32 bytes of RAM
  // Using std::vector instead of std::deque avoids 512-byte chunked allocations
  // Index tracking avoids O(n) erase() calls when draining the queue each loop
  std::vector<std::unique_ptr<SchedulerItem>> defer_queue_;  // FIFO queue for defer() calls
  size_t defer_queue_front_{0};  // Index of first valid item in defer_queue_ (tracks consumed items)
#endif /* ESPHOME_THREAD_SINGLE */
  uint32_t to_remove_{0};

  // Memory pool for recycling SchedulerItem objects to reduce heap churn.
  // Design decisions:
  // - std::vector is used instead of a fixed array because many systems only need 1-2 scheduler items
  // - The vector grows dynamically up to MAX_POOL_SIZE (5) only when needed, saving memory on simple setups
  // - Pool size of 5 matches typical usage (2-4 timers) while keeping memory overhead low (~250 bytes on ESP32)
  // - The pool significantly reduces heap fragmentation which is critical because heap allocation/deallocation
  //   can stall the entire system, causing timing issues and dropped events for any components that need
  //   to synchronize between tasks (see https://github.com/esphome/backlog/issues/52)
  std::vector<std::unique_ptr<SchedulerItem>> scheduler_item_pool_;

#ifdef ESPHOME_THREAD_MULTI_ATOMICS
  /*
   * Multi-threaded platforms with atomic support: last_millis_ needs atomic for lock-free updates
   *
   * MEMORY-ORDERING NOTE
   * --------------------
   * `last_millis_` and `millis_major_` form a single 64-bit timestamp split in half.
   * Writers publish `last_millis_` with memory_order_release and readers use
   * memory_order_acquire. This ensures that once a reader sees the new low word,
   * it also observes the corresponding increment of `millis_major_`.
   */
  std::atomic<uint32_t> last_millis_{0};
#else  /* not ESPHOME_THREAD_MULTI_ATOMICS */
  // Platforms without atomic support or single-threaded platforms
  uint32_t last_millis_{0};
#endif /* else ESPHOME_THREAD_MULTI_ATOMICS */

  /*
   * Upper 16 bits of the 64-bit millis counter. Incremented only while holding
   * `lock_`; read concurrently. Atomic (relaxed) avoids a formal data race.
   * Ordering relative to `last_millis_` is provided by its release store and the
   * corresponding acquire loads.
   */
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
  std::atomic<uint16_t> millis_major_{0};
#else  /* not ESPHOME_THREAD_MULTI_ATOMICS */
  uint16_t millis_major_{0};
#endif /* else ESPHOME_THREAD_MULTI_ATOMICS */
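
  // Illustrative composition (a sketch of the 48-bit timestamp described above): millis_64_()
  // effectively yields (static_cast<uint64_t>(millis_major_) << 32) | last_millis_, so after one
  // 32-bit millis() rollover (~49.7 days) millis_major_ becomes 1 and the combined value keeps
  // increasing monotonically across rollovers.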
};

}  // namespace esphome