ESPHome 2026.5.0-dev
Loading...
Searching...
No Matches
scheduler.h
Go to the documentation of this file.
1#pragma once
2
4#include <cstring>
5#include <string>
6#include <vector>
7#ifdef ESPHOME_THREAD_MULTI_ATOMICS
8#include <atomic>
9#endif
10
12#include "esphome/core/hal.h"
15
16namespace esphome {
17
// Forward declarations for types used by Scheduler's interface.
class Component;
struct RetryArgs;

// Forward declaration of retry_handler - needs to be non-static for friend declaration
void retry_handler(const std::shared_ptr<RetryArgs> &args);
23
24class Scheduler {
25 // Allow retry_handler to access protected members for internal retry mechanism
26 friend void ::esphome::retry_handler(const std::shared_ptr<RetryArgs> &args);
27 // Allow DelayAction to call set_timer_common_ with skip_cancel=true for parallel script delays.
28 // This is needed to fix issue #10264 where parallel scripts with delays interfere with each other.
29 // We use friend instead of a public API because skip_cancel is dangerous - it can cause delays
30 // to accumulate and overload the scheduler if misused.
31 template<typename... Ts> friend class DelayAction;
32
33 public:
34 // std::string overload - deprecated, use const char* or uint32_t instead
35 // Remove before 2026.7.0
36 ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
37 void set_timeout(Component *component, const std::string &name, uint32_t timeout, std::function<void()> &&func);
38
47 void set_timeout(Component *component, const char *name, uint32_t timeout, std::function<void()> &&func);
49 void set_timeout(Component *component, uint32_t id, uint32_t timeout, std::function<void()> &&func);
51 void set_timeout(Component *component, InternalSchedulerID id, uint32_t timeout, std::function<void()> &&func) {
52 this->set_timer_common_(component, SchedulerItem::TIMEOUT, NameType::NUMERIC_ID_INTERNAL, nullptr,
53 static_cast<uint32_t>(id), timeout, std::move(func));
54 }
55
56 ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
57 bool cancel_timeout(Component *component, const std::string &name);
58 bool cancel_timeout(Component *component, const char *name);
59 bool cancel_timeout(Component *component, uint32_t id);
60 bool cancel_timeout(Component *component, InternalSchedulerID id) {
61 return this->cancel_item_(component, NameType::NUMERIC_ID_INTERNAL, nullptr, static_cast<uint32_t>(id),
62 SchedulerItem::TIMEOUT);
63 }
64
65 ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
66 void set_interval(Component *component, const std::string &name, uint32_t interval, std::function<void()> &&func);
67
76 void set_interval(Component *component, const char *name, uint32_t interval, std::function<void()> &&func);
78 void set_interval(Component *component, uint32_t id, uint32_t interval, std::function<void()> &&func);
80 void set_interval(Component *component, InternalSchedulerID id, uint32_t interval, std::function<void()> &&func) {
81 this->set_timer_common_(component, SchedulerItem::INTERVAL, NameType::NUMERIC_ID_INTERNAL, nullptr,
82 static_cast<uint32_t>(id), interval, std::move(func));
83 }
84
85 ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
86 bool cancel_interval(Component *component, const std::string &name);
87 bool cancel_interval(Component *component, const char *name);
88 bool cancel_interval(Component *component, uint32_t id);
89 bool cancel_interval(Component *component, InternalSchedulerID id) {
90 return this->cancel_item_(component, NameType::NUMERIC_ID_INTERNAL, nullptr, static_cast<uint32_t>(id),
91 SchedulerItem::INTERVAL);
92 }
93
94 // Remove before 2026.8.0
95 ESPDEPRECATED("set_retry is deprecated and will be removed in 2026.8.0. Use set_timeout or set_interval instead.",
96 "2026.2.0")
97 void set_retry(Component *component, const std::string &name, uint32_t initial_wait_time, uint8_t max_attempts,
98 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);
99 // Remove before 2026.8.0
100 ESPDEPRECATED("set_retry is deprecated and will be removed in 2026.8.0. Use set_timeout or set_interval instead.",
101 "2026.2.0")
102 void set_retry(Component *component, const char *name, uint32_t initial_wait_time, uint8_t max_attempts,
103 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);
104 // Remove before 2026.8.0
105 ESPDEPRECATED("set_retry is deprecated and will be removed in 2026.8.0. Use set_timeout or set_interval instead.",
106 "2026.2.0")
107 void set_retry(Component *component, uint32_t id, uint32_t initial_wait_time, uint8_t max_attempts,
108 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);
109
110 // Remove before 2026.8.0
111 ESPDEPRECATED("cancel_retry is deprecated and will be removed in 2026.8.0.", "2026.2.0")
112 bool cancel_retry(Component *component, const std::string &name);
113 // Remove before 2026.8.0
114 ESPDEPRECATED("cancel_retry is deprecated and will be removed in 2026.8.0.", "2026.2.0")
115 bool cancel_retry(Component *component, const char *name);
116 // Remove before 2026.8.0
117 ESPDEPRECATED("cancel_retry is deprecated and will be removed in 2026.8.0.", "2026.2.0")
118 bool cancel_retry(Component *component, uint32_t id);
119
121 uint64_t millis_64() { return esphome::millis_64(); }
122
123 // Calculate when the next scheduled item should run.
124 // @param now On ESP32, unused for 64-bit extension (native); on other platforms, extended to 64-bit via rollover.
125 // Returns the time in milliseconds until the next scheduled item, or nullopt if no items.
126 // This method performs cleanup of removed items before checking the schedule.
127 // IMPORTANT: This method should only be called from the main thread (loop task).
128 optional<uint32_t> next_schedule_in(uint32_t now);
129
130 // Execute all scheduled items that are ready
131 // @param now Fresh timestamp from millis() - must not be stale/cached
132 void call(uint32_t now);
133
134 // Move items from to_add_ into the main heap.
135 // IMPORTANT: This method should only be called from the main thread (loop task).
136 // Inlined: the fast path (nothing to add) is just an atomic load / empty check.
137 // The lock-free fast path uses to_add_count_ (atomic) or to_add_.empty()
138 // (single-threaded). This is safe because the main loop is the only thread
139 // that reads to_add_ without holding lock_; other threads may read it only
140 // while holding the mutex (e.g. cancel_item_locked_).
141 inline void ESPHOME_ALWAYS_INLINE HOT process_to_add() {
142 if (this->to_add_empty_())
143 return;
144 this->process_to_add_slow_path_();
145 }
146
147 // Name storage type discriminator for SchedulerItem
148 // Used to distinguish between static strings, hashed strings, numeric IDs, and internal numeric IDs
149 enum class NameType : uint8_t {
150 STATIC_STRING = 0, // const char* pointer to static/flash storage
151 HASHED_STRING = 1, // uint32_t FNV-1a hash of a runtime string
152 NUMERIC_ID = 2, // uint32_t numeric identifier (component-level)
153 NUMERIC_ID_INTERNAL = 3 // uint32_t numeric identifier (core/internal, separate namespace)
154 };
155
156 protected:
157 struct SchedulerItem {
158 // Ordered by size to minimize padding
159 Component *component;
160 // Optimized name storage using tagged union - zero heap allocation
161 union {
162 const char *static_name; // For STATIC_STRING (string literals, no allocation)
163 uint32_t hash_or_id; // For HASHED_STRING or NUMERIC_ID
164 } name_;
165 uint32_t interval;
166 // Split time to handle millis() rollover. The scheduler combines the 32-bit millis()
167 // with a 16-bit rollover counter to create a 48-bit time space (using 32+16 bits).
168 // This is intentionally limited to 48 bits, not stored as a full 64-bit value.
169 // With 49.7 days per 32-bit rollover, the 16-bit counter supports
170 // 49.7 days × 65536 = ~8900 years. This ensures correct scheduling
171 // even when devices run for months. Split into two fields for better memory
172 // alignment on 32-bit systems.
173 uint32_t next_execution_low_; // Lower 32 bits of execution time (millis value)
174 std::function<void()> callback;
175 uint16_t next_execution_high_; // Upper 16 bits (millis_major counter)
176
177#ifdef ESPHOME_THREAD_MULTI_ATOMICS
178 // Multi-threaded with atomics: use atomic uint8_t for lock-free access.
179 // std::atomic<bool> is not used because GCC on Xtensa generates an indirect
180 // function call for std::atomic<bool>::load() instead of inlining it.
181 // std::atomic<uint8_t> inlines correctly on all platforms.
182 std::atomic<uint8_t> remove{0};
183
184 // Bit-packed fields (4 bits used, 4 bits padding in 1 byte)
185 enum Type : uint8_t { TIMEOUT, INTERVAL } type : 1;
186 NameType name_type_ : 2; // Discriminator for name_ union (0–3, see NameType enum)
187 bool is_retry : 1; // True if this is a retry timeout
188 // 4 bits padding
189#else
190 // Single-threaded or multi-threaded without atomics: can pack all fields together
191 // Bit-packed fields (5 bits used, 3 bits padding in 1 byte)
192 enum Type : uint8_t { TIMEOUT, INTERVAL } type : 1;
193 bool remove : 1;
194 NameType name_type_ : 2; // Discriminator for name_ union (0–3, see NameType enum)
195 bool is_retry : 1; // True if this is a retry timeout
196 // 3 bits padding
197#endif
198
199 // Constructor
200 SchedulerItem()
201 : component(nullptr),
202 interval(0),
203 next_execution_low_(0),
204 next_execution_high_(0),
205#ifdef ESPHOME_THREAD_MULTI_ATOMICS
206 // remove is initialized in the member declaration
207 type(TIMEOUT),
208 name_type_(NameType::STATIC_STRING),
209 is_retry(false) {
210#else
211 type(TIMEOUT),
212 remove(false),
213 name_type_(NameType::STATIC_STRING),
214 is_retry(false) {
215#endif
216 name_.static_name = nullptr;
217 }
218
219 // Destructor - no dynamic memory to clean up (callback's std::function handles its own)
220 ~SchedulerItem() = default;
221
222 // Delete copy operations to prevent accidental copies
223 SchedulerItem(const SchedulerItem &) = delete;
224 SchedulerItem &operator=(const SchedulerItem &) = delete;
225
226 // Delete move operations: SchedulerItem objects are managed via raw pointers, never moved directly
227 SchedulerItem(SchedulerItem &&) = delete;
228 SchedulerItem &operator=(SchedulerItem &&) = delete;
229
230 // Helper to get the static name (only valid for STATIC_STRING type)
231 const char *get_name() const { return (name_type_ == NameType::STATIC_STRING) ? name_.static_name : nullptr; }
232
233 // Helper to get the hash or numeric ID (only valid for HASHED_STRING or NUMERIC_ID types)
234 uint32_t get_name_hash_or_id() const { return (name_type_ != NameType::STATIC_STRING) ? name_.hash_or_id : 0; }
235
236 // Helper to get the name type
237 NameType get_name_type() const { return name_type_; }
238
239 // Set name storage: for STATIC_STRING stores the pointer, for all other types stores hash_or_id.
240 // Both union members occupy the same offset, so only one store is needed.
241 void set_name(NameType type, const char *static_name, uint32_t hash_or_id) {
242 if (type == NameType::STATIC_STRING) {
243 name_.static_name = static_name;
244 } else {
245 name_.hash_or_id = hash_or_id;
246 }
247 name_type_ = type;
248 }
249
250 static bool cmp(SchedulerItem *a, SchedulerItem *b);
251
252 // Note: We use 48 bits total (32 + 16), stored in a 64-bit value for API compatibility.
253 // The upper 16 bits of the 64-bit value are always zero, which is fine since
254 // millis_major_ is also 16 bits and they must match.
255 constexpr uint64_t get_next_execution() const {
256 return (static_cast<uint64_t>(next_execution_high_) << 32) | next_execution_low_;
257 }
258
259 constexpr void set_next_execution(uint64_t value) {
260 next_execution_low_ = static_cast<uint32_t>(value);
261 // Cast to uint16_t intentionally truncates to lower 16 bits of the upper 32 bits.
262 // This is correct because millis_major_ that creates these values is also 16 bits.
263 next_execution_high_ = static_cast<uint16_t>(value >> 32);
264 }
265 constexpr const char *get_type_str() const { return (type == TIMEOUT) ? "timeout" : "interval"; }
266 const LogString *get_source() const { return component ? component->get_component_log_str() : LOG_STR("unknown"); }
267 };
268
269 // Common implementation for both timeout and interval
270 // name_type determines storage type: STATIC_STRING uses static_name, others use hash_or_id
271 void set_timer_common_(Component *component, SchedulerItem::Type type, NameType name_type, const char *static_name,
272 uint32_t hash_or_id, uint32_t delay, std::function<void()> &&func, bool is_retry = false,
273 bool skip_cancel = false);
274
275 // Common implementation for retry - Remove before 2026.8.0
276 // name_type determines storage type: STATIC_STRING uses static_name, others use hash_or_id
277#pragma GCC diagnostic push
278#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
279 void set_retry_common_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id,
280 uint32_t initial_wait_time, uint8_t max_attempts, std::function<RetryResult(uint8_t)> func,
281 float backoff_increase_factor);
282#pragma GCC diagnostic pop
283 // Common implementation for cancel_retry
284 bool cancel_retry_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id);
285
286 // Extend a 32-bit millis() value to 64-bit. Use when the caller already has a fresh now.
287 // On platforms with native 64-bit time, ignores now and uses millis_64() directly.
288 // On other platforms, extends now to 64-bit using rollover tracking.
289 uint64_t ESPHOME_ALWAYS_INLINE millis_64_from_(uint32_t now) {
290#ifdef USE_NATIVE_64BIT_TIME
291 (void) now;
292 return millis_64();
293#else
294 return Millis64Impl::compute(now);
295#endif
296 }
297 // Cleanup logically deleted items from the scheduler
298 // Returns true if items remain after cleanup
299 // IMPORTANT: This method should only be called from the main thread (loop task).
300 // Inlined: the fast path (nothing to remove) is just an atomic load + empty check.
301 // Reading items_.empty() without the lock is safe here because only the main
302 // loop thread structurally modifies items_ (push/pop/erase). Other threads may
303 // iterate items_ and mark items removed under lock_, but never change the
304 // vector's size or data pointer.
305 inline bool ESPHOME_ALWAYS_INLINE HOT cleanup_() {
306 if (this->to_remove_empty_())
307 return !this->items_.empty();
308 return this->cleanup_slow_path_();
309 }
310 // Slow path for cleanup_() when there are items to remove - defined in scheduler.cpp
311 bool cleanup_slow_path_();
312 // Slow path for process_to_add() when there are items to merge - defined in scheduler.cpp
313 void process_to_add_slow_path_();
314 // Remove and return the front item from the heap as a raw pointer.
315 // Caller takes ownership and must either recycle or delete the item.
316 // IMPORTANT: Caller must hold the scheduler lock before calling this function.
317 SchedulerItem *pop_raw_locked_();
318 // Get or create a scheduler item from the pool
319 // IMPORTANT: Caller must hold the scheduler lock before calling this function.
320 SchedulerItem *get_item_from_pool_locked_();
321
322 private:
323 // Helper to cancel matching items - must be called with lock held.
324 // When find_first=true, stops after the first match (used by set_timer_common_ where
325 // the cancel-before-add invariant guarantees at most one match).
326 // When find_first=false (default), cancels ALL matches (needed for DelayAction parallel
327 // mode where skip_cancel=true allows multiple items with the same key).
328 // name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
329 bool cancel_item_locked_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id,
330 SchedulerItem::Type type, bool match_retry = false, bool find_first = false);
331
332 // Common implementation for cancel operations - handles locking
333 bool cancel_item_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id,
334 SchedulerItem::Type type, bool match_retry = false);
335
336 // Helper to check if two static string names match
337 inline bool HOT names_match_static_(const char *name1, const char *name2) const {
338 // Check pointer equality first (common for static strings), then string contents
339 // The core ESPHome codebase uses static strings (const char*) for component names,
340 // making pointer comparison effective. The std::string overloads exist only for
341 // compatibility with external components but are rarely used in practice.
342 return (name1 != nullptr && name2 != nullptr) && ((name1 == name2) || (strcmp(name1, name2) == 0));
343 }
344
345 // Helper function to check if item matches criteria for cancellation
346 // name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
347 // IMPORTANT: Must be called with scheduler lock held
348 inline bool HOT matches_item_locked_(SchedulerItem *item, Component *component, NameType name_type,
349 const char *static_name, uint32_t hash_or_id, SchedulerItem::Type type,
350 bool match_retry, bool skip_removed = true) const {
351 // THREAD SAFETY: Check for nullptr first to prevent LoadProhibited crashes. On multi-threaded
352 // platforms, items can be nulled in defer_queue_ during processing.
353 // Fixes: https://github.com/esphome/esphome/issues/11940
354 if (item == nullptr)
355 return false;
356 if (item->component != component || item->type != type || (skip_removed && this->is_item_removed_locked_(item)) ||
357 (match_retry && !item->is_retry)) {
358 return false;
359 }
360 // Name type must match
361 if (item->get_name_type() != name_type)
362 return false;
363 // For static strings, compare the string content; for hash/ID, compare the value
364 if (name_type == NameType::STATIC_STRING) {
365 return this->names_match_static_(item->get_name(), static_name);
366 }
367 return item->get_name_hash_or_id() == hash_or_id;
368 }
369
370 // Helper to execute a scheduler item
371 uint32_t execute_item_(SchedulerItem *item, uint32_t now);
372
373 // Helper to check if item should be skipped
374 bool should_skip_item_(SchedulerItem *item) const {
375 return is_item_removed_(item) || (item->component != nullptr && item->component->is_failed());
376 }
377
378 // Helper to recycle a SchedulerItem back to the pool.
379 // Takes a raw pointer — caller transfers ownership. The item is either added to the
380 // pool or deleted if the pool is full.
381 // IMPORTANT: Only call from main loop context! Recycling clears the callback,
382 // so calling from another thread while the callback is executing causes use-after-free.
383 // IMPORTANT: Caller must hold the scheduler lock before calling this function.
384 void recycle_item_main_loop_(SchedulerItem *item);
385
386 // Helper to perform full cleanup when too many items are cancelled
387 void full_cleanup_removed_items_();
388
389 // Helper to calculate random offset for interval timers - extracted to reduce code size of set_timer_common_
390 // IMPORTANT: Must not be inlined - called only for intervals, keeping it out of the hot path saves flash.
391 uint32_t __attribute__((noinline)) calculate_interval_offset_(uint32_t delay);
392
393 // Helper to check if a retry was already cancelled - extracted to reduce code size of set_timer_common_
394 // Remove before 2026.8.0 along with all retry code.
395 // IMPORTANT: Must not be inlined - retry path is cold and deprecated.
396 // IMPORTANT: Caller must hold the scheduler lock before calling this function.
397 bool __attribute__((noinline))
398 is_retry_cancelled_locked_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id);
399
400#ifdef ESPHOME_DEBUG_SCHEDULER
401 // Helper for debug logging in set_timer_common_ - extracted to reduce code size
402 void debug_log_timer_(const SchedulerItem *item, NameType name_type, const char *static_name, uint32_t hash_or_id,
403 SchedulerItem::Type type, uint32_t delay, uint64_t now);
404#endif /* ESPHOME_DEBUG_SCHEDULER */
405
406#ifndef ESPHOME_THREAD_SINGLE
407 // Process defer queue for FIFO execution of deferred items.
408 // IMPORTANT: This method should only be called from the main thread (loop task).
409 // Inlined: the fast path (nothing deferred) is just an atomic load check.
410 inline void ESPHOME_ALWAYS_INLINE HOT process_defer_queue_(uint32_t &now) {
411 // Fast path: nothing to process, avoid lock entirely.
412 // Worst case is a one-loop-iteration delay before newly deferred items are processed.
413 if (this->defer_empty_())
414 return;
415 this->process_defer_queue_slow_path_(now);
416 }
417
418 // Slow path for process_defer_queue_() - defined in scheduler.cpp
419 void process_defer_queue_slow_path_(uint32_t &now);
420
421 // Helper to cleanup defer_queue_ after processing.
422 // Keeps the common clear() path inline, outlines the rare compaction to keep
423 // cold code out of the hot instruction cache lines.
424 // IMPORTANT: Caller must hold the scheduler lock before calling this function.
425 inline void cleanup_defer_queue_locked_() {
426 // Check if new items were added by producers during processing
427 if (this->defer_queue_front_ >= this->defer_queue_.size()) {
428 // Common case: no new items - clear everything
429 this->defer_queue_.clear();
430 } else {
431 // Rare case: new items were added during processing - outlined to keep cold code
432 // out of the hot instruction cache lines
433 this->compact_defer_queue_locked_();
434 }
435 this->defer_queue_front_ = 0;
436 }
437
438 // Cold path for compacting defer_queue_ when new items were added during processing.
439 // IMPORTANT: Caller must hold the scheduler lock before calling this function.
440 // IMPORTANT: Must not be inlined - rare path, outlined to keep it out of the hot instruction cache lines.
441 void __attribute__((noinline)) compact_defer_queue_locked_();
442#endif /* not ESPHOME_THREAD_SINGLE */
443
444 // Helper to check if item is marked for removal (platform-specific)
445 // Returns true if item should be skipped, handles platform-specific synchronization
446 // For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this
447 // function.
448 bool is_item_removed_(SchedulerItem *item) const {
449#ifdef ESPHOME_THREAD_MULTI_ATOMICS
450 // Multi-threaded with atomics: use atomic load for lock-free access
451 return item->remove.load(std::memory_order_acquire);
452#else
453 // Single-threaded (ESPHOME_THREAD_SINGLE) or
454 // multi-threaded without atomics (ESPHOME_THREAD_MULTI_NO_ATOMICS): direct read
455 // For ESPHOME_THREAD_MULTI_NO_ATOMICS, caller MUST hold lock!
456 return item->remove;
457#endif
458 }
459
460 // Helper to check if item is marked for removal when lock is already held.
461 // Uses relaxed ordering since the mutex provides all necessary synchronization.
462 // IMPORTANT: Caller must hold the scheduler lock before calling this function.
463 bool is_item_removed_locked_(SchedulerItem *item) const {
464#ifdef ESPHOME_THREAD_MULTI_ATOMICS
465 // Lock already held - relaxed is sufficient, mutex provides ordering
466 return item->remove.load(std::memory_order_relaxed);
467#else
468 return item->remove;
469#endif
470 }
471
472 // Helper to set item removal flag (platform-specific)
473 // For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this
474 // function. Uses memory_order_release when setting to true (for cancellation synchronization),
475 // and memory_order_relaxed when setting to false (for initialization).
476 void set_item_removed_(SchedulerItem *item, bool removed) {
477#ifdef ESPHOME_THREAD_MULTI_ATOMICS
478 // Multi-threaded with atomics: use atomic store with appropriate ordering
479 // Release ordering when setting to true ensures cancellation is visible to other threads
480 // Relaxed ordering when setting to false is sufficient for initialization
481 item->remove.store(removed ? 1 : 0, removed ? std::memory_order_release : std::memory_order_relaxed);
482#else
483 // Single-threaded (ESPHOME_THREAD_SINGLE) or
484 // multi-threaded without atomics (ESPHOME_THREAD_MULTI_NO_ATOMICS): direct write
485 // For ESPHOME_THREAD_MULTI_NO_ATOMICS, caller MUST hold lock!
486 item->remove = removed;
487#endif
488 }
489
490 // Helper to mark matching items in a container as removed.
491 // When find_first=true, stops after the first match (used by set_timer_common_ where
492 // the cancel-before-add invariant guarantees at most one match).
493 // When find_first=false, marks ALL matches (needed for public cancel path where
494 // DelayAction parallel mode with skip_cancel=true can create multiple items with the same key).
495 // name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
496 // Returns the number of items marked for removal.
497 // IMPORTANT: Must be called with scheduler lock held
498 // Inlined: the fast path (empty container) avoids calling the out-of-line scan.
499 inline size_t HOT mark_matching_items_removed_locked_(std::vector<SchedulerItem *> &container, Component *component,
500 NameType name_type, const char *static_name,
501 uint32_t hash_or_id, SchedulerItem::Type type, bool match_retry,
502 bool find_first = false) {
503 if (container.empty())
504 return 0;
505 return this->mark_matching_items_removed_slow_locked_(container, component, name_type, static_name, hash_or_id,
506 type, match_retry, find_first);
507 }
508
509 // Out-of-line slow path for mark_matching_items_removed_locked_ when container is non-empty.
510 // IMPORTANT: Must be called with scheduler lock held
511 __attribute__((noinline)) size_t mark_matching_items_removed_slow_locked_(
512 std::vector<SchedulerItem *> &container, Component *component, NameType name_type, const char *static_name,
513 uint32_t hash_or_id, SchedulerItem::Type type, bool match_retry, bool find_first);
514
515 Mutex lock_;
516 std::vector<SchedulerItem *> items_;
517 std::vector<SchedulerItem *> to_add_;
518
519#ifndef ESPHOME_THREAD_SINGLE
520 // Fast-path counter for process_to_add() to skip taking the lock when there is
521 // nothing to add. Uses std::atomic on platforms that support it, plain uint32_t
522 // otherwise. On non-atomic platforms, callers must hold the scheduler lock when
523 // mutating this counter. Not needed on single-threaded platforms where we can
524 // check to_add_.empty() directly.
525#ifdef ESPHOME_THREAD_MULTI_ATOMICS
526 std::atomic<uint32_t> to_add_count_{0};
527#else
528 uint32_t to_add_count_{0};
529#endif
530#endif /* ESPHOME_THREAD_SINGLE */
531
532 // Fast-path helper for process_to_add() to decide if it can try the lock-free path.
533 // - On ESPHOME_THREAD_SINGLE: direct container check is safe (no concurrent writers).
534 // - On ESPHOME_THREAD_MULTI_ATOMICS: performs a lock-free check via to_add_count_.
535 // - On ESPHOME_THREAD_MULTI_NO_ATOMICS: always returns false to force the caller
536 // down the locked path; this is NOT a lock-free emptiness check on that platform.
537 bool to_add_empty_() const {
538#ifdef ESPHOME_THREAD_SINGLE
539 return this->to_add_.empty();
540#elif defined(ESPHOME_THREAD_MULTI_ATOMICS)
541 return this->to_add_count_.load(std::memory_order_relaxed) == 0;
542#else
543 return false;
544#endif
545 }
546
547 // Increment to_add_count_ (no-op on single-threaded platforms)
548 void to_add_count_increment_() {
549#ifdef ESPHOME_THREAD_SINGLE
550 // No counter needed — to_add_empty_() checks the vector directly
551#elif defined(ESPHOME_THREAD_MULTI_ATOMICS)
552 this->to_add_count_.fetch_add(1, std::memory_order_relaxed);
553#else
554 this->to_add_count_++;
555#endif
556 }
557
558 // Reset to_add_count_ (no-op on single-threaded platforms)
559 void to_add_count_clear_() {
560#ifdef ESPHOME_THREAD_SINGLE
561 // No counter needed — to_add_empty_() checks the vector directly
562#elif defined(ESPHOME_THREAD_MULTI_ATOMICS)
563 this->to_add_count_.store(0, std::memory_order_relaxed);
564#else
565 this->to_add_count_ = 0;
566#endif
567 }
568
569#ifndef ESPHOME_THREAD_SINGLE
570 // Single-core platforms don't need the defer queue and save ~32 bytes of RAM
571 // Using std::vector instead of std::deque avoids 512-byte chunked allocations
572 // Index tracking avoids O(n) erase() calls when draining the queue each loop
573 std::vector<SchedulerItem *> defer_queue_; // FIFO queue for defer() calls
574 size_t defer_queue_front_{0}; // Index of first valid item in defer_queue_ (tracks consumed items)
575
576 // Fast-path counter for process_defer_queue_() to skip lock when nothing to process.
577#ifdef ESPHOME_THREAD_MULTI_ATOMICS
578 std::atomic<uint32_t> defer_count_{0};
579#else
580 uint32_t defer_count_{0};
581#endif
582
583 bool defer_empty_() const {
584 // defer_queue_ only exists on multi-threaded platforms, so no ESPHOME_THREAD_SINGLE path
585 // ESPHOME_THREAD_MULTI_NO_ATOMICS: always take the lock
586#ifdef ESPHOME_THREAD_MULTI_ATOMICS
587 return this->defer_count_.load(std::memory_order_relaxed) == 0;
588#else
589 return false;
590#endif
591 }
592
593 void defer_count_increment_() {
594#ifdef ESPHOME_THREAD_MULTI_ATOMICS
595 this->defer_count_.fetch_add(1, std::memory_order_relaxed);
596#else
597 this->defer_count_++;
598#endif
599 }
600
601 void defer_count_clear_() {
602#ifdef ESPHOME_THREAD_MULTI_ATOMICS
603 this->defer_count_.store(0, std::memory_order_relaxed);
604#else
605 this->defer_count_ = 0;
606#endif
607 }
608
609#endif /* ESPHOME_THREAD_SINGLE */
610
611 // Counter for items marked for removal. Incremented cross-thread in cancel_item_locked_().
612 // On ESPHOME_THREAD_MULTI_ATOMICS this is read without a lock in the cleanup_() fast path;
613 // on ESPHOME_THREAD_MULTI_NO_ATOMICS the fast path is disabled so cleanup_() always takes the lock.
614#ifdef ESPHOME_THREAD_MULTI_ATOMICS
615 std::atomic<uint32_t> to_remove_{0};
616#else
617 uint32_t to_remove_{0};
618#endif
619
620 // Lock-free check if there are items to remove (for fast-path in cleanup_)
621 bool to_remove_empty_() const {
622#ifdef ESPHOME_THREAD_MULTI_ATOMICS
623 return this->to_remove_.load(std::memory_order_relaxed) == 0;
624#elif defined(ESPHOME_THREAD_SINGLE)
625 return this->to_remove_ == 0;
626#else
627 return false; // Always take the lock path
628#endif
629 }
630
631 void to_remove_add_(uint32_t count) {
632#ifdef ESPHOME_THREAD_MULTI_ATOMICS
633 this->to_remove_.fetch_add(count, std::memory_order_relaxed);
634#else
635 this->to_remove_ += count;
636#endif
637 }
638
639 void to_remove_decrement_() {
640#ifdef ESPHOME_THREAD_MULTI_ATOMICS
641 this->to_remove_.fetch_sub(1, std::memory_order_relaxed);
642#else
643 this->to_remove_--;
644#endif
645 }
646
647 void to_remove_clear_() {
648#ifdef ESPHOME_THREAD_MULTI_ATOMICS
649 this->to_remove_.store(0, std::memory_order_relaxed);
650#else
651 this->to_remove_ = 0;
652#endif
653 }
654
655 uint32_t to_remove_count_() const {
656#ifdef ESPHOME_THREAD_MULTI_ATOMICS
657 return this->to_remove_.load(std::memory_order_relaxed);
658#else
659 return this->to_remove_;
660#endif
661 }
662
663 // Memory pool for recycling SchedulerItem objects to reduce heap churn.
664 // Design decisions:
665 // - std::vector is used instead of a fixed array because many systems only need 1-2 scheduler items
666 // - The vector grows dynamically up to MAX_POOL_SIZE (5) only when needed, saving memory on simple setups
667 // - Pool size of 5 matches typical usage (2-4 timers) while keeping memory overhead low (~250 bytes on ESP32)
668 // - The pool significantly reduces heap fragmentation which is critical because heap allocation/deallocation
669 // can stall the entire system, causing timing issues and dropped events for any components that need
670 // to synchronize between tasks (see https://github.com/esphome/backlog/issues/52)
671 std::vector<SchedulerItem *> scheduler_item_pool_;
672
673#ifdef ESPHOME_DEBUG_SCHEDULER
674 // Leak detection: tracks total live SchedulerItem allocations.
675 // Invariant: debug_live_items_ == items_.size() + to_add_.size() + defer_queue_.size() + scheduler_item_pool_.size()
676 // Verified periodically in call() to catch leaks early.
677 size_t debug_live_items_{0};
678
679 // Verify the scheduler memory invariant: all allocated items are accounted for.
680 // Returns true if no leak detected. Logs an error and asserts on failure.
681 bool debug_verify_no_leak_() const;
682#endif
683};
684
685} // namespace esphome
struct @65::@66 __attribute__
Wake the main loop task from an ISR. ISR-safe.
Definition main_task.h:32
const Component * component
Definition component.cpp:34
uint16_t type
ESPDEPRECATED("Use modbus::helpers::value_type_is_float() instead. Removed in 2026.10.0", "2026.4.0") inline bool value_type_is_float(SensorValueType v)
Providing packet encoding functions for exchanging data with a remote host.
Definition a01nyub.cpp:7
void retry_handler(const std::shared_ptr< RetryArgs > &args)
const char int const __FlashStringHelper va_list args
Definition log.h:74
uint64_t HOT millis_64()
Definition core.cpp:27
static void uint32_t