ESPHome 2026.3.0-dev
Loading...
Searching...
No Matches
scheduler.h
Go to the documentation of this file.
1#pragma once
2
4#include <cstring>
5#include <memory>
6#include <string>
7#include <vector>
8#ifdef ESPHOME_THREAD_MULTI_ATOMICS
9#include <atomic>
10#endif
11
13#include "esphome/core/hal.h"
16
17namespace esphome {
18
19class Component;
20struct RetryArgs;
21
22// Forward declaration of retry_handler - needs to be non-static for friend declaration
23void retry_handler(const std::shared_ptr<RetryArgs> &args);
24
/// Millis-rollover-safe timer scheduler used by all ESPHome components.
///
/// Supports one-shot timeouts and repeating intervals keyed by a
/// (component, name) pair, where the name may be a static C string, a hashed
/// runtime string, or a numeric ID (see NameType). Items live in a heap
/// (items_) plus a pending-add list (to_add_); on multi-threaded platforms a
/// FIFO defer queue handles zero-delay callbacks. Thread-safety strategy is
/// selected by the ESPHOME_THREAD_* macros (atomics, lock-only, or none).
class Scheduler {
  // Allow retry_handler to access protected members for internal retry mechanism
  friend void ::esphome::retry_handler(const std::shared_ptr<RetryArgs> &args);
  // Allow DelayAction to call set_timer_common_ with skip_cancel=true for parallel script delays.
  // This is needed to fix issue #10264 where parallel scripts with delays interfere with each other.
  // We use friend instead of a public API because skip_cancel is dangerous - it can cause delays
  // to accumulate and overload the scheduler if misused.
  template<typename... Ts> friend class DelayAction;

 public:
  // std::string overload - deprecated, use const char* or uint32_t instead
  // Remove before 2026.7.0
  ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
  void set_timeout(Component *component, const std::string &name, uint32_t timeout, std::function<void()> &&func);

  /// Schedule a one-shot timeout named by a static string (must outlive the item).
  /// Presumably replaces any pending timeout with the same (component, name);
  /// see the skip_cancel note on DelayAction above — confirm in scheduler.cpp.
  void set_timeout(Component *component, const char *name, uint32_t timeout, std::function<void()> &&func);
  /// Schedule a one-shot timeout identified by a component-level numeric ID.
  void set_timeout(Component *component, uint32_t id, uint32_t timeout, std::function<void()> &&func);
  /// Schedule a one-shot timeout identified by a core/internal numeric ID
  /// (separate namespace from component-level IDs, see NameType).
  void set_timeout(Component *component, InternalSchedulerID id, uint32_t timeout, std::function<void()> &&func) {
    this->set_timer_common_(component, SchedulerItem::TIMEOUT, NameType::NUMERIC_ID_INTERNAL, nullptr,
                            static_cast<uint32_t>(id), timeout, std::move(func));
  }

  // Remove before 2026.7.0
  ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
  bool cancel_timeout(Component *component, const std::string &name);
  /// Cancel a pending timeout by static-string name. Returns true if a matching item was found.
  bool cancel_timeout(Component *component, const char *name);
  /// Cancel a pending timeout by component-level numeric ID.
  bool cancel_timeout(Component *component, uint32_t id);
  /// Cancel a pending timeout by internal numeric ID.
  bool cancel_timeout(Component *component, InternalSchedulerID id) {
    return this->cancel_item_(component, NameType::NUMERIC_ID_INTERNAL, nullptr, static_cast<uint32_t>(id),
                              SchedulerItem::TIMEOUT);
  }

  // Remove before 2026.7.0
  ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
  void set_interval(Component *component, const std::string &name, uint32_t interval, std::function<void()> &&func);

  /// Schedule a repeating interval named by a static string (must outlive the item).
  void set_interval(Component *component, const char *name, uint32_t interval, std::function<void()> &&func);
  /// Schedule a repeating interval identified by a component-level numeric ID.
  void set_interval(Component *component, uint32_t id, uint32_t interval, std::function<void()> &&func);
  /// Schedule a repeating interval identified by a core/internal numeric ID.
  void set_interval(Component *component, InternalSchedulerID id, uint32_t interval, std::function<void()> &&func) {
    this->set_timer_common_(component, SchedulerItem::INTERVAL, NameType::NUMERIC_ID_INTERNAL, nullptr,
                            static_cast<uint32_t>(id), interval, std::move(func));
  }

  // Remove before 2026.7.0
  ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
  bool cancel_interval(Component *component, const std::string &name);
  /// Cancel a pending interval by static-string name. Returns true if a matching item was found.
  bool cancel_interval(Component *component, const char *name);
  /// Cancel a pending interval by component-level numeric ID.
  bool cancel_interval(Component *component, uint32_t id);
  /// Cancel a pending interval by internal numeric ID.
  bool cancel_interval(Component *component, InternalSchedulerID id) {
    return this->cancel_item_(component, NameType::NUMERIC_ID_INTERNAL, nullptr, static_cast<uint32_t>(id),
                              SchedulerItem::INTERVAL);
  }

  // Remove before 2026.8.0
  ESPDEPRECATED("set_retry is deprecated and will be removed in 2026.8.0. Use set_timeout or set_interval instead.",
                "2026.2.0")
  void set_retry(Component *component, const std::string &name, uint32_t initial_wait_time, uint8_t max_attempts,
                 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);
  // Remove before 2026.8.0
  ESPDEPRECATED("set_retry is deprecated and will be removed in 2026.8.0. Use set_timeout or set_interval instead.",
                "2026.2.0")
  void set_retry(Component *component, const char *name, uint32_t initial_wait_time, uint8_t max_attempts,
                 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);
  // Remove before 2026.8.0
  ESPDEPRECATED("set_retry is deprecated and will be removed in 2026.8.0. Use set_timeout or set_interval instead.",
                "2026.2.0")
  void set_retry(Component *component, uint32_t id, uint32_t initial_wait_time, uint8_t max_attempts,
                 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);

  // Remove before 2026.8.0
  ESPDEPRECATED("cancel_retry is deprecated and will be removed in 2026.8.0.", "2026.2.0")
  bool cancel_retry(Component *component, const std::string &name);
  // Remove before 2026.8.0
  ESPDEPRECATED("cancel_retry is deprecated and will be removed in 2026.8.0.", "2026.2.0")
  bool cancel_retry(Component *component, const char *name);
  // Remove before 2026.8.0
  ESPDEPRECATED("cancel_retry is deprecated and will be removed in 2026.8.0.", "2026.2.0")
  bool cancel_retry(Component *component, uint32_t id);

  /// Convenience forwarder to the global 64-bit milliseconds counter.
  uint64_t millis_64() { return esphome::millis_64(); }

  // Calculate when the next scheduled item should run.
  // @param now On ESP32, unused for 64-bit extension (native); on other platforms, extended to 64-bit via rollover.
  // Returns the time in milliseconds until the next scheduled item, or nullopt if no items.
  // This method performs cleanup of removed items before checking the schedule.
  // IMPORTANT: This method should only be called from the main thread (loop task).
  optional<uint32_t> next_schedule_in(uint32_t now);

  // Execute all scheduled items that are ready
  // @param now Fresh timestamp from millis() - must not be stale/cached
  void call(uint32_t now);

  /// Merge items from to_add_ into the main heap (items_).
  void process_to_add();

  // Name storage type discriminator for SchedulerItem
  // Used to distinguish between static strings, hashed strings, numeric IDs, and internal numeric IDs
  enum class NameType : uint8_t {
    STATIC_STRING = 0,       // const char* pointer to static/flash storage
    HASHED_STRING = 1,       // uint32_t FNV-1a hash of a runtime string
    NUMERIC_ID = 2,          // uint32_t numeric identifier (component-level)
    NUMERIC_ID_INTERNAL = 3  // uint32_t numeric identifier (core/internal, separate namespace)
  };

 protected:
  struct SchedulerItem;

  // Custom deleter for SchedulerItem unique_ptr that prevents the compiler from
  // inlining the destructor at every destruction site. On BK7231N (Thumb-1), GCC
  // inlines ~unique_ptr<SchedulerItem> (~30 bytes: null check + ~std::function +
  // operator delete) at every destruction site, while ESP32/ESP8266/RTL8720CF outline
  // it into a single helper. This noinline deleter ensures only one copy exists.
  // operator() is defined in scheduler.cpp to prevent inlining.
  struct SchedulerItemDeleter {
    void operator()(SchedulerItem *ptr) const noexcept;
  };
  using SchedulerItemPtr = std::unique_ptr<SchedulerItem, SchedulerItemDeleter>;

  /// One scheduled timeout/interval. Layout is deliberately hand-packed; do not
  /// reorder members without re-checking padding on 32-bit targets.
  struct SchedulerItem {
    // Ordered by size to minimize padding
    Component *component;
    // Optimized name storage using tagged union - zero heap allocation.
    // Discriminated by name_type_ below; never read the inactive member.
    union {
      const char *static_name;  // For STATIC_STRING (string literals, no allocation)
      uint32_t hash_or_id;      // For HASHED_STRING or NUMERIC_ID
    } name_;
    uint32_t interval;
    // Split time to handle millis() rollover. The scheduler combines the 32-bit millis()
    // with a 16-bit rollover counter to create a 48-bit time space (using 32+16 bits).
    // This is intentionally limited to 48 bits, not stored as a full 64-bit value.
    // With 49.7 days per 32-bit rollover, the 16-bit counter supports
    // 49.7 days × 65536 = ~8900 years. This ensures correct scheduling
    // even when devices run for months. Split into two fields for better memory
    // alignment on 32-bit systems.
    uint32_t next_execution_low_;  // Lower 32 bits of execution time (millis value)
    std::function<void()> callback;
    uint16_t next_execution_high_;  // Upper 16 bits (millis_major counter)

#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    // Multi-threaded with atomics: use atomic for lock-free access
    // Place atomic<bool> separately since it can't be packed with bit fields
    std::atomic<bool> remove{false};

    // Bit-packed fields (4 bits used, 4 bits padding in 1 byte)
    enum Type : uint8_t { TIMEOUT, INTERVAL } type : 1;
    NameType name_type_ : 2;  // Discriminator for name_ union (0–3, see NameType enum)
    bool is_retry : 1;        // True if this is a retry timeout
    // 4 bits padding
#else
    // Single-threaded or multi-threaded without atomics: can pack all fields together
    // Bit-packed fields (5 bits used, 3 bits padding in 1 byte)
    enum Type : uint8_t { TIMEOUT, INTERVAL } type : 1;
    bool remove : 1;
    NameType name_type_ : 2;  // Discriminator for name_ union (0–3, see NameType enum)
    bool is_retry : 1;        // True if this is a retry timeout
    // 3 bits padding
#endif

    // Constructor - zero/default-initializes all fields; name defaults to a null static string.
    SchedulerItem()
        : component(nullptr),
          interval(0),
          next_execution_low_(0),
          next_execution_high_(0),
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
          // remove is initialized in the member declaration as std::atomic<bool>{false}
          type(TIMEOUT),
          name_type_(NameType::STATIC_STRING),
          is_retry(false) {
#else
          type(TIMEOUT),
          remove(false),
          name_type_(NameType::STATIC_STRING),
          is_retry(false) {
#endif
      name_.static_name = nullptr;
    }

    // Destructor - no dynamic memory to clean up
    ~SchedulerItem() = default;

    // Delete copy operations to prevent accidental copies
    SchedulerItem(const SchedulerItem &) = delete;
    SchedulerItem &operator=(const SchedulerItem &) = delete;

    // Delete move operations: SchedulerItem objects are only managed via unique_ptr, never moved directly
    SchedulerItem(SchedulerItem &&) = delete;
    SchedulerItem &operator=(SchedulerItem &&) = delete;

    // Helper to get the static name (only valid for STATIC_STRING type)
    const char *get_name() const { return (name_type_ == NameType::STATIC_STRING) ? name_.static_name : nullptr; }

    // Helper to get the hash or numeric ID (only valid for HASHED_STRING or NUMERIC_ID types)
    uint32_t get_name_hash_or_id() const { return (name_type_ != NameType::STATIC_STRING) ? name_.hash_or_id : 0; }

    // Helper to get the name type
    NameType get_name_type() const { return name_type_; }

    // Set name storage: for STATIC_STRING stores the pointer, for all other types stores hash_or_id.
    // Both union members occupy the same offset, so only one store is needed.
    void set_name(NameType type, const char *static_name, uint32_t hash_or_id) {
      if (type == NameType::STATIC_STRING) {
        name_.static_name = static_name;
      } else {
        name_.hash_or_id = hash_or_id;
      }
      name_type_ = type;
    }

    // Heap ordering comparator (defined in scheduler.cpp); presumably orders by
    // next execution time — confirm against the .cpp before relying on ties.
    static bool cmp(const SchedulerItemPtr &a, const SchedulerItemPtr &b);

    // Note: We use 48 bits total (32 + 16), stored in a 64-bit value for API compatibility.
    // The upper 16 bits of the 64-bit value are always zero, which is fine since
    // millis_major_ is also 16 bits and they must match.
    constexpr uint64_t get_next_execution() const {
      return (static_cast<uint64_t>(next_execution_high_) << 32) | next_execution_low_;
    }

    constexpr void set_next_execution(uint64_t value) {
      next_execution_low_ = static_cast<uint32_t>(value);
      // Cast to uint16_t intentionally truncates to lower 16 bits of the upper 32 bits.
      // This is correct because millis_major_ that creates these values is also 16 bits.
      next_execution_high_ = static_cast<uint16_t>(value >> 32);
    }
    // Human-readable item type for logging.
    constexpr const char *get_type_str() const { return (type == TIMEOUT) ? "timeout" : "interval"; }
    // Log tag of the owning component, or "unknown" for component-less items.
    const LogString *get_source() const { return component ? component->get_component_log_str() : LOG_STR("unknown"); }
  };

  // Common implementation for both timeout and interval
  // name_type determines storage type: STATIC_STRING uses static_name, others use hash_or_id
  void set_timer_common_(Component *component, SchedulerItem::Type type, NameType name_type, const char *static_name,
                         uint32_t hash_or_id, uint32_t delay, std::function<void()> &&func, bool is_retry = false,
                         bool skip_cancel = false);

  // Common implementation for retry - Remove before 2026.8.0
  // name_type determines storage type: STATIC_STRING uses static_name, others use hash_or_id
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
  void set_retry_common_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id,
                         uint32_t initial_wait_time, uint8_t max_attempts, std::function<RetryResult(uint8_t)> func,
                         float backoff_increase_factor);
#pragma GCC diagnostic pop
  // Common implementation for cancel_retry
  bool cancel_retry_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id);

  // Extend a 32-bit millis() value to 64-bit. Use when the caller already has a fresh now.
  // On platforms with native 64-bit time, ignores now and uses millis_64() directly.
  // On other platforms, extends now to 64-bit using rollover tracking.
  uint64_t millis_64_from_(uint32_t now) {
#ifdef USE_NATIVE_64BIT_TIME
    (void) now;
    return millis_64();
#else
    return Millis64Impl::compute(now);
#endif
  }
  // Cleanup logically deleted items from the scheduler
  // Returns the number of items remaining after cleanup
  // IMPORTANT: This method should only be called from the main thread (loop task).
  size_t cleanup_();
  // Remove and return the front item from the heap
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  SchedulerItemPtr pop_raw_locked_();
  // Get or create a scheduler item from the pool
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  SchedulerItemPtr get_item_from_pool_locked_();

 private:
  // Helper to cancel items - must be called with lock held
  // name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
  bool cancel_item_locked_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id,
                           SchedulerItem::Type type, bool match_retry = false);

  // Common implementation for cancel operations - handles locking
  bool cancel_item_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id,
                    SchedulerItem::Type type, bool match_retry = false);

  // Helper to check if two static string names match
  inline bool HOT names_match_static_(const char *name1, const char *name2) const {
    // Check pointer equality first (common for static strings), then string contents
    // The core ESPHome codebase uses static strings (const char*) for component names,
    // making pointer comparison effective. The std::string overloads exist only for
    // compatibility with external components but are rarely used in practice.
    return (name1 != nullptr && name2 != nullptr) && ((name1 == name2) || (strcmp(name1, name2) == 0));
  }

  // Helper function to check if item matches criteria for cancellation
  // name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
  // IMPORTANT: Must be called with scheduler lock held
  inline bool HOT matches_item_locked_(const SchedulerItemPtr &item, Component *component, NameType name_type,
                                       const char *static_name, uint32_t hash_or_id, SchedulerItem::Type type,
                                       bool match_retry, bool skip_removed = true) const {
    // THREAD SAFETY: Check for nullptr first to prevent LoadProhibited crashes. On multi-threaded
    // platforms, items can be moved out of defer_queue_ during processing, leaving nullptr entries.
    // PR #11305 added nullptr checks in callers (mark_matching_items_removed_locked_()), but this check
    // provides defense-in-depth: helper functions should be safe regardless of caller behavior.
    // Fixes: https://github.com/esphome/esphome/issues/11940
    if (!item)
      return false;
    if (item->component != component || item->type != type ||
        (skip_removed && this->is_item_removed_locked_(item.get())) || (match_retry && !item->is_retry)) {
      return false;
    }
    // Name type must match
    if (item->get_name_type() != name_type)
      return false;
    // For static strings, compare the string content; for hash/ID, compare the value
    if (name_type == NameType::STATIC_STRING) {
      return this->names_match_static_(item->get_name(), static_name);
    }
    return item->get_name_hash_or_id() == hash_or_id;
  }

  // Helper to execute a scheduler item; returns an updated 'now' timestamp.
  uint32_t execute_item_(SchedulerItem *item, uint32_t now);

  // Helper to check if item should be skipped (cancelled, or its component failed)
  bool should_skip_item_(SchedulerItem *item) const {
    return is_item_removed_(item) || (item->component != nullptr && item->component->is_failed());
  }

  // Helper to recycle a SchedulerItem back to the pool.
  // IMPORTANT: Only call from main loop context! Recycling clears the callback,
  // so calling from another thread while the callback is executing causes use-after-free.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  void recycle_item_main_loop_(SchedulerItemPtr item);

  // Helper to perform full cleanup when too many items are cancelled
  void full_cleanup_removed_items_();

  // Helper to calculate random offset for interval timers - extracted to reduce code size of set_timer_common_
  // IMPORTANT: Must not be inlined - called only for intervals, keeping it out of the hot path saves flash.
  uint32_t __attribute__((noinline)) calculate_interval_offset_(uint32_t delay);

  // Helper to check if a retry was already cancelled - extracted to reduce code size of set_timer_common_
  // Remove before 2026.8.0 along with all retry code.
  // IMPORTANT: Must not be inlined - retry path is cold and deprecated.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  bool __attribute__((noinline))
  is_retry_cancelled_locked_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id);

#ifdef ESPHOME_DEBUG_SCHEDULER
  // Helper for debug logging in set_timer_common_ - extracted to reduce code size
  void debug_log_timer_(const SchedulerItem *item, NameType name_type, const char *static_name, uint32_t hash_or_id,
                        SchedulerItem::Type type, uint32_t delay, uint64_t now);
#endif /* ESPHOME_DEBUG_SCHEDULER */

#ifndef ESPHOME_THREAD_SINGLE
  // Helper to process defer queue - inline for performance in hot path
  inline void process_defer_queue_(uint32_t &now) {
    // Process defer queue first to guarantee FIFO execution order for deferred items.
    // Previously, defer() used the heap which gave undefined order for equal timestamps,
    // causing race conditions on multi-core systems (ESP32, BK7200).
    // With the defer queue:
    // - Deferred items (delay=0) go directly to defer_queue_ in set_timer_common_
    // - Items execute in exact order they were deferred (FIFO guarantee)
    // - No deferred items exist in to_add_, so processing order doesn't affect correctness
    // Single-core platforms don't use this queue and fall back to the heap-based approach.
    //
    // Note: Items cancelled via cancel_item_locked_() are marked with remove=true but still
    // processed here. They are skipped during execution by should_skip_item_().
    // This is intentional - no memory leak occurs.
    //
    // We use an index (defer_queue_front_) to track the read position instead of calling
    // erase() on every pop, which would be O(n). The queue is processed once per loop -
    // any items added during processing are left for the next loop iteration.

    // Snapshot the queue end point - only process items that existed at loop start
    // Items added during processing (by callbacks or other threads) run next loop
    // No lock needed: single consumer (main loop), stale read just means we process less this iteration
    size_t defer_queue_end = this->defer_queue_.size();

    // Fast path: nothing to process, avoid lock entirely.
    // Safe without lock: single consumer (main loop) reads front_, and a stale size() read
    // from a concurrent push can only make us see fewer items — they'll be processed next loop.
    if (this->defer_queue_front_ >= defer_queue_end)
      return;

    // Merge lock acquisitions: instead of separate locks for move-out and recycle (2N+1 total),
    // recycle each item after re-acquiring the lock for the next iteration (N+1 total).
    // The lock is held across: recycle → loop condition → move-out, then released for execution.
    SchedulerItemPtr item;

    this->lock_.lock();
    while (this->defer_queue_front_ < defer_queue_end) {
      // SAFETY: Moving out the unique_ptr leaves a nullptr in the vector at defer_queue_front_.
      // This is intentional and safe because:
      // 1. The vector is only cleaned up by cleanup_defer_queue_locked_() at the end of this function
      // 2. Any code iterating defer_queue_ MUST check for nullptr items (see mark_matching_items_removed_locked_)
      // 3. The lock protects concurrent access, but the nullptr remains until cleanup
      item = std::move(this->defer_queue_[this->defer_queue_front_]);
      this->defer_queue_front_++;
      this->lock_.unlock();

      // Execute callback without holding lock to prevent deadlocks
      // if the callback tries to call defer() again
      if (!this->should_skip_item_(item.get())) {
        now = this->execute_item_(item.get(), now);
      }

      this->lock_.lock();
      this->recycle_item_main_loop_(std::move(item));
    }
    // Clean up the queue (lock already held from last recycle or initial acquisition)
    this->cleanup_defer_queue_locked_();
    this->lock_.unlock();
  }

  // Helper to cleanup defer_queue_ after processing.
  // Keeps the common clear() path inline, outlines the rare compaction to keep
  // cold code out of the hot instruction cache lines.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  inline void cleanup_defer_queue_locked_() {
    // Check if new items were added by producers during processing
    if (this->defer_queue_front_ >= this->defer_queue_.size()) {
      // Common case: no new items - clear everything
      this->defer_queue_.clear();
    } else {
      // Rare case: new items were added during processing - outlined to keep cold code
      // out of the hot instruction cache lines
      this->compact_defer_queue_locked_();
    }
    this->defer_queue_front_ = 0;
  }

  // Cold path for compacting defer_queue_ when new items were added during processing.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  // IMPORTANT: Must not be inlined - rare path, outlined to keep it out of the hot instruction cache lines.
  void __attribute__((noinline)) compact_defer_queue_locked_();
#endif /* not ESPHOME_THREAD_SINGLE */

  // Helper to check if item is marked for removal (platform-specific)
  // Returns true if item should be skipped, handles platform-specific synchronization
  // For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this
  // function.
  bool is_item_removed_(SchedulerItem *item) const {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    // Multi-threaded with atomics: use atomic load for lock-free access
    return item->remove.load(std::memory_order_acquire);
#else
    // Single-threaded (ESPHOME_THREAD_SINGLE) or
    // multi-threaded without atomics (ESPHOME_THREAD_MULTI_NO_ATOMICS): direct read
    // For ESPHOME_THREAD_MULTI_NO_ATOMICS, caller MUST hold lock!
    return item->remove;
#endif
  }

  // Helper to check if item is marked for removal when lock is already held.
  // Uses relaxed ordering since the mutex provides all necessary synchronization.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  bool is_item_removed_locked_(SchedulerItem *item) const {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    // Lock already held - relaxed is sufficient, mutex provides ordering
    return item->remove.load(std::memory_order_relaxed);
#else
    return item->remove;
#endif
  }

  // Helper to set item removal flag (platform-specific)
  // For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this
  // function. Uses memory_order_release when setting to true (for cancellation synchronization),
  // and memory_order_relaxed when setting to false (for initialization).
  void set_item_removed_(SchedulerItem *item, bool removed) {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    // Multi-threaded with atomics: use atomic store with appropriate ordering
    // Release ordering when setting to true ensures cancellation is visible to other threads
    // Relaxed ordering when setting to false is sufficient for initialization
    item->remove.store(removed, removed ? std::memory_order_release : std::memory_order_relaxed);
#else
    // Single-threaded (ESPHOME_THREAD_SINGLE) or
    // multi-threaded without atomics (ESPHOME_THREAD_MULTI_NO_ATOMICS): direct write
    // For ESPHOME_THREAD_MULTI_NO_ATOMICS, caller MUST hold lock!
    item->remove = removed;
#endif
  }

  // Helper to mark matching items in a container as removed
  // name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
  // Returns the number of items marked for removal
  // IMPORTANT: Must be called with scheduler lock held
  __attribute__((noinline)) size_t mark_matching_items_removed_locked_(std::vector<SchedulerItemPtr> &container,
                                                                       Component *component, NameType name_type,
                                                                       const char *static_name, uint32_t hash_or_id,
                                                                       SchedulerItem::Type type, bool match_retry) {
    size_t count = 0;
    for (auto &item : container) {
      // Skip nullptr items (can happen in defer_queue_ when items are being processed)
      // The defer_queue_ uses index-based processing: items are std::moved out but left in the
      // vector as nullptr until cleanup. Even though this function is called with lock held,
      // the vector can still contain nullptr items from the processing loop. This check prevents crashes.
      if (item && this->matches_item_locked_(item, component, name_type, static_name, hash_or_id, type, match_retry)) {
        this->set_item_removed_(item.get(), true);
        count++;
      }
    }
    return count;
  }

  Mutex lock_;
  std::vector<SchedulerItemPtr> items_;   // Main heap of scheduled items (ordered via SchedulerItem::cmp)
  std::vector<SchedulerItemPtr> to_add_;  // Items waiting to be merged into the heap by process_to_add()
#ifndef ESPHOME_THREAD_SINGLE
  // Single-core platforms don't need the defer queue and save ~32 bytes of RAM
  // Using std::vector instead of std::deque avoids 512-byte chunked allocations
  // Index tracking avoids O(n) erase() calls when draining the queue each loop
  std::vector<SchedulerItemPtr> defer_queue_;  // FIFO queue for defer() calls
  size_t defer_queue_front_{0};  // Index of first valid item in defer_queue_ (tracks consumed items)
#endif /* ESPHOME_THREAD_SINGLE */
  uint32_t to_remove_{0};  // Count of logically-removed items still in the heap (triggers cleanup)

  // Memory pool for recycling SchedulerItem objects to reduce heap churn.
  // Design decisions:
  // - std::vector is used instead of a fixed array because many systems only need 1-2 scheduler items
  // - The vector grows dynamically up to MAX_POOL_SIZE (5) only when needed, saving memory on simple setups
  // - Pool size of 5 matches typical usage (2-4 timers) while keeping memory overhead low (~250 bytes on ESP32)
  // - The pool significantly reduces heap fragmentation which is critical because heap allocation/deallocation
  //   can stall the entire system, causing timing issues and dropped events for any components that need
  //   to synchronize between tasks (see https://github.com/esphome/backlog/issues/52)
  std::vector<SchedulerItemPtr> scheduler_item_pool_;
};
564
565} // namespace esphome
struct @65::@66 __attribute__
const Component * component
Definition component.cpp:37
uint16_t type
Providing packet encoding functions for exchanging data with a remote host.
Definition a01nyub.cpp:7
void retry_handler(const std::shared_ptr< RetryArgs > &args)
uint64_t HOT millis_64()
Definition core.cpp:26
struct ESPDEPRECATED("Use std::index_sequence instead. Removed in 2026.6.0", "2025.12.0") seq
Definition automation.h:26