scheduler.cpp
#include "scheduler.h"

#include "application.h"
#include "esphome/core/defines.h"
#include "esphome/core/hal.h"
#include "esphome/core/helpers.h"
#include "esphome/core/log.h"
#include <algorithm>
#include <cinttypes>
#include <cstring>
#include <limits>

namespace esphome {

static const char *const TAG = "scheduler";

// Memory pool configuration constants
// Pool size of 5 matches typical usage patterns (2-4 active timers)
// - Minimal memory overhead (~250 bytes on ESP32)
// - Sufficient for most configs with a couple sensors/components
// - Still prevents heap fragmentation and allocation stalls
// - Complex setups with many timers will just allocate beyond the pool
// See https://github.com/esphome/backlog/issues/52
static constexpr size_t MAX_POOL_SIZE = 5;

// Maximum number of logically deleted (cancelled) items before forcing cleanup.
// Set to 5 to match the pool size - when we have as many cancelled items as our
// pool can hold, it's time to clean up and recycle them.
static constexpr uint32_t MAX_LOGICALLY_DELETED_ITEMS = 5;
// Half the 32-bit range - used to distinguish rollovers from normal time progression
static constexpr uint32_t HALF_MAX_UINT32 = std::numeric_limits<uint32_t>::max() / 2;
// Maximum random delay before the first execution of an interval (caps the start offset)
static constexpr uint32_t MAX_INTERVAL_DELAY = 5000;
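
// Worked example (illustrative): for a 60000 ms interval the first run is offset by a
// random value in [0, min(60000 / 2, MAX_INTERVAL_DELAY)] = [0, 5000] ms; for a 4000 ms
// interval the cap is min(2000, 5000) = 2000 ms. This staggers intervals that are
// created at the same time (e.g. at boot) so they don't all fire in the same loop pass.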

// Uncomment to debug scheduler
// #define ESPHOME_DEBUG_SCHEDULER

#ifdef ESPHOME_DEBUG_SCHEDULER
// Helper to validate that a pointer looks like it's in static memory
static void validate_static_string(const char *name) {
  if (name == nullptr)
    return;

  // This is a heuristic check - stack and heap pointers are typically
  // much higher in memory than static data
  uintptr_t addr = reinterpret_cast<uintptr_t>(name);

  // Create a stack variable to compare against
  int stack_var;
  uintptr_t stack_addr = reinterpret_cast<uintptr_t>(&stack_var);

  // If the string pointer is near our stack variable, it's likely on the stack
  // Using 8KB range as ESP32 main task stack is typically 8192 bytes
  if (addr > (stack_addr - 0x2000) && addr < (stack_addr + 0x2000)) {
    ESP_LOGW(TAG,
             "WARNING: Scheduler name '%s' at %p appears to be on the stack - this is unsafe!\n"
             "         Stack reference at %p",
             name, name, &stack_var);
  }

  // Also check if it might be on the heap by seeing if it's in a very different range
  // This is platform-specific but generally heap is allocated far from static memory
  static const char *static_str = "test";
  uintptr_t static_addr = reinterpret_cast<uintptr_t>(static_str);

  // If the address is very far from known static memory, it might be heap
  if (addr > static_addr + 0x100000 || (static_addr > 0x100000 && addr < static_addr - 0x100000)) {
    ESP_LOGW(TAG, "WARNING: Scheduler name '%s' at %p might be on heap (static ref at %p)", name, name, static_str);
  }
}
#endif /* ESPHOME_DEBUG_SCHEDULER */

// A note on locking: the `lock_` lock protects the `items_` and `to_add_` containers. It must be taken when writing to
// them (i.e. when adding/removing items, but not when changing items). As items are only deleted from the loop task,
// iterating over them from the loop task is fine; but iterating from any other context requires the lock to be held to
// avoid the main thread modifying the list while it is being accessed.
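
// A minimal sketch of that discipline (illustrative only; the hypothetical
// inspect_from_other_thread_() helper is not part of this file):
//
//   void Scheduler::inspect_from_other_thread_() {
//     LockGuard guard{this->lock_};  // required outside the loop task
//     for (auto &item : this->items_) {
//       // Safe to read here; the loop task cannot delete items while we hold lock_
//     }
//   }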

// Common implementation for both timeout and interval
void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type type, bool is_static_string,
                                      const void *name_ptr, uint32_t delay, std::function<void()> func, bool is_retry,
                                      bool skip_cancel) {
  // Get the name as const char*
  const char *name_cstr = this->get_name_cstr_(is_static_string, name_ptr);

  if (delay == SCHEDULER_DONT_RUN) {
    // Still need to cancel any existing timer if the name is not empty
    if (!skip_cancel) {
      LockGuard guard{this->lock_};
      this->cancel_item_locked_(component, name_cstr, type);
    }
    return;
  }

  // Get a fresh timestamp BEFORE taking the lock - millis_64_ may need to acquire the lock itself
  const uint64_t now = this->millis_64_(millis());

  // Take the lock early to protect scheduler_item_pool_ access
  LockGuard guard{this->lock_};

  // Create and populate the scheduler item
  std::unique_ptr<SchedulerItem> item;
  if (!this->scheduler_item_pool_.empty()) {
    // Reuse from the pool
    item = std::move(this->scheduler_item_pool_.back());
    this->scheduler_item_pool_.pop_back();
#ifdef ESPHOME_DEBUG_SCHEDULER
    ESP_LOGD(TAG, "Reused item from pool (pool size now: %zu)", this->scheduler_item_pool_.size());
#endif
  } else {
    // Allocate a new item if the pool is empty
    item = make_unique<SchedulerItem>();
#ifdef ESPHOME_DEBUG_SCHEDULER
    ESP_LOGD(TAG, "Allocated new item (pool empty)");
#endif
  }
  item->component = component;
  item->set_name(name_cstr, !is_static_string);
  item->type = type;
  item->callback = std::move(func);
  // Reset the remove flag - recycled items may have been cancelled (remove=true) in a previous use
  this->set_item_removed_(item.get(), false);
  item->is_retry = is_retry;

#ifndef ESPHOME_THREAD_SINGLE
  // Special handling for defer() (delay = 0, type = TIMEOUT)
  // Single-core platforms don't need thread-safe defer handling
  if (delay == 0 && type == SchedulerItem::TIMEOUT) {
    // Put it in the defer queue for guaranteed FIFO execution
    if (!skip_cancel) {
      this->cancel_item_locked_(component, name_cstr, type);
    }
    this->defer_queue_.push_back(std::move(item));
    return;
  }
#endif /* not ESPHOME_THREAD_SINGLE */

  // Type-specific setup
  if (type == SchedulerItem::INTERVAL) {
    item->interval = delay;
    // The first execution happens almost immediately, after a random smallish offset
    // Calculate the random offset (0 to min(interval/2, 5s))
    uint32_t offset = (uint32_t) (std::min(delay / 2, MAX_INTERVAL_DELAY) * random_float());
    item->set_next_execution(now + offset);
    ESP_LOGV(TAG, "Scheduler interval for %s is %" PRIu32 "ms, offset %" PRIu32 "ms", name_cstr ? name_cstr : "", delay,
             offset);
  } else {
    item->interval = 0;
    item->set_next_execution(now + delay);
  }

#ifdef ESPHOME_DEBUG_SCHEDULER
  this->debug_log_timer_(item.get(), is_static_string, name_cstr, type, delay, now);
#endif /* ESPHOME_DEBUG_SCHEDULER */

  // For retries, check whether there's a cancelled timeout first
  if (is_retry && name_cstr != nullptr && type == SchedulerItem::TIMEOUT &&
      (has_cancelled_timeout_in_container_locked_(this->items_, component, name_cstr, /* match_retry= */ true) ||
       has_cancelled_timeout_in_container_locked_(this->to_add_, component, name_cstr, /* match_retry= */ true))) {
    // Skip scheduling - the retry was cancelled
#ifdef ESPHOME_DEBUG_SCHEDULER
    ESP_LOGD(TAG, "Skipping retry '%s' - found cancelled item", name_cstr);
#endif
    return;
  }

  // If a name is provided, do an atomic cancel-and-add (unless skip_cancel is true)
  // Cancel existing items
  if (!skip_cancel) {
    this->cancel_item_locked_(component, name_cstr, type);
  }
  // Add the new item directly to to_add_,
  // since we have the lock held
  this->to_add_.push_back(std::move(item));
}

void HOT Scheduler::set_timeout(Component *component, const char *name, uint32_t timeout, std::function<void()> func) {
  this->set_timer_common_(component, SchedulerItem::TIMEOUT, true, name, timeout, std::move(func));
}

void HOT Scheduler::set_timeout(Component *component, const std::string &name, uint32_t timeout,
                                std::function<void()> func) {
  this->set_timer_common_(component, SchedulerItem::TIMEOUT, false, &name, timeout, std::move(func));
}
bool HOT Scheduler::cancel_timeout(Component *component, const std::string &name) {
  return this->cancel_item_(component, false, &name, SchedulerItem::TIMEOUT);
}
bool HOT Scheduler::cancel_timeout(Component *component, const char *name) {
  return this->cancel_item_(component, true, name, SchedulerItem::TIMEOUT);
}
void HOT Scheduler::set_interval(Component *component, const std::string &name, uint32_t interval,
                                 std::function<void()> func) {
  this->set_timer_common_(component, SchedulerItem::INTERVAL, false, &name, interval, std::move(func));
}

void HOT Scheduler::set_interval(Component *component, const char *name, uint32_t interval,
                                 std::function<void()> func) {
  this->set_timer_common_(component, SchedulerItem::INTERVAL, true, name, interval, std::move(func));
}
bool HOT Scheduler::cancel_interval(Component *component, const std::string &name) {
  return this->cancel_item_(component, false, &name, SchedulerItem::INTERVAL);
}
bool HOT Scheduler::cancel_interval(Component *component, const char *name) {
  return this->cancel_item_(component, true, name, SchedulerItem::INTERVAL);
}
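
// Example usage (illustrative sketch, not part of this file). Components normally reach
// the scheduler through the protected Component helpers of the same names; MyComponent
// and the 5000 ms values here are assumptions:
//
//   class MyComponent : public Component {
//    public:
//     void setup() override {
//       // Re-scheduling with the same name atomically cancels the previous timer
//       this->set_timeout("ping", 5000, [this]() { ESP_LOGD("my_component", "timeout fired"); });
//       this->set_interval("poll", 5000, [this]() { ESP_LOGD("my_component", "interval tick"); });
//     }
//     void stop_polling() { this->cancel_interval("poll"); }
//   };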

struct RetryArgs {
  // Ordered to minimize padding on 32-bit systems
  std::function<RetryResult(uint8_t)> func;
  Component *component;
  Scheduler *scheduler;
  const char *name;  // Points to a static string or an owned copy
  uint32_t current_interval;
  float backoff_increase_factor;
  uint8_t retry_countdown;
  bool name_is_dynamic;  // True if name needs delete[]

  ~RetryArgs() {
    if (this->name_is_dynamic && this->name) {
      delete[] this->name;
    }
  }
};

void retry_handler(const std::shared_ptr<RetryArgs> &args) {
  RetryResult const retry_result = args->func(--args->retry_countdown);
  if (retry_result == RetryResult::DONE || args->retry_countdown <= 0)
    return;
  // The second execution of `func` happens after `initial_wait_time`
  // Pass is_static_string=true because args->name is owned by the shared_ptr<RetryArgs>,
  // which is captured in the lambda and outlives the SchedulerItem
  args->scheduler->set_timer_common_(
      args->component, Scheduler::SchedulerItem::TIMEOUT, true, args->name, args->current_interval,
      [args]() { retry_handler(args); }, /* is_retry= */ true);
  // backoff_increase_factor is applied to the third & later executions
  args->current_interval *= args->backoff_increase_factor;
}
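
// Worked example (illustrative): set_retry with initial_wait_time=1000, max_attempts=4 and
// backoff_increase_factor=2.0 invokes `func` immediately (countdown 3), then after 1000 ms
// (countdown 2), 2000 ms (countdown 1) and 4000 ms (countdown 0), stopping early as soon
// as `func` returns RetryResult::DONE.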

void HOT Scheduler::set_retry_common_(Component *component, bool is_static_string, const void *name_ptr,
                                      uint32_t initial_wait_time, uint8_t max_attempts,
                                      std::function<RetryResult(uint8_t)> func, float backoff_increase_factor) {
  const char *name_cstr = this->get_name_cstr_(is_static_string, name_ptr);

  if (name_cstr != nullptr)
    this->cancel_retry(component, name_cstr);

  if (initial_wait_time == SCHEDULER_DONT_RUN)
    return;

  ESP_LOGVV(TAG, "set_retry(name='%s', initial_wait_time=%" PRIu32 ", max_attempts=%u, backoff_factor=%0.1f)",
            name_cstr ? name_cstr : "", initial_wait_time, max_attempts, backoff_increase_factor);

  if (backoff_increase_factor < 0.0001) {
    ESP_LOGE(TAG, "backoff_factor %0.1f too small, using 1.0: %s", backoff_increase_factor, name_cstr ? name_cstr : "");
    backoff_increase_factor = 1;
  }

  auto args = std::make_shared<RetryArgs>();
  args->func = std::move(func);
  args->component = component;
  args->scheduler = this;
  args->current_interval = initial_wait_time;
  args->backoff_increase_factor = backoff_increase_factor;
  args->retry_countdown = max_attempts;

  // Store the name - either as a static pointer or an owned copy
  if (name_cstr == nullptr || name_cstr[0] == '\0') {
    // Empty or null name - use an empty string literal
    args->name = "";
    args->name_is_dynamic = false;
  } else if (is_static_string) {
    // Static string - just store the pointer
    args->name = name_cstr;
    args->name_is_dynamic = false;
  } else {
    // Dynamic string - make a copy
    size_t len = strlen(name_cstr);
    char *copy = new char[len + 1];
    memcpy(copy, name_cstr, len + 1);
    args->name = copy;
    args->name_is_dynamic = true;
  }

  // The first execution of `func` happens immediately - use set_timer_common_ with is_retry=true
  // Pass is_static_string=true because args->name is owned by the shared_ptr<RetryArgs>,
  // which is captured in the lambda and outlives the SchedulerItem
  this->set_timer_common_(
      component, SchedulerItem::TIMEOUT, true, args->name, 0, [args]() { retry_handler(args); },
      /* is_retry= */ true);
}

void HOT Scheduler::set_retry(Component *component, const std::string &name, uint32_t initial_wait_time,
                              uint8_t max_attempts, std::function<RetryResult(uint8_t)> func,
                              float backoff_increase_factor) {
  this->set_retry_common_(component, false, &name, initial_wait_time, max_attempts, std::move(func),
                          backoff_increase_factor);
}

void HOT Scheduler::set_retry(Component *component, const char *name, uint32_t initial_wait_time, uint8_t max_attempts,
                              std::function<RetryResult(uint8_t)> func, float backoff_increase_factor) {
  this->set_retry_common_(component, true, name, initial_wait_time, max_attempts, std::move(func),
                          backoff_increase_factor);
}
bool HOT Scheduler::cancel_retry(Component *component, const std::string &name) {
  return this->cancel_retry(component, name.c_str());
}

bool HOT Scheduler::cancel_retry(Component *component, const char *name) {
  // Cancel timeouts that have the is_retry flag set
  LockGuard guard{this->lock_};
  return this->cancel_item_locked_(component, name, SchedulerItem::TIMEOUT, /* match_retry= */ true);
}
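
// Example usage (illustrative sketch; try_connect() and the values are assumptions).
// From inside a component, via the protected Component::set_retry helper:
//
//   this->set_retry("connect", /* initial_wait_time= */ 500, /* max_attempts= */ 5,
//                   [this](uint8_t remaining) -> RetryResult {
//                     ESP_LOGD("my_component", "connecting, %u attempts left", remaining);
//                     return this->try_connect() ? RetryResult::DONE : RetryResult::RETRY;
//                   },
//                   /* backoff_increase_factor= */ 2.0f);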

optional<uint32_t> HOT Scheduler::next_schedule_in(uint32_t now) {
  // IMPORTANT: This method should only be called from the main thread (loop task).
  // It performs cleanup and accesses items_[0] without holding a lock, which is only
  // safe when called from the main thread. Other threads must not call this method.

  // If no items, return an empty optional
  if (this->cleanup_() == 0)
    return {};

  auto &item = this->items_[0];
  // Convert the fresh timestamp from the caller (usually Application::loop()) to 64-bit
  const auto now_64 = this->millis_64_(now);  // 'now' from parameter - fresh from caller
  const uint64_t next_exec = item->get_next_execution();
  if (next_exec < now_64)
    return 0;
  return next_exec - now_64;
}
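
// Illustrative caller sketch (an assumption about usage, not necessarily how
// Application::loop() consumes this): the loop task can bound its idle delay by the
// time until the next scheduled item:
//
//   uint32_t now = millis();
//   auto next = App.scheduler.next_schedule_in(now);  // empty optional if nothing scheduled
//   uint32_t idle_ms = next.has_value() ? std::min(*next, (uint32_t) 16) : 16;
//   delay(idle_ms);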

void Scheduler::full_cleanup_removed_items_() {
  // We hold the lock for the entire cleanup operation because:
  // 1. We're rebuilding the entire items_ list, so we need exclusive access throughout
  // 2. Other threads must see either the old state or the new state, not intermediate states
  // 3. The operation is already expensive (O(n)), so lock overhead is negligible
  // 4. No operations inside can block or take other locks, so no deadlock risk
  LockGuard guard{this->lock_};

  std::vector<std::unique_ptr<SchedulerItem>> valid_items;

  // Move all non-removed items to valid_items, recycle removed ones
  for (auto &item : this->items_) {
    if (!is_item_removed_(item.get())) {
      valid_items.push_back(std::move(item));
    } else {
      // Recycle removed items
      this->recycle_item_main_loop_(std::move(item));
    }
  }

  // Replace items_ with the filtered list
  this->items_ = std::move(valid_items);
  // Rebuild the heap structure since items are no longer in heap order
  std::make_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
  this->to_remove_ = 0;
}

void HOT Scheduler::call(uint32_t now) {
#ifndef ESPHOME_THREAD_SINGLE
  this->process_defer_queue_(now);
#endif /* not ESPHOME_THREAD_SINGLE */

  // Convert the fresh timestamp from the main loop to 64-bit for scheduler operations
  const auto now_64 = this->millis_64_(now);  // 'now' from parameter - fresh from Application::loop()
  this->process_to_add();

  // Track if any items were added to to_add_ during this call (intervals or from callbacks)
  bool has_added_items = false;

#ifdef ESPHOME_DEBUG_SCHEDULER
  static uint64_t last_print = 0;

  if (now_64 - last_print > 2000) {
    last_print = now_64;
    std::vector<std::unique_ptr<SchedulerItem>> old_items;
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    const auto last_dbg = this->last_millis_.load(std::memory_order_relaxed);
    const auto major_dbg = this->millis_major_.load(std::memory_order_relaxed);
    ESP_LOGD(TAG, "Items: count=%zu, pool=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(),
             this->scheduler_item_pool_.size(), now_64, major_dbg, last_dbg);
#else /* not ESPHOME_THREAD_MULTI_ATOMICS */
    ESP_LOGD(TAG, "Items: count=%zu, pool=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(),
             this->scheduler_item_pool_.size(), now_64, this->millis_major_, this->last_millis_);
#endif /* else ESPHOME_THREAD_MULTI_ATOMICS */
    // Cleanup before debug output
    this->cleanup_();
    while (!this->items_.empty()) {
      std::unique_ptr<SchedulerItem> item;
      {
        LockGuard guard{this->lock_};
        item = this->pop_raw_locked_();
      }

      const char *name = item->get_name();
      bool is_cancelled = is_item_removed_(item.get());
      ESP_LOGD(TAG, "  %s '%s/%s' interval=%" PRIu32 " next_execution in %" PRIu64 "ms at %" PRIu64 "%s",
               item->get_type_str(), LOG_STR_ARG(item->get_source()), name ? name : "(null)", item->interval,
               item->get_next_execution() - now_64, item->get_next_execution(), is_cancelled ? " [CANCELLED]" : "");

      old_items.push_back(std::move(item));
    }
    ESP_LOGD(TAG, "\n");

    {
      LockGuard guard{this->lock_};
      this->items_ = std::move(old_items);
      // Rebuild the heap after moving items back
      std::make_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
    }
  }
#endif /* ESPHOME_DEBUG_SCHEDULER */

  // Clean up removed items before processing
  // First try to clean items from the top of the heap (fast path)
  this->cleanup_();

  // If we still have too many cancelled items, do a full cleanup.
  // This only happens if cancelled items are stuck in the middle/bottom of the heap
  if (this->to_remove_ >= MAX_LOGICALLY_DELETED_ITEMS) {
    this->full_cleanup_removed_items_();
  }
  while (!this->items_.empty()) {
    // Don't copy by value yet
    auto &item = this->items_[0];
    if (item->get_next_execution() > now_64) {
      // Not reached timeout yet, done for this call
      break;
    }
    // Don't run on failed components
    if (item->component != nullptr && item->component->is_failed()) {
      LockGuard guard{this->lock_};
      this->recycle_item_main_loop_(this->pop_raw_locked_());
      continue;
    }

    // Check if the item is marked for removal
    // This handles two cases:
    // 1. The item was marked for removal after cleanup_() but before we got here
    // 2. The item is marked for removal but wasn't at the front of the heap during cleanup_()
#ifdef ESPHOME_THREAD_MULTI_NO_ATOMICS
    // Multi-threaded platforms without atomics: must take the lock to safely read the remove flag
    {
      LockGuard guard{this->lock_};
      if (is_item_removed_(item.get())) {
        this->recycle_item_main_loop_(this->pop_raw_locked_());
        this->to_remove_--;
        continue;
      }
    }
#else
    // Single-threaded or multi-threaded with atomics: can check without the lock
    if (is_item_removed_(item.get())) {
      LockGuard guard{this->lock_};
      this->recycle_item_main_loop_(this->pop_raw_locked_());
      this->to_remove_--;
      continue;
    }
#endif

#ifdef ESPHOME_DEBUG_SCHEDULER
    const char *item_name = item->get_name();
    ESP_LOGV(TAG, "Running %s '%s/%s' with interval=%" PRIu32 " next_execution=%" PRIu64 " (now=%" PRIu64 ")",
             item->get_type_str(), LOG_STR_ARG(item->get_source()), item_name ? item_name : "(null)", item->interval,
             item->get_next_execution(), now_64);
#endif /* ESPHOME_DEBUG_SCHEDULER */

    // Warning: during callback(), a lot of stuff can happen, including:
    //  - timeouts/intervals get added, potentially invalidating vector pointers
    //  - timeouts/intervals get cancelled
    now = this->execute_item_(item.get(), now);

    LockGuard guard{this->lock_};

    // Only pop after the function call; this ensures we were reachable
    // during the function call and know whether we were cancelled.
    auto executed_item = this->pop_raw_locked_();

    if (executed_item->remove) {
      // We were removed/cancelled during the function call - recycle and continue
      this->to_remove_--;
      this->recycle_item_main_loop_(std::move(executed_item));
      continue;
    }

    if (executed_item->type == SchedulerItem::INTERVAL) {
      executed_item->set_next_execution(now_64 + executed_item->interval);
      // Add the item directly to to_add_,
      // since we have the lock held
      this->to_add_.push_back(std::move(executed_item));
    } else {
      // Timeout completed - recycle it
      this->recycle_item_main_loop_(std::move(executed_item));
    }

    has_added_items |= !this->to_add_.empty();
  }

  if (has_added_items) {
    this->process_to_add();
  }
}
void HOT Scheduler::process_to_add() {
  LockGuard guard{this->lock_};
  for (auto &it : this->to_add_) {
    if (is_item_removed_(it.get())) {
      // Recycle cancelled items
      this->recycle_item_main_loop_(std::move(it));
      continue;
    }

    this->items_.push_back(std::move(it));
    std::push_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
  }
  this->to_add_.clear();
}
size_t HOT Scheduler::cleanup_() {
  // Fast path: if nothing to remove, just return the current size
  // Reading to_remove_ without the lock is safe because:
  // 1. We only call this from the main thread during call()
  // 2. If it's 0, there's definitely nothing to cleanup
  // 3. If it becomes non-zero after we check, cleanup will happen on the next loop iteration
  // 4. Not all platforms support atomics, so we accept this race in favor of performance
  // 5. The worst case is a one-loop-iteration delay in cleanup, which is harmless
  if (this->to_remove_ == 0)
    return this->items_.size();

  // We must hold the lock for the entire cleanup operation because:
  // 1. We're modifying items_ (via pop_raw_locked_) which requires exclusive access
  // 2. We're decrementing to_remove_ which is also modified by other threads
  //    (though all modifications are already under lock)
  // 3. Other threads read items_ when searching for items to cancel in cancel_item_locked_()
  // 4. We need a consistent view of items_ and to_remove_ throughout the operation
  // Without the lock, we could access items_ while another thread is reading it,
  // leading to race conditions
  LockGuard guard{this->lock_};
  while (!this->items_.empty()) {
    auto &item = this->items_[0];
    if (!item->remove)
      break;
    this->to_remove_--;
    this->recycle_item_main_loop_(this->pop_raw_locked_());
  }
  return this->items_.size();
}
std::unique_ptr<Scheduler::SchedulerItem> HOT Scheduler::pop_raw_locked_() {
  std::pop_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);

  // Move the item out before popping - this is the item that was at the front of the heap
  auto item = std::move(this->items_.back());

  this->items_.pop_back();
  return item;
}

// Helper to execute a scheduler item
uint32_t HOT Scheduler::execute_item_(SchedulerItem *item, uint32_t now) {
  App.set_current_component(item->component);
  WarnIfComponentBlockingGuard guard{item->component, now};
  item->callback();
  return guard.finish();
}

// Common implementation for cancel operations
bool HOT Scheduler::cancel_item_(Component *component, bool is_static_string, const void *name_ptr,
                                 SchedulerItem::Type type) {
  // Get the name as const char*
  const char *name_cstr = this->get_name_cstr_(is_static_string, name_ptr);

  // Obtain the lock because this function iterates and can be called from a non-loop task context
  LockGuard guard{this->lock_};
  return this->cancel_item_locked_(component, name_cstr, type);
}

// Helper to cancel items by name - must be called with the lock held
bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_cstr, SchedulerItem::Type type,
                                        bool match_retry) {
  // Early return if the name is invalid - no items to cancel
  if (name_cstr == nullptr) {
    return false;
  }

  size_t total_cancelled = 0;

  // Check all containers for matching items
#ifndef ESPHOME_THREAD_SINGLE
  // Mark items in the defer queue as cancelled (they'll be skipped when processed)
  if (type == SchedulerItem::TIMEOUT) {
    total_cancelled +=
        this->mark_matching_items_removed_locked_(this->defer_queue_, component, name_cstr, type, match_retry);
  }
#endif /* not ESPHOME_THREAD_SINGLE */

  // Cancel items in the main heap.
  // We only mark items for removal here - never recycle directly.
  // The main loop may be executing an item's callback right now, and recycling
  // would destroy the callback while it's running (use-after-free).
  // Only the main loop in call() should recycle items, after execution completes.
  if (!this->items_.empty()) {
    size_t heap_cancelled =
        this->mark_matching_items_removed_locked_(this->items_, component, name_cstr, type, match_retry);
    total_cancelled += heap_cancelled;
    this->to_remove_ += heap_cancelled;
  }

  // Cancel items in to_add_
  total_cancelled += this->mark_matching_items_removed_locked_(this->to_add_, component, name_cstr, type, match_retry);

  return total_cancelled > 0;
}

uint64_t Scheduler::millis_64_(uint32_t now) {
  // THREAD SAFETY NOTE:
  // This function has three implementations, selected by preprocessor flags:
  // - ESPHOME_THREAD_SINGLE - runs on single-threaded platforms (ESP8266, RP2040, etc.)
  // - ESPHOME_THREAD_MULTI_NO_ATOMICS - runs on multi-threaded platforms without atomics (LibreTiny)
  // - ESPHOME_THREAD_MULTI_ATOMICS - runs on multi-threaded platforms with atomics (ESP32, HOST, etc.)
  //
  // Make sure all changes are synchronized if you edit this function.
  //
  // IMPORTANT: Always pass fresh millis() values to this function. The implementation
  // handles out-of-order timestamps between threads, but minimizing time differences
  // helps maintain accuracy.
  //

#ifdef ESPHOME_THREAD_SINGLE
  // This is the single-core implementation.
  //
  // Single-core platforms have no concurrency, so this is a simple implementation
  // that just tracks 32-bit rollover (every 49.7 days) without any locking or atomics.

  uint16_t major = this->millis_major_;
  uint32_t last = this->last_millis_;

  // Check for rollover
  if (now < last && (last - now) > HALF_MAX_UINT32) {
    this->millis_major_++;
    major++;
    this->last_millis_ = now;
#ifdef ESPHOME_DEBUG_SCHEDULER
    ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
  } else if (now > last) {
    // Only update if time moved forward
    this->last_millis_ = now;
  }

  // Combine major (high 32 bits) and now (low 32 bits) into a 64-bit time
  return now + (static_cast<uint64_t>(major) << 32);

#elif defined(ESPHOME_THREAD_MULTI_NO_ATOMICS)
  // This is the multi-core, no-atomics implementation.
  //
  // Without atomics, this implementation uses locks more aggressively:
  // 1. Always locks when near the rollover boundary (within 10 seconds)
  // 2. Always locks when detecting a large backwards jump
  // 3. Updates without the lock in normal forward progression (accepting minor races)
  // This is less efficient but necessary without atomic operations.
  uint16_t major = this->millis_major_;
  uint32_t last = this->last_millis_;

  // Define a safe window around the rollover point (10 seconds)
  // This covers any reasonable scheduler delays or thread preemption
  static const uint32_t ROLLOVER_WINDOW = 10000;  // 10 seconds in milliseconds

  // Check if we're near the rollover boundary (close to std::numeric_limits<uint32_t>::max() or just past 0)
  bool near_rollover = (last > (std::numeric_limits<uint32_t>::max() - ROLLOVER_WINDOW)) || (now < ROLLOVER_WINDOW);

  if (near_rollover || (now < last && (last - now) > HALF_MAX_UINT32)) {
    // Near rollover or detected a rollover - need the lock for safety
    LockGuard guard{this->lock_};
    // Re-read with the lock held
    last = this->last_millis_;

    if (now < last && (last - now) > HALF_MAX_UINT32) {
      // True rollover detected (happens every ~49.7 days)
      this->millis_major_++;
      major++;
#ifdef ESPHOME_DEBUG_SCHEDULER
      ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
    }
    // Update last_millis_ while holding the lock
    this->last_millis_ = now;
  } else if (now > last) {
    // Normal case: not near rollover and time moved forward.
    // Update without the lock. While this may cause minor races (microseconds of
    // backwards time movement), they're acceptable because:
    // 1. The scheduler operates at millisecond resolution, not microsecond
    // 2. We've already prevented the critical rollover race condition
    // 3. Any backwards movement is orders of magnitude smaller than scheduler delays
    this->last_millis_ = now;
  }
  // If now <= last and we're not near rollover, don't update.
  // This minimizes backwards time movement.

  // Combine major (high 32 bits) and now (low 32 bits) into a 64-bit time
  return now + (static_cast<uint64_t>(major) << 32);

#elif defined(ESPHOME_THREAD_MULTI_ATOMICS)
  // This is the multi-core, with-atomics implementation.
  //
  // Uses atomic operations with acquire/release semantics to ensure coherent
  // reads of millis_major_ and last_millis_ across cores. Features:
  // 1. Epoch-coherency retry loop to handle concurrent updates
  // 2. Lock only taken for actual rollover detection and update
  // 3. Lock-free CAS updates for normal forward time progression
  // 4. Memory ordering ensures cores see consistent time values

  for (;;) {
    uint16_t major = this->millis_major_.load(std::memory_order_acquire);

    /*
     * Acquire so that if we later decide **not** to take the lock we still
     * observe a `millis_major_` value coherent with the loaded `last_millis_`.
     * The acquire load ensures any later read of `millis_major_` sees its
     * corresponding increment.
     */
    uint32_t last = this->last_millis_.load(std::memory_order_acquire);

    // If we might be near a rollover (large backwards jump), take the lock for the entire operation
    // This ensures rollover detection and the last_millis_ update are atomic together
    if (now < last && (last - now) > HALF_MAX_UINT32) {
      // Potential rollover - need the lock for atomic rollover detection + update
      LockGuard guard{this->lock_};
      // Re-read with the lock held; the mutex already provides ordering
      last = this->last_millis_.load(std::memory_order_relaxed);

      if (now < last && (last - now) > HALF_MAX_UINT32) {
        // True rollover detected (happens every ~49.7 days)
        this->millis_major_.fetch_add(1, std::memory_order_relaxed);
        major++;
#ifdef ESPHOME_DEBUG_SCHEDULER
        ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
      }
      /*
       * Update last_millis_ while holding the lock to prevent races.
       * Publish the new low word *after* bumping `millis_major_` (done above)
       * so readers never see a mismatched pair.
       */
      this->last_millis_.store(now, std::memory_order_release);
    } else {
      // Normal case: try a lock-free update, but only allow forward movement within the same epoch
      // This prevents accidentally moving backwards across a rollover boundary
      while (now > last && (now - last) < HALF_MAX_UINT32) {
        if (this->last_millis_.compare_exchange_weak(last, now,
                                                     std::memory_order_release,     // success
                                                     std::memory_order_relaxed)) {  // failure
          break;
        }
        // CAS failure means no data was published; relaxed is fine
        // `last` is automatically updated by compare_exchange_weak when it fails
      }
    }
    uint16_t major_end = this->millis_major_.load(std::memory_order_relaxed);
    if (major_end == major)
      return now + (static_cast<uint64_t>(major) << 32);
  }
  // Unreachable - the loop always returns when major_end == major
  __builtin_unreachable();

#else
#error \
    "No platform threading model defined. One of ESPHOME_THREAD_SINGLE, ESPHOME_THREAD_MULTI_NO_ATOMICS, or ESPHOME_THREAD_MULTI_ATOMICS must be defined."
#endif
}
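
// Worked example (illustrative): with millis_major_ = 2 and now = 1000, the result is
// 2 * 2^32 + 1000 = 8589935592 ms. A rollover is detected when `now` jumps backwards by
// more than HALF_MAX_UINT32, e.g. last = 4294967000 -> now = 500 gives last - now of
// ~4.29e9 > HALF_MAX_UINT32, so millis_major_ increments; a small backwards jump from
// another thread's slightly stale timestamp does not.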

bool HOT Scheduler::SchedulerItem::cmp(const std::unique_ptr<SchedulerItem> &a,
                                       const std::unique_ptr<SchedulerItem> &b) {
  // High bits are almost always equal (change only on 32-bit rollover ~49 days)
  // Optimize for common case: check low bits first when high bits are equal
  return (a->next_execution_high_ == b->next_execution_high_) ? (a->next_execution_low_ > b->next_execution_low_)
                                                              : (a->next_execution_high_ > b->next_execution_high_);
}
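
// Note (illustrative): std::push_heap / std::pop_heap maintain a max-heap with respect to
// cmp, so defining "less than" as "executes later" keeps the item with the *earliest*
// next_execution at items_[0]. E.g. items due at t=900 and t=500 heapify so that
// items_[0] is the t=500 item, which is what call() and next_schedule_in() peek at.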

// Recycle a SchedulerItem back to the pool for reuse.
// IMPORTANT: Caller must hold the scheduler lock before calling this function.
// This protects scheduler_item_pool_ from concurrent access by other threads
// that may be acquiring items from the pool in set_timer_common_().
void Scheduler::recycle_item_main_loop_(std::unique_ptr<SchedulerItem> item) {
  if (!item)
    return;

  if (this->scheduler_item_pool_.size() < MAX_POOL_SIZE) {
    // Clear callback to release captured resources
    item->callback = nullptr;
    // Clear dynamic name if any
    item->clear_dynamic_name();
    this->scheduler_item_pool_.push_back(std::move(item));
#ifdef ESPHOME_DEBUG_SCHEDULER
    ESP_LOGD(TAG, "Recycled item to pool (pool size now: %zu)", this->scheduler_item_pool_.size());
#endif
  } else {
#ifdef ESPHOME_DEBUG_SCHEDULER
    ESP_LOGD(TAG, "Pool full (size: %zu), deleting item", this->scheduler_item_pool_.size());
#endif
  }
  // else: unique_ptr will delete the item when it goes out of scope
}

#ifdef ESPHOME_DEBUG_SCHEDULER
void Scheduler::debug_log_timer_(const SchedulerItem *item, bool is_static_string, const char *name_cstr,
                                 SchedulerItem::Type type, uint32_t delay, uint64_t now) {
  // Validate static strings in debug mode
  if (is_static_string && name_cstr != nullptr) {
    validate_static_string(name_cstr);
  }

  // Debug logging
  const char *type_str = (type == SchedulerItem::TIMEOUT) ? "timeout" : "interval";
  if (type == SchedulerItem::TIMEOUT) {
    ESP_LOGD(TAG, "set_%s(name='%s/%s', %s=%" PRIu32 ")", type_str, LOG_STR_ARG(item->get_source()),
             name_cstr ? name_cstr : "(null)", type_str, delay);
  } else {
    ESP_LOGD(TAG, "set_%s(name='%s/%s', %s=%" PRIu32 ", offset=%" PRIu32 ")", type_str, LOG_STR_ARG(item->get_source()),
             name_cstr ? name_cstr : "(null)", type_str, delay,
             static_cast<uint32_t>(item->get_next_execution() - now));
  }
}
#endif /* ESPHOME_DEBUG_SCHEDULER */

}  // namespace esphome