From a5f5af9596d83dbd3518fd4720ddc93d1a24b8c8 Mon Sep 17 00:00:00 2001
From: "J. Nick Koston"
Date: Sat, 19 Jul 2025 10:36:49 -1000
Subject: [PATCH] make more readable

---
 esphome/core/scheduler.cpp | 237 ++++++++++++++++---------------------
 esphome/core/scheduler.h   |   4 +-
 2 files changed, 103 insertions(+), 138 deletions(-)

diff --git a/esphome/core/scheduler.cpp b/esphome/core/scheduler.cpp
index ee4ec3818c..317426b51e 100644
--- a/esphome/core/scheduler.cpp
+++ b/esphome/core/scheduler.cpp
@@ -494,23 +494,23 @@ bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_c
   return total_cancelled > 0;
 }
 
-#ifdef ESPHOME_SINGLE_CORE
-
 uint64_t Scheduler::millis_64_(uint32_t now) {
   // THREAD SAFETY NOTE:
-  // This function has three implemenations, based on the precompiler flags
-  // - ESPHOME_SINGLE_CORE
-  // - ESPHOME_MULTI_CORE_NO_ATOMICS
-  // - ESPHOME_MULTI_CORE_ATOMICS
+  // This function has three implementations, based on the precompiler flags
+  // - ESPHOME_SINGLE_CORE - Runs on single-core platforms (ESP8266, RP2040, etc.)
+  // - ESPHOME_MULTI_CORE_NO_ATOMICS - Runs on multi-core platforms without atomics (LibreTiny)
+  // - ESPHOME_MULTI_CORE_ATOMICS - Runs on multi-core platforms with atomics (ESP32, HOST, etc.)
   //
-  // Make sure all changes are synchronous if you edit this function.
-  //
-  // This is the single core implementation.
+  // Make sure all changes are synchronized if you edit this function.
   //
   // IMPORTANT: Always pass fresh millis() values to this function. The implementation
   // handles out-of-order timestamps between threads, but minimizing time differences
   // helps maintain accuracy.
   //
+
+#ifdef ESPHOME_SINGLE_CORE
+  // This is the single core implementation.
+  //
   // The implementation handles the 32-bit rollover (every 49.7 days) by:
   // 1. Using a lock when detecting rollover to ensure atomic update
   // 2. Restricting normal updates to forward movement within the same epoch
   // This prevents race conditions at the rollover boundary without requiring
   // 64-bit atomics or locking on every call.
@@ -539,158 +539,121 @@ uint64_t Scheduler::millis_64_(uint32_t now) {
   // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
   return now + (static_cast<uint64_t>(major) << 32);
-}
-
-#endif
+#endif  // ESPHOME_SINGLE_CORE
 
 #ifdef ESPHOME_MULTI_CORE_NO_ATOMICS
+// This is the multi core no atomics implementation.
+//
+// The implementation handles the 32-bit rollover (every 49.7 days) by:
+// 1. Using a lock when detecting rollover to ensure atomic update
+// 2. Restricting normal updates to forward movement within the same epoch
+// This prevents race conditions at the rollover boundary without requiring
+// 64-bit atomics or locking on every call.
-
-uint64_t Scheduler::millis_64_(uint32_t now) {
-  // THREAD SAFETY NOTE:
-  // This function has three implemenations, based on the precompiler flags
-  // - ESPHOME_SINGLE_CORE
-  // - ESPHOME_MULTI_CORE_NO_ATOMICS
-  // - ESPHOME_MULTI_CORE_ATOMICS
-  //
-  // Make sure all changes are synchronous if you edit this function.
-  //
-  // This is the multi core no atomics implementation.
-  //
-  // IMPORTANT: Always pass fresh millis() values to this function. The implementation
-  // handles out-of-order timestamps between threads, but minimizing time differences
-  // helps maintain accuracy.
-  //
-  // The implementation handles the 32-bit rollover (every 49.7 days) by:
-  // 1. Using a lock when detecting rollover to ensure atomic update
-  // 2. Restricting normal updates to forward movement within the same epoch
-  // This prevents race conditions at the rollover boundary without requiring
-  // 64-bit atomics or locking on every call.
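The epoch ("major") scheme that both the old and new code implement is easier to see outside the scheduler. Below is a minimal single-threaded sketch, not ESPHome's exact code: `millis_major` and `last_millis` mirror the members used in the diff, and `HALF_MAX_UINT32` is assumed to be 2^31, the rollover threshold the patch uses.

#include <cstdint>

// Minimal sketch of the epoch scheme described in the comments above:
// a backwards jump of more than half the 32-bit range is treated as a
// rollover of millis(), and a 16-bit epoch counter supplies the high bits.
static constexpr uint32_t HALF_MAX_UINT32 = 0x80000000UL;

static uint16_t millis_major = 0;  // upper 16 bits (rollover epoch)
static uint32_t last_millis = 0;   // last observed 32-bit millis

uint64_t millis_64(uint32_t now) {
  if (now < last_millis && (last_millis - now) > HALF_MAX_UINT32) {
    millis_major++;     // 32-bit counter wrapped (every ~49.7 days)
    last_millis = now;
  } else if (now > last_millis) {
    last_millis = now;  // normal forward movement within the same epoch
  }
  // High bits: epoch; low 32 bits: the current millis() reading
  return now + (static_cast<uint64_t>(millis_major) << 32);
}

With this shape in mind, the three #ifdef variants below differ only in how they guard `millis_major` and `last_millis` against concurrent callers.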
+uint16_t major = this->millis_major_;
+uint32_t last = this->last_millis_;
-  uint16_t major = this->millis_major_;
+// Define a safe window around the rollover point (10 seconds)
+// This covers any reasonable scheduler delays or thread preemption
+static const uint32_t ROLLOVER_WINDOW = 10000;  // 10 seconds in milliseconds
-  // LibreTiny: Multi-threaded but lacks atomic operation support
-  // TODO: If LibreTiny ever adds atomic support, remove this entire block and
-  // let it fall through to the atomic-based implementation below
-  // We need to use a lock when near the rollover boundary to prevent races
-  uint32_t last = this->last_millis_;
+// Check if we're near the rollover boundary (close to std::numeric_limits<uint32_t>::max() or just past 0)
+bool near_rollover = (last > (std::numeric_limits<uint32_t>::max() - ROLLOVER_WINDOW)) || (now < ROLLOVER_WINDOW);
-  // Define a safe window around the rollover point (10 seconds)
-  // This covers any reasonable scheduler delays or thread preemption
-  static const uint32_t ROLLOVER_WINDOW = 10000;  // 10 seconds in milliseconds
+if (near_rollover || (now < last && (last - now) > HALF_MAX_UINT32)) {
+  // Near rollover or detected a rollover - need lock for safety
+  LockGuard guard{this->lock_};
+  // Re-read with lock held
+  last = this->last_millis_;
-  // Check if we're near the rollover boundary (close to std::numeric_limits<uint32_t>::max() or just past 0)
-  bool near_rollover = (last > (std::numeric_limits<uint32_t>::max() - ROLLOVER_WINDOW)) || (now < ROLLOVER_WINDOW);
+  if (now < last && (last - now) > HALF_MAX_UINT32) {
+    // True rollover detected (happens every ~49.7 days)
+    this->millis_major_++;
+    major++;
+#ifdef ESPHOME_DEBUG_SCHEDULER
+    ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
+#endif /* ESPHOME_DEBUG_SCHEDULER */
+  }
+  // Update last_millis_ while holding lock
+  this->last_millis_ = now;
+} else if (now > last) {
+  // Normal case: Not near rollover and time moved forward
+  // Update without lock. While this may cause minor races (microseconds of
+  // backwards time movement), they're acceptable because:
+  // 1. The scheduler operates at millisecond resolution, not microsecond
+  // 2. We've already prevented the critical rollover race condition
+  // 3. Any backwards movement is orders of magnitude smaller than scheduler delays
+  this->last_millis_ = now;
+}
+// If now <= last and we're not near rollover, don't update
+// This minimizes backwards time movement
-  if (near_rollover || (now < last && (last - now) > HALF_MAX_UINT32)) {
-    // Near rollover or detected a rollover - need lock for safety
+// Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
+return now + (static_cast<uint64_t>(major) << 32);
+#endif  // ESPHOME_MULTI_CORE_NO_ATOMICS
+
+#ifdef ESPHOME_MULTI_CORE_ATOMICS
+// This is the multi core with atomics implementation.
+//
+// The implementation handles the 32-bit rollover (every 49.7 days) by:
+// 1. Using a lock when detecting rollover to ensure atomic update
+// 2. Restricting normal updates to forward movement within the same epoch
+// This prevents race conditions at the rollover boundary without requiring
+// 64-bit atomics or locking on every call.
+
+for (;;) {
+  uint16_t major = this->millis_major_.load(std::memory_order_acquire);
+
+  /*
+   * Acquire so that if we later decide **not** to take the lock we still
+   * observe a `millis_major_` value coherent with the loaded `last_millis_`.
+   * The acquire load ensures any later read of `millis_major_` sees its
+   * corresponding increment.
+   */
+  uint32_t last = this->last_millis_.load(std::memory_order_acquire);
+
+  // If we might be near a rollover (large backwards jump), take the lock for the entire operation
+  // This ensures rollover detection and last_millis_ update are atomic together
+  if (now < last && (last - now) > HALF_MAX_UINT32) {
+    // Potential rollover - need lock for atomic rollover detection + update
     LockGuard guard{this->lock_};
-    // Re-read with lock held
-    last = this->last_millis_;
+    // Re-read with lock held; mutex already provides ordering
+    last = this->last_millis_.load(std::memory_order_relaxed);
 
     if (now < last && (last - now) > HALF_MAX_UINT32) {
       // True rollover detected (happens every ~49.7 days)
-      this->millis_major_++;
+      this->millis_major_.fetch_add(1, std::memory_order_relaxed);
       major++;
 #ifdef ESPHOME_DEBUG_SCHEDULER
       ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
 #endif /* ESPHOME_DEBUG_SCHEDULER */
     }
-    // Update last_millis_ while holding lock
-    this->last_millis_ = now;
-  } else if (now > last) {
-    // Normal case: Not near rollover and time moved forward
-    // Update without lock. While this may cause minor races (microseconds of
-    // backwards time movement), they're acceptable because:
-    // 1. The scheduler operates at millisecond resolution, not microsecond
-    // 2. We've already prevented the critical rollover race condition
-    // 3. Any backwards movement is orders of magnitude smaller than scheduler delays
-    this->last_millis_ = now;
-  }
-  // If now <= last and we're not near rollover, don't update
-  // This minimizes backwards time movement
-
-  // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
-  return now + (static_cast<uint64_t>(major) << 32);
-}
-
-#endif
-
-#ifdef ESPHOME_MULTI_CORE_ATOMICS
-
-uint64_t Scheduler::millis_64_(uint32_t now) {
-  // THREAD SAFETY NOTE:
-  // This function has three implemenations, based on the precompiler flags
-  // - ESPHOME_SINGLE_CORE
-  // - ESPHOME_MULTI_CORE_NO_ATOMICS
-  // - ESPHOME_MULTI_CORE_ATOMICS
-  //
-  // Make sure all changes are synchronous if you edit this function.
-  //
-  // This is the multi core with atomics implementation.
-  //
-  // IMPORTANT: Always pass fresh millis() values to this function. The implementation
-  // handles out-of-order timestamps between threads, but minimizing time differences
-  // helps maintain accuracy.
-  //
-  // The implementation handles the 32-bit rollover (every 49.7 days) by:
-  // 1. Using a lock when detecting rollover to ensure atomic update
-  // 2. Restricting normal updates to forward movement within the same epoch
-  // This prevents race conditions at the rollover boundary without requiring
-  // 64-bit atomics or locking on every call.
-
-  for (;;) {
-    uint16_t major = this->millis_major_.load(std::memory_order_acquire);
-    /*
-     * Multi-threaded platforms with atomic support (ESP32)
-     * Acquire so that if we later decide **not** to take the lock we still
-     * observe a `millis_major_` value coherent with the loaded `last_millis_`.
-     * The acquire load ensures any later read of `millis_major_` sees its
-     * corresponding increment.
+    /*
+     * Update last_millis_ while holding the lock to prevent races
+     * Publish the new low-word *after* bumping `millis_major_` (done above)
+     * so readers never see a mismatched pair.
+     */
-    uint32_t last = this->last_millis_.load(std::memory_order_acquire);
-
-    // If we might be near a rollover (large backwards jump), take the lock for the entire operation
-    // This ensures rollover detection and last_millis_ update are atomic together
-    if (now < last && (last - now) > HALF_MAX_UINT32) {
-      // Potential rollover - need lock for atomic rollover detection + update
-      LockGuard guard{this->lock_};
-      // Re-read with lock held; mutex already provides ordering
-      last = this->last_millis_.load(std::memory_order_relaxed);
-
-      if (now < last && (last - now) > HALF_MAX_UINT32) {
-        // True rollover detected (happens every ~49.7 days)
-        this->millis_major_.fetch_add(1, std::memory_order_relaxed);
-        major++;
-#ifdef ESPHOME_DEBUG_SCHEDULER
-        ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
-#endif /* ESPHOME_DEBUG_SCHEDULER */
-      }
-      /*
-       * Update last_millis_ while holding the lock to prevent races
-       * Publish the new low-word *after* bumping `millis_major_` (done above)
-       * so readers never see a mismatched pair.
-       */
-      this->last_millis_.store(now, std::memory_order_release);
-    } else {
-      // Normal case: Try lock-free update, but only allow forward movement within same epoch
-      // This prevents accidentally moving backwards across a rollover boundary
-      while (now > last && (now - last) < HALF_MAX_UINT32) {
-        if (this->last_millis_.compare_exchange_weak(last, now,
-                                                     std::memory_order_release,   // success
-                                                     std::memory_order_relaxed)) {  // failure
-          break;
-        }
-        // CAS failure means no data was published; relaxed is fine
-        // last is automatically updated by compare_exchange_weak if it fails
+    this->last_millis_.store(now, std::memory_order_release);
+  } else {
+    // Normal case: Try lock-free update, but only allow forward movement within same epoch
+    // This prevents accidentally moving backwards across a rollover boundary
+    while (now > last && (now - last) < HALF_MAX_UINT32) {
+      if (this->last_millis_.compare_exchange_weak(last, now,
+                                                   std::memory_order_release,   // success
+                                                   std::memory_order_relaxed)) {  // failure
+        break;
+      }
+      // CAS failure means no data was published; relaxed is fine
+      // last is automatically updated by compare_exchange_weak if it fails
-      }
-    }
-    uint16_t major_end = this->millis_major_.load(std::memory_order_relaxed);
-    if (major_end == major)
-      return now + (static_cast<uint64_t>(major) << 32);
-  }
+    }
+  }
+  uint16_t major_end = this->millis_major_.load(std::memory_order_relaxed);
+  if (major_end == major)
+    return now + (static_cast<uint64_t>(major) << 32);
 }
+#endif  // ESPHOME_MULTI_CORE_ATOMICS
 
-#endif
+}
 
 bool HOT Scheduler::SchedulerItem::cmp(const std::unique_ptr<SchedulerItem> &a,
                                        const std::unique_ptr<SchedulerItem> &b) {
 
diff --git a/esphome/core/scheduler.h b/esphome/core/scheduler.h
index 0e3bca22a6..c9c2008718 100644
--- a/esphome/core/scheduler.h
+++ b/esphome/core/scheduler.h
@@ -209,6 +209,8 @@ class Scheduler {
   // Single-core platforms don't need the defer queue and save 40 bytes of RAM
   std::deque<std::unique_ptr<SchedulerItem>> defer_queue_;  // FIFO queue for defer() calls
 #endif /* ESPHOME_SINGLE_CORE */
+  uint32_t to_remove_{0};
+
 #ifdef ESPHOME_MULTI_CORE_ATOMICS
   /*
    * Multi-threaded platforms with atomic support: last_millis_ needs atomic for lock-free updates
@@ -225,6 +227,7 @@ class Scheduler {
   // Platforms without atomic support or single-threaded platforms
   uint32_t last_millis_{0};
 #endif /* else ESPHOME_MULTI_CORE_ATOMICS */
+
   /*
    * Upper 16 bits of the 64-bit millis counter. Incremented only while holding
    * `lock_`; read concurrently. Atomic (relaxed) avoids a formal data race.
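The lock-free branch is the subtle part of the atomics path. Here is a hedged, self-contained sketch of the same forward-only compare-and-swap pattern, assuming nothing beyond std::atomic<uint32_t>; `advance` is an illustrative name, not scheduler API:

#include <atomic>
#include <cstdint>

static constexpr uint32_t HALF_MAX_UINT32 = 0x80000000UL;
std::atomic<uint32_t> last_millis{0};

// Forward-only, lock-free update: the CAS succeeds only if last_millis
// still holds the value we loaded, so concurrent writers can never move
// the published time backwards within the same epoch.
void advance(uint32_t now) {
  uint32_t last = last_millis.load(std::memory_order_acquire);
  while (now > last && (now - last) < HALF_MAX_UINT32) {
    if (last_millis.compare_exchange_weak(last, now,
                                          std::memory_order_release,    // success
                                          std::memory_order_relaxed)) {  // failure
      break;
    }
    // On failure compare_exchange_weak reloads `last`, and the loop
    // condition re-checks that `now` is still a forward move.
  }
}

The `(now - last) < HALF_MAX_UINT32` guard is what stops a stale writer from "advancing" across a rollover boundary: a jump that large is treated as a wrap, which only the locked branch is allowed to publish.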
@@ -236,7 +239,6 @@ class Scheduler {
 #else /* not ESPHOME_MULTI_CORE_ATOMICS */
   uint16_t millis_major_{0};
 #endif /* else ESPHOME_MULTI_CORE_ATOMICS */
-  uint32_t to_remove_{0};
 };
 
 }  // namespace esphome
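One last note on the no-atomics (LibreTiny) path: the window test is what keeps the lock off the hot path. This is a sketch of that predicate under the same 10-second constant as the patch; `near_rollover` as a free function is illustrative only:

#include <cstdint>
#include <limits>

// Take the lock whenever `last` is within ROLLOVER_WINDOW below the 32-bit
// wrap point or `now` is within ROLLOVER_WINDOW above it, so the epoch bump
// and the low-word update always happen together under the lock.
static constexpr uint32_t ROLLOVER_WINDOW = 10000;  // 10 s, as in the patch

bool near_rollover(uint32_t now, uint32_t last) {
  return last > (std::numeric_limits<uint32_t>::max() - ROLLOVER_WINDOW) ||
         now < ROLLOVER_WINDOW;
}

With these values, near_rollover(5, 0xFFFFFFF0) is true, so updates just before and just after the wrap both take the lock; everywhere else the update is a plain unlocked store, which the patch argues is acceptable at millisecond resolution.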