make more readable

J. Nick Koston 2025-07-19 10:36:49 -10:00
parent fde80bc530
commit a5f5af9596
2 changed files with 103 additions and 138 deletions


@@ -494,23 +494,23 @@ bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_c
  return total_cancelled > 0;
}

uint64_t Scheduler::millis_64_(uint32_t now) {
  // THREAD SAFETY NOTE:
  // This function has three implementations, based on the precompiler flags
  // - ESPHOME_SINGLE_CORE - Runs on single-core platforms (ESP8266, RP2040, etc.)
  // - ESPHOME_MULTI_CORE_NO_ATOMICS - Runs on multi-core platforms without atomics (LibreTiny)
  // - ESPHOME_MULTI_CORE_ATOMICS - Runs on multi-core platforms with atomics (ESP32, HOST, etc.)
  //
  // Make sure all changes are synchronized if you edit this function.
  //
  // IMPORTANT: Always pass fresh millis() values to this function. The implementation
  // handles out-of-order timestamps between threads, but minimizing time differences
  // helps maintain accuracy.
  //

#ifdef ESPHOME_SINGLE_CORE
  // This is the single core implementation.
  //
  // The implementation handles the 32-bit rollover (every 49.7 days) by:
  // 1. Using a lock when detecting rollover to ensure atomic update
  // 2. Restricting normal updates to forward movement within the same epoch
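The comments above describe the core idea: a 16-bit epoch counter (millis_major_) supplies the upper bits of the 64-bit time, and a large backwards jump in the raw 32-bit value is treated as a rollover. The sketch below is a minimal, single-threaded model of that arithmetic; it is not the scheduler's code, and the names (including the HALF_MAX_UINT32 definition) are assumptions for illustration.

// Hypothetical, single-threaded sketch of the epoch scheme described above.
// Not the scheduler's implementation; names such as HALF_MAX_UINT32 are assumed here.
#include <cstdint>
#include <cstdio>
#include <limits>

namespace {
constexpr uint32_t HALF_MAX_UINT32 = std::numeric_limits<uint32_t>::max() / 2;

uint16_t millis_major = 0;  // upper 16 bits: how many times the 32-bit counter wrapped
uint32_t last_millis = 0;   // last raw 32-bit timestamp seen

uint64_t millis_64(uint32_t now) {
  if (now < last_millis && (last_millis - now) > HALF_MAX_UINT32) {
    // Large backwards jump: the 32-bit counter wrapped (roughly every 49.7 days)
    millis_major++;
    last_millis = now;
  } else if (now > last_millis) {
    last_millis = now;  // normal forward movement within the same epoch
  }
  // Small backwards jumps (stale timestamps) leave last_millis untouched
  return now + (static_cast<uint64_t>(millis_major) << 32);
}
}  // namespace

int main() {
  std::printf("%llu\n", (unsigned long long) millis_64(0xFFFFFF00u));  // just before the wrap
  std::printf("%llu\n", (unsigned long long) millis_64(0x00000010u));  // just after the wrap: still larger
  return 0;
}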
@@ -539,158 +539,121 @@ uint64_t Scheduler::millis_64_(uint32_t now) {
  // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
  return now + (static_cast<uint64_t>(major) << 32);
#endif  // ESPHOME_SINGLE_CORE

#ifdef ESPHOME_MULTI_CORE_NO_ATOMICS
  // This is the multi core no atomics implementation.
  //
  // The implementation handles the 32-bit rollover (every 49.7 days) by:
  // 1. Using a lock when detecting rollover to ensure atomic update
  // 2. Restricting normal updates to forward movement within the same epoch
  // This prevents race conditions at the rollover boundary without requiring
  // 64-bit atomics or locking on every call.
  uint16_t major = this->millis_major_;
  uint32_t last = this->last_millis_;

  // Define a safe window around the rollover point (10 seconds)
  // This covers any reasonable scheduler delays or thread preemption
  static const uint32_t ROLLOVER_WINDOW = 10000;  // 10 seconds in milliseconds

  // Check if we're near the rollover boundary (close to std::numeric_limits<uint32_t>::max() or just past 0)
  bool near_rollover = (last > (std::numeric_limits<uint32_t>::max() - ROLLOVER_WINDOW)) || (now < ROLLOVER_WINDOW);

  if (near_rollover || (now < last && (last - now) > HALF_MAX_UINT32)) {
    // Near rollover or detected a rollover - need lock for safety
    LockGuard guard{this->lock_};
    // Re-read with lock held
    last = this->last_millis_;

    if (now < last && (last - now) > HALF_MAX_UINT32) {
      // True rollover detected (happens every ~49.7 days)
      this->millis_major_++;
      major++;
#ifdef ESPHOME_DEBUG_SCHEDULER
      ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
    }
    // Update last_millis_ while holding lock
    this->last_millis_ = now;
  } else if (now > last) {
    // Normal case: Not near rollover and time moved forward
    // Update without lock. While this may cause minor races (microseconds of
    // backwards time movement), they're acceptable because:
    // 1. The scheduler operates at millisecond resolution, not microsecond
    // 2. We've already prevented the critical rollover race condition
    // 3. Any backwards movement is orders of magnitude smaller than scheduler delays
    this->last_millis_ = now;
  }
  // If now <= last and we're not near rollover, don't update
  // This minimizes backwards time movement

  // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
  return now + (static_cast<uint64_t>(major) << 32);
#endif  // ESPHOME_MULTI_CORE_NO_ATOMICS

#ifdef ESPHOME_MULTI_CORE_ATOMICS
  // This is the multi core with atomics implementation.
  //
  // The implementation handles the 32-bit rollover (every 49.7 days) by:
  // 1. Using a lock when detecting rollover to ensure atomic update
  // 2. Restricting normal updates to forward movement within the same epoch
  // This prevents race conditions at the rollover boundary without requiring
  // 64-bit atomics or locking on every call.
  for (;;) {
    uint16_t major = this->millis_major_.load(std::memory_order_acquire);

    /*
     * Acquire so that if we later decide **not** to take the lock we still
     * observe a `millis_major_` value coherent with the loaded `last_millis_`.
     * The acquire load ensures any later read of `millis_major_` sees its
     * corresponding increment.
     */
    uint32_t last = this->last_millis_.load(std::memory_order_acquire);

    // If we might be near a rollover (large backwards jump), take the lock for the entire operation
    // This ensures rollover detection and last_millis_ update are atomic together
    if (now < last && (last - now) > HALF_MAX_UINT32) {
      // Potential rollover - need lock for atomic rollover detection + update
      LockGuard guard{this->lock_};
      // Re-read with lock held; mutex already provides ordering
      last = this->last_millis_.load(std::memory_order_relaxed);

      if (now < last && (last - now) > HALF_MAX_UINT32) {
        // True rollover detected (happens every ~49.7 days)
        this->millis_major_.fetch_add(1, std::memory_order_relaxed);
        major++;
#ifdef ESPHOME_DEBUG_SCHEDULER
        ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
      }
      /*
       * Update last_millis_ while holding the lock to prevent races
       * Publish the new low-word *after* bumping `millis_major_` (done above)
       * so readers never see a mismatched pair.
       */
      this->last_millis_.store(now, std::memory_order_release);
    } else {
      // Normal case: Try lock-free update, but only allow forward movement within same epoch
      // This prevents accidentally moving backwards across a rollover boundary
      while (now > last && (now - last) < HALF_MAX_UINT32) {
        if (this->last_millis_.compare_exchange_weak(last, now,
                                                     std::memory_order_release,     // success
                                                     std::memory_order_relaxed)) {  // failure
          break;
        }
        // CAS failure means no data was published; relaxed is fine
        // last is automatically updated by compare_exchange_weak if it fails
      }
    }

    uint16_t major_end = this->millis_major_.load(std::memory_order_relaxed);
    if (major_end == major)
      return now + (static_cast<uint64_t>(major) << 32);
  }
#endif  // ESPHOME_MULTI_CORE_ATOMICS
}
bool HOT Scheduler::SchedulerItem::cmp(const std::unique_ptr<SchedulerItem> &a,
const std::unique_ptr<SchedulerItem> &b) {

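The atomics path above combines two safeguards: the low 32-bit word only ever moves forward within an epoch (via compare_exchange_weak), and the epoch counter is re-read after the update so that a concurrent rollover forces a retry rather than returning a mismatched (major, now) pair. Below is a minimal standalone model of that lock-free fast path, with hypothetical names and the locked rollover branch omitted; it is a sketch, not the ESPHome implementation.

// Hypothetical model of the lock-free fast path; not the scheduler's code.
// The locked rollover path shown in the diff above is intentionally omitted.
#include <atomic>
#include <cstdint>
#include <limits>

namespace {
constexpr uint32_t HALF_MAX_UINT32 = std::numeric_limits<uint32_t>::max() / 2;

std::atomic<uint16_t> millis_major{0};  // epoch (upper 16 bits)
std::atomic<uint32_t> last_millis{0};   // last raw 32-bit timestamp

uint64_t millis_64_fast_path(uint32_t now) {
  for (;;) {
    uint16_t major = millis_major.load(std::memory_order_acquire);
    uint32_t last = last_millis.load(std::memory_order_acquire);

    // Forward-only CAS within the same epoch; backwards or rollover-sized jumps are not handled here
    while (now > last && (now - last) < HALF_MAX_UINT32) {
      if (last_millis.compare_exchange_weak(last, now, std::memory_order_release,
                                            std::memory_order_relaxed)) {
        break;  // our forward update was published
      }
      // On failure compare_exchange_weak reloaded `last`; the loop re-checks the condition
    }

    // If the epoch changed while we worked, (major, now) may be mismatched: retry
    if (millis_major.load(std::memory_order_relaxed) == major)
      return now + (static_cast<uint64_t>(major) << 32);
  }
}
}  // namespace

int main() { return millis_64_fast_path(1234) == 1234 ? 0 : 1; }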

@@ -209,6 +209,8 @@ class Scheduler {
// Single-core platforms don't need the defer queue and save 40 bytes of RAM
std::deque<std::unique_ptr<SchedulerItem>> defer_queue_; // FIFO queue for defer() calls
#endif /* ESPHOME_SINGLE_CORE */
uint32_t to_remove_{0};
#ifdef ESPHOME_MULTI_CORE_ATOMICS
/*
* Multi-threaded platforms with atomic support: last_millis_ needs atomic for lock-free updates
@@ -225,6 +227,7 @@ class Scheduler {
// Platforms without atomic support or single-threaded platforms
uint32_t last_millis_{0};
#endif /* else ESPHOME_MULTI_CORE_ATOMICS */
/*
* Upper 16 bits of the 64-bit millis counter. Incremented only while holding
* `lock_`; read concurrently. Atomic (relaxed) avoids a formal data race.
@@ -236,7 +239,6 @@ class Scheduler {
#else /* not ESPHOME_MULTI_CORE_ATOMICS */
uint16_t millis_major_{0};
#endif /* else ESPHOME_MULTI_CORE_ATOMICS */
};
} // namespace esphome
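For context on the header change: last_millis_ and millis_major_ are atomic only on platforms that support atomics, and plain integers elsewhere, where the scheduler lock provides the needed ordering. A rough sketch of that conditional-member pattern, with illustrative macro and class names rather than the ESPHome ones:

// Hypothetical sketch of conditionally-atomic members; names are illustrative only.
#include <atomic>
#include <cstdint>

#define MULTI_CORE_ATOMICS 1  // stand-in for ESPHOME_MULTI_CORE_ATOMICS

class TimeState {
 protected:
#if MULTI_CORE_ATOMICS
  // Multi-core with atomics: lock-free reads and updates from any thread
  std::atomic<uint32_t> last_millis_{0};
  std::atomic<uint16_t> millis_major_{0};
#else
  // Single-core or no-atomics platforms: plain fields, protected by a lock where needed
  uint32_t last_millis_{0};
  uint16_t millis_major_{0};
#endif
  uint32_t to_remove_{0};
};

int main() {
  TimeState state;
  (void) state;
  return 0;
}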