Mirror of https://github.com/esphome/esphome.git
make more readable

commit 58696961bd
parent a5f5af9596
@@ -511,15 +511,10 @@ uint64_t Scheduler::millis_64_(uint32_t now) {
 #ifdef ESPHOME_SINGLE_CORE
   // This is the single core implementation.
   //
-  // The implementation handles the 32-bit rollover (every 49.7 days) by:
-  // 1. Using a lock when detecting rollover to ensure atomic update
-  // 2. Restricting normal updates to forward movement within the same epoch
-  // This prevents race conditions at the rollover boundary without requiring
-  // 64-bit atomics or locking on every call.
+  // Single-core platforms have no concurrency, so this is a simple implementation
+  // that just tracks 32-bit rollover (every 49.7 days) without any locking or atomics.
 
   uint16_t major = this->millis_major_;
-
-  // Single-core platforms: No atomics needed
   uint32_t last = this->last_millis_;
 
   // Check for rollover
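For reference, a minimal standalone sketch of what the single-core path boils down to. The free function and the global names (g_millis_major, g_last_millis) are illustrative stand-ins for the Scheduler members, not ESPHome's actual API:

#include <cstdint>

static uint16_t g_millis_major = 0;  // counts 32-bit rollovers ("epochs")
static uint32_t g_last_millis = 0;   // last observed 32-bit tick

uint64_t millis_64_single_core(uint32_t now) {
  // A rollover shows up as `now` jumping far backwards past the last tick.
  if (now < g_last_millis && (g_last_millis - now) > (UINT32_MAX / 2)) {
    g_millis_major++;  // entered the next 49.7-day epoch
  }
  g_last_millis = now;
  // Combine epoch count (high 32 bits) with the raw tick (low 32 bits).
  return now + (static_cast<uint64_t>(g_millis_major) << 32);
}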
@@ -538,29 +533,28 @@ uint64_t Scheduler::millis_64_(uint32_t now) {
 
   // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
   return now + (static_cast<uint64_t>(major) << 32);
-}
 #endif  // ESPHOME_SINGLE_CORE
 
 #ifdef ESPHOME_MULTI_CORE_NO_ATOMICS
   // This is the multi core no atomics implementation.
   //
   // The implementation handles the 32-bit rollover (every 49.7 days) by:
   // 1. Using a lock when detecting rollover to ensure atomic update
   // 2. Restricting normal updates to forward movement within the same epoch
   // This prevents race conditions at the rollover boundary without requiring
   // 64-bit atomics or locking on every call.
 
   uint16_t major = this->millis_major_;
   uint32_t last = this->last_millis_;
 
   // Define a safe window around the rollover point (10 seconds)
   // This covers any reasonable scheduler delays or thread preemption
   static const uint32_t ROLLOVER_WINDOW = 10000;  // 10 seconds in milliseconds
 
   // Check if we're near the rollover boundary (close to std::numeric_limits<uint32_t>::max() or just past 0)
   bool near_rollover = (last > (std::numeric_limits<uint32_t>::max() - ROLLOVER_WINDOW)) || (now < ROLLOVER_WINDOW);
 
   if (near_rollover || (now < last && (last - now) > HALF_MAX_UINT32)) {
     // Near rollover or detected a rollover - need lock for safety
     LockGuard guard{this->lock_};
     // Re-read with lock held
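The no-atomics strategy spans this hunk and the next two: take the lock only inside a guard window around the 32-bit wrap point (or once a wrap is already visible), and allow lock-free forward updates everywhere else. A self-contained sketch of that shape, with std::mutex and file-scope globals standing in for ESPHome's lock_/LockGuard and the Scheduler members:

#include <cstdint>
#include <limits>
#include <mutex>

static std::mutex g_lock;
static uint16_t g_major = 0;
static uint32_t g_last = 0;
static const uint32_t HALF_MAX = std::numeric_limits<uint32_t>::max() / 2;

uint64_t millis_64_no_atomics(uint32_t now) {
  static const uint32_t ROLLOVER_WINDOW = 10000;  // 10 s guard band
  uint16_t major = g_major;
  uint32_t last = g_last;

  // Lock only near the wrap point; the common path stays lock-free.
  bool near_rollover = (last > std::numeric_limits<uint32_t>::max() - ROLLOVER_WINDOW) ||
                       (now < ROLLOVER_WINDOW);

  if (near_rollover || (now < last && (last - now) > HALF_MAX)) {
    std::lock_guard<std::mutex> guard(g_lock);
    last = g_last;  // re-read shared state with the lock held
    major = g_major;
    if (now < last && (last - now) > HALF_MAX) {
      g_major = ++major;  // exactly one caller advances the epoch
    }
    g_last = now;  // update while holding the lock
  } else if (now > last) {
    g_last = now;  // forward movement within the epoch: benign race
  }
  // If now <= last away from rollover, skip the update to minimize
  // backwards time movement.
  return now + (static_cast<uint64_t>(major) << 32);
}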
@@ -576,7 +570,7 @@ if (near_rollover || (now < last && (last - now) > HALF_MAX_UINT32)) {
     }
     // Update last_millis_ while holding lock
     this->last_millis_ = now;
   } else if (now > last) {
     // Normal case: Not near rollover and time moved forward
     // Update without lock. While this may cause minor races (microseconds of
     // backwards time movement), they're acceptable because:
@@ -584,24 +578,24 @@ if (near_rollover || (now < last && (last - now) > HALF_MAX_UINT32)) {
     // 2. We've already prevented the critical rollover race condition
     // 3. Any backwards movement is orders of magnitude smaller than scheduler delays
     this->last_millis_ = now;
   }
   // If now <= last and we're not near rollover, don't update
   // This minimizes backwards time movement
 
   // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
   return now + (static_cast<uint64_t>(major) << 32);
 #endif  // ESPHOME_MULTI_CORE_NO_ATOMICS
 
 #ifdef ESPHOME_MULTI_CORE_ATOMICS
   // This is the multi core with atomics implementation.
   //
   // The implementation handles the 32-bit rollover (every 49.7 days) by:
   // 1. Using a lock when detecting rollover to ensure atomic update
   // 2. Restricting normal updates to forward movement within the same epoch
   // This prevents race conditions at the rollover boundary without requiring
   // 64-bit atomics or locking on every call.
 
   for (;;) {
     uint16_t major = this->millis_major_.load(std::memory_order_acquire);
 
     /*
@@ -650,9 +644,8 @@ for (;;) {
     uint16_t major_end = this->millis_major_.load(std::memory_order_relaxed);
     if (major_end == major)
       return now + (static_cast<uint64_t>(major) << 32);
   }
 #endif  // ESPHOME_MULTI_CORE_ATOMICS
-
 }
 
 bool HOT Scheduler::SchedulerItem::cmp(const std::unique_ptr<SchedulerItem> &a,
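The body of the atomics retry loop is collapsed in this view, so the following is only a speculative sketch of the general pattern the visible lines imply: load millis_major_ with acquire semantics, update the 32-bit tick, then re-check the epoch and retry if a rollover raced with the read. The global names and the CAS-based update are assumptions for illustration, not the hidden code:

#include <atomic>
#include <cstdint>

static std::atomic<uint16_t> g_major{0};
static std::atomic<uint32_t> g_last{0};

uint64_t millis_64_atomics(uint32_t now) {
  for (;;) {
    uint16_t major = g_major.load(std::memory_order_acquire);
    uint32_t last = g_last.load(std::memory_order_relaxed);

    if (now < last && (last - now) > (UINT32_MAX / 2)) {
      // Apparent rollover: only the caller whose CAS wins advances the epoch.
      if (g_last.compare_exchange_strong(last, now, std::memory_order_relaxed)) {
        g_major.fetch_add(1, std::memory_order_release);
      }
      continue;  // retry so (major, now) is re-paired consistently
    }
    if (now > last) {
      g_last.compare_exchange_weak(last, now, std::memory_order_relaxed);
    }
    // If no epoch change happened while we worked, the pair is consistent.
    uint16_t major_end = g_major.load(std::memory_order_relaxed);
    if (major_end == major)
      return now + (static_cast<uint64_t>(major) << 32);
  }
}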