core/scheduler: split millis_64_ into different platform functions

RubenKelevra 2025-07-19 21:44:35 +02:00
parent 211739bba0
commit fde80bc530
3 changed files with 178 additions and 114 deletions


@@ -229,14 +229,19 @@
 #define USE_SOCKET_SELECT_SUPPORT
 #endif
-// Helper macro for platforms that lack atomic scheduler support
+// Helper macro for single-core platforms that lack atomic scheduler support
 #if defined(USE_ESP8266) || defined(USE_RP2040)
 #define ESPHOME_SINGLE_CORE
 #endif
-// Helper macro for platforms with atomic scheduler support
+// Helper macro for multi-core platforms that lack atomic scheduler support
+#if !defined(ESPHOME_SINGLE_CORE) && defined(USE_LIBRETINY)
+#define ESPHOME_MULTI_CORE_NO_ATOMICS
+#endif
+// Helper macro for multi-core platforms with atomic scheduler support
 #if !defined(ESPHOME_SINGLE_CORE) && !defined(USE_LIBRETINY)
-#define ESPHOME_ATOMIC_SCHEDULER
+#define ESPHOME_MULTI_CORE_ATOMICS
 #endif
 // Disabled feature flags
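These three macros are mutually exclusive: every supported platform lands in exactly one scheduler mode. As an illustration only (this guard is not part of the commit), a compile-time check of that invariant could look like:

#if defined(ESPHOME_SINGLE_CORE) + defined(ESPHOME_MULTI_CORE_NO_ATOMICS) + \
    defined(ESPHOME_MULTI_CORE_ATOMICS) != 1
#error "Exactly one scheduler threading mode must be selected"
#endif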


@@ -273,15 +273,15 @@ void HOT Scheduler::call(uint32_t now) {
     if (now_64 - last_print > 2000) {
       last_print = now_64;
       std::vector<std::unique_ptr<SchedulerItem>> old_items;
-#ifdef ESPHOME_ATOMIC_SCHEDULER
+#ifdef ESPHOME_MULTI_CORE_ATOMICS
       const auto last_dbg = this->last_millis_.load(std::memory_order_relaxed);
       const auto major_dbg = this->millis_major_.load(std::memory_order_relaxed);
       ESP_LOGD(TAG, "Items: count=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(), now_64,
                major_dbg, last_dbg);
-#else /* not ESPHOME_ATOMIC_SCHEDULER */
+#else /* not ESPHOME_MULTI_CORE_ATOMICS */
       ESP_LOGD(TAG, "Items: count=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(), now_64,
                this->millis_major_, this->last_millis_);
-#endif /* else ESPHOME_ATOMIC_SCHEDULER */
+#endif /* else ESPHOME_MULTI_CORE_ATOMICS */
       while (!this->empty_()) {
         std::unique_ptr<SchedulerItem> item;
         {
@@ -494,10 +494,66 @@ bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_c
   return total_cancelled > 0;
 }
+#ifdef ESPHOME_SINGLE_CORE
+uint64_t Scheduler::millis_64_(uint32_t now) {
+  // THREAD SAFETY NOTE:
+  // This function has three implementations, selected by preprocessor flags:
+  // - ESPHOME_SINGLE_CORE
+  // - ESPHOME_MULTI_CORE_NO_ATOMICS
+  // - ESPHOME_MULTI_CORE_ATOMICS
+  //
+  // Keep all three implementations in sync when editing this function.
+  //
+  // This is the single-core implementation.
+  //
+  // IMPORTANT: Always pass fresh millis() values to this function. The implementation
+  // handles out-of-order timestamps between threads, but minimizing time differences
+  // helps maintain accuracy.
+  //
+  // The implementation handles the 32-bit rollover (every 49.7 days) by:
+  // 1. Using a lock when detecting rollover to ensure atomic update
+  // 2. Restricting normal updates to forward movement within the same epoch
+  // This prevents race conditions at the rollover boundary without requiring
+  // 64-bit atomics or locking on every call.
+  uint16_t major = this->millis_major_;
+  // Single-core platforms: no atomics needed
+  uint32_t last = this->last_millis_;
+  // Check for rollover
+  if (now < last && (last - now) > HALF_MAX_UINT32) {
+    this->millis_major_++;
+    major++;
+#ifdef ESPHOME_DEBUG_SCHEDULER
+    ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
+#endif /* ESPHOME_DEBUG_SCHEDULER */
+  }
+  // Only update if time moved forward
+  if (now > last) {
+    this->last_millis_ = now;
+  }
+  // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
+  return now + (static_cast<uint64_t>(major) << 32);
+}
+#endif
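The rollover test deserves a second look: `now < last` alone would also fire on a merely stale timestamp, so the distance check `(last - now) > HALF_MAX_UINT32` distinguishes a true wraparound from a small backwards step. A standalone sketch (assuming HALF_MAX_UINT32 is 2^31 - 1, as the name suggests) illustrates both cases and the epoch composition:

#include <cassert>
#include <cstdint>

static constexpr uint32_t HALF_MAX_UINT32 = 0x7FFFFFFF;  // assumed value, per the name

static bool is_true_rollover(uint32_t now, uint32_t last) {
  // Same predicate as in millis_64_: backwards *and* by more than half the range.
  return now < last && (last - now) > HALF_MAX_UINT32;
}

int main() {
  // millis() wrapped: last was just below 2^32, now restarted near zero.
  assert(is_true_rollover(5, 0xFFFFFFF0u));
  // Slightly stale timestamp: only 1 ms behind, so no new epoch is started.
  assert(!is_true_rollover(999, 1000));
  // Epoch composition: major = 1, low word = 5 -> 2^32 + 5 ms.
  assert((5 + (static_cast<uint64_t>(1) << 32)) == 4294967301ULL);
  return 0;
}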
+#ifdef ESPHOME_MULTI_CORE_NO_ATOMICS
+uint64_t Scheduler::millis_64_(uint32_t now) {
+  // THREAD SAFETY NOTE:
+  // This function has three implementations, selected by preprocessor flags:
+  // - ESPHOME_SINGLE_CORE
+  // - ESPHOME_MULTI_CORE_NO_ATOMICS
+  // - ESPHOME_MULTI_CORE_ATOMICS
+  //
+  // Keep all three implementations in sync when editing this function.
+  //
+  // This is the multi-core, no-atomics implementation.
   //
   // IMPORTANT: Always pass fresh millis() values to this function. The implementation
   // handles out-of-order timestamps between threads, but minimizing time differences
@@ -509,14 +565,8 @@ uint64_t Scheduler::millis_64_(uint32_t now) {
   // This prevents race conditions at the rollover boundary without requiring
   // 64-bit atomics or locking on every call.
-#ifdef ESPHOME_ATOMIC_SCHEDULER
-  for (;;) {
-    uint16_t major = this->millis_major_.load(std::memory_order_acquire);
-#else /* not ESPHOME_ATOMIC_SCHEDULER */
   uint16_t major = this->millis_major_;
-#endif /* else ESPHOME_ATOMIC_SCHEDULER */
-#ifdef USE_LIBRETINY
   // LibreTiny: Multi-threaded but lacks atomic operation support
   // TODO: If LibreTiny ever adds atomic support, remove this entire block and
   // let it fall through to the atomic-based implementation below
@@ -558,7 +608,38 @@ uint64_t Scheduler::millis_64_(uint32_t now) {
   // If now <= last and we're not near rollover, don't update
   // This minimizes backwards time movement
-#elif defined(ESPHOME_ATOMIC_SCHEDULER)
+  // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
+  return now + (static_cast<uint64_t>(major) << 32);
+}
+#endif
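On LibreTiny the body elided above follows the scheme the comment block describes: reads stay lock-free, and only a suspected rollover takes the scheduler lock, re-checking the condition once the lock is held. A condensed, self-contained sketch of that pattern (std::mutex stands in for the scheduler's own lock, and a literal 0x7FFFFFFF for HALF_MAX_UINT32; this is not the committed code) might look like:

#include <cstdint>
#include <mutex>

struct MillisTracker {
  std::mutex lock;           // stands in for the scheduler's lock_
  uint32_t last_millis{0};   // low 32 bits, plain (no atomics available)
  uint16_t millis_major{0};  // epoch counter, high 16 bits

  uint64_t millis_64(uint32_t now) {
    uint16_t major = this->millis_major;
    if (now < this->last_millis && (this->last_millis - now) > 0x7FFFFFFFu) {
      std::lock_guard<std::mutex> guard(this->lock);
      // Re-check under the lock: another thread may have already
      // started the new epoch between our test and acquiring the lock.
      if (now < this->last_millis && (this->last_millis - now) > 0x7FFFFFFFu)
        this->millis_major++;
      major = this->millis_major;
      this->last_millis = now;
    } else if (now > this->last_millis) {
      // Forward movement within the same epoch needs no lock.
      this->last_millis = now;
    }
    return now + (static_cast<uint64_t>(major) << 32);
  }
};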
+#ifdef ESPHOME_MULTI_CORE_ATOMICS
+uint64_t Scheduler::millis_64_(uint32_t now) {
+  // THREAD SAFETY NOTE:
+  // This function has three implementations, selected by preprocessor flags:
+  // - ESPHOME_SINGLE_CORE
+  // - ESPHOME_MULTI_CORE_NO_ATOMICS
+  // - ESPHOME_MULTI_CORE_ATOMICS
+  //
+  // Keep all three implementations in sync when editing this function.
+  //
+  // This is the multi-core, atomics-based implementation.
+  //
+  // IMPORTANT: Always pass fresh millis() values to this function. The implementation
+  // handles out-of-order timestamps between threads, but minimizing time differences
+  // helps maintain accuracy.
+  //
+  // The implementation handles the 32-bit rollover (every 49.7 days) by:
+  // 1. Using a lock when detecting rollover to ensure atomic update
+  // 2. Restricting normal updates to forward movement within the same epoch
+  // This prevents race conditions at the rollover boundary without requiring
+  // 64-bit atomics or locking on every call.
+  for (;;) {
+    uint16_t major = this->millis_major_.load(std::memory_order_acquire);
     /*
      * Multi-threaded platforms with atomic support (ESP32)
      * Acquire so that if we later decide **not** to take the lock we still
@@ -603,36 +684,14 @@ uint64_t Scheduler::millis_64_(uint32_t now) {
      // last is automatically updated by compare_exchange_weak if it fails
    }
  }
-#else /* not USE_LIBRETINY; not ESPHOME_ATOMIC_SCHEDULER */
-  // Single-core platforms: No atomics needed
-  uint32_t last = this->last_millis_;
-  // Check for rollover
-  if (now < last && (last - now) > HALF_MAX_UINT32) {
-    this->millis_major_++;
-    major++;
-#ifdef ESPHOME_DEBUG_SCHEDULER
-    ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
-#endif /* ESPHOME_DEBUG_SCHEDULER */
-  }
-  // Only update if time moved forward
-  if (now > last) {
-    this->last_millis_ = now;
-  }
-#endif /* else (USE_LIBRETINY / ESPHOME_ATOMIC_SCHEDULER) */
-#ifdef ESPHOME_ATOMIC_SCHEDULER
    uint16_t major_end = this->millis_major_.load(std::memory_order_relaxed);
    if (major_end == major)
      return now + (static_cast<uint64_t>(major) << 32);
  }
-#else /* not ESPHOME_ATOMIC_SCHEDULER */
-  // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
-  return now + (static_cast<uint64_t>(major) << 32);
-#endif /* ESPHOME_ATOMIC_SCHEDULER */
 }
+#endif
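The shape of the atomics path is easiest to see with the locking and debug code stripped away. Below is a self-contained sketch of the lock-free fast path assembled from the fragments visible in this diff (the rollover branch, which takes the scheduler lock, is elided, and the free-standing names are placeholders for the members):

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> last_millis{0};   // models last_millis_
std::atomic<uint16_t> millis_major{0};  // models millis_major_

uint64_t millis_64(uint32_t now) {
  for (;;) {
    // Acquire pairs with the release store that publishes a new epoch.
    uint16_t major = millis_major.load(std::memory_order_acquire);
    uint32_t last = last_millis.load(std::memory_order_relaxed);
    // Forward-only update: keep retrying until we either publish `now`
    // or observe that another thread already moved time past it.
    while (now > last &&
           !last_millis.compare_exchange_weak(last, now, std::memory_order_release,
                                              std::memory_order_relaxed)) {
      // `last` is reloaded by compare_exchange_weak on failure.
    }
    // If the epoch is unchanged, (major, now) is a consistent pair.
    if (millis_major.load(std::memory_order_relaxed) == major)
      return now + (static_cast<uint64_t>(major) << 32);
    // Otherwise a rollover raced with us; retry with the new epoch.
  }
}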
 bool HOT Scheduler::SchedulerItem::cmp(const std::unique_ptr<SchedulerItem> &a,
                                        const std::unique_ptr<SchedulerItem> &b) {
   return a->next_execution_ > b->next_execution_;


@@ -5,7 +5,7 @@
 #include <memory>
 #include <cstring>
 #include <deque>
-#ifdef ESPHOME_ATOMIC_SCHEDULER
+#ifdef ESPHOME_MULTI_CORE_ATOMICS
 #include <atomic>
 #endif
@@ -209,7 +209,7 @@ class Scheduler {
   // Single-core platforms don't need the defer queue and save 40 bytes of RAM
   std::deque<std::unique_ptr<SchedulerItem>> defer_queue_;  // FIFO queue for defer() calls
 #endif /* ESPHOME_SINGLE_CORE */
-#ifdef ESPHOME_ATOMIC_SCHEDULER
+#ifdef ESPHOME_MULTI_CORE_ATOMICS
   /*
    * Multi-threaded platforms with atomic support: last_millis_ needs atomic for lock-free updates
    *
@@ -221,21 +221,21 @@
    * it also observes the corresponding increment of `millis_major_`.
    */
   std::atomic<uint32_t> last_millis_{0};
-#else /* not ESPHOME_ATOMIC_SCHEDULER */
+#else /* not ESPHOME_MULTI_CORE_ATOMICS */
   // Platforms without atomic support or single-threaded platforms
   uint32_t last_millis_{0};
-#endif /* else ESPHOME_ATOMIC_SCHEDULER */
+#endif /* else ESPHOME_MULTI_CORE_ATOMICS */
   /*
    * Upper 16 bits of the 64-bit millis counter. Incremented only while holding
    * `lock_`; read concurrently. Atomic (relaxed) avoids a formal data race.
    * Ordering relative to `last_millis_` is provided by its release store and the
    * corresponding acquire loads.
    */
-#ifdef ESPHOME_ATOMIC_SCHEDULER
+#ifdef ESPHOME_MULTI_CORE_ATOMICS
   std::atomic<uint16_t> millis_major_{0};
-#else /* not ESPHOME_ATOMIC_SCHEDULER */
+#else /* not ESPHOME_MULTI_CORE_ATOMICS */
   uint16_t millis_major_{0};
-#endif /* else ESPHOME_ATOMIC_SCHEDULER */
+#endif /* else ESPHOME_MULTI_CORE_ATOMICS */
   uint32_t to_remove_{0};
 };
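The ordering contract in the comments above can be restated as a tiny standalone model (hypothetical names, not scheduler code): once a reader's acquire load of the low word observes the writer's release store, the preceding epoch increment is guaranteed to be visible too; the remaining window, where the epoch changes between the two loads, is what the re-check loop in millis_64_ closes.

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> low{0};   // models last_millis_
std::atomic<uint16_t> high{0};  // models millis_major_

// Writer (holds the scheduler lock in the real code).
void publish_rollover() {
  high.fetch_add(1, std::memory_order_relaxed);  // bump the epoch first
  low.store(0, std::memory_order_release);       // then publish the wrapped low word
}

// Reader: the acquire load synchronizes with the release store above.
uint64_t read_time() {
  uint32_t l = low.load(std::memory_order_acquire);
  // If `l` came from publish_rollover(), the fetch_add to `high` is
  // visible here; millis_64_ additionally re-checks the epoch to rule
  // out an increment racing in between these two loads.
  uint16_t h = high.load(std::memory_order_relaxed);
  return l + (static_cast<uint64_t>(h) << 32);
}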