Merge branch 'bugfix/make_schedule_rollover_atomic' into integration

J. Nick Koston 2025-07-19 10:53:31 -10:00
commit 2ed70c3c60
5 changed files with 210 additions and 103 deletions

View File

@@ -229,6 +229,21 @@
 #define USE_SOCKET_SELECT_SUPPORT
 #endif
 
+// Helper macro for single core platforms that lack atomic scheduler support
+#if defined(USE_ESP8266) || defined(USE_RP2040)
+#define ESPHOME_SINGLE_CORE
+#endif
+
+// Helper macro for multi core platforms that lack atomic scheduler support
+#if !defined(ESPHOME_SINGLE_CORE) && defined(USE_LIBRETINY)
+#define ESPHOME_MULTI_CORE_NO_ATOMICS
+#endif
+
+// Helper macro for multi core platforms with atomic scheduler support
+#if !defined(ESPHOME_SINGLE_CORE) && !defined(USE_LIBRETINY)
+#define ESPHOME_MULTI_CORE_ATOMICS
+#endif
+
 // Disabled feature flags
 // #define USE_BSEC   // Requires a library with proprietary license
 // #define USE_BSEC2  // Requires a library with proprietary license
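Taken together, the three new flags are mutually exclusive and exhaustive: USE_ESP8266/USE_RP2040 select ESPHOME_SINGLE_CORE, USE_LIBRETINY selects ESPHOME_MULTI_CORE_NO_ATOMICS, and every other platform gets ESPHOME_MULTI_CORE_ATOMICS. A sketch of a compile-time guard that would assert this invariant (editor's illustration, not part of the commit):

```cpp
// Sketch: the three flags partition all platforms, so a build should match
// exactly one class. defined(X) evaluates to 1 or 0 inside an #if expression,
// which makes the check a one-liner.
#if (defined(ESPHOME_SINGLE_CORE) + defined(ESPHOME_MULTI_CORE_NO_ATOMICS) + \
     defined(ESPHOME_MULTI_CORE_ATOMICS)) != 1
#error "Exactly one scheduler platform class must be selected"
#endif
```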

View File

@@ -54,7 +54,7 @@ static void validate_static_string(const char *name) {
     ESP_LOGW(TAG, "WARNING: Scheduler name '%s' at %p might be on heap (static ref at %p)", name, name, static_str);
   }
 }
-#endif
+#endif /* ESPHOME_DEBUG_SCHEDULER */
 
 // A note on locking: the `lock_` lock protects the `items_` and `to_add_` containers. It must be taken when writing to
 // them (i.e. when adding/removing items, but not when changing items). As items are only deleted from the loop task,
@@ -82,9 +82,9 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type
   item->callback = std::move(func);
   item->remove = false;
 
-#if !defined(USE_ESP8266) && !defined(USE_RP2040)
+#ifndef ESPHOME_SINGLE_CORE
   // Special handling for defer() (delay = 0, type = TIMEOUT)
-  // ESP8266 and RP2040 are excluded because they don't need thread-safe defer handling
+  // Single-core platforms don't need thread-safe defer handling
   if (delay == 0 && type == SchedulerItem::TIMEOUT) {
     // Put in defer queue for guaranteed FIFO execution
     LockGuard guard{this->lock_};
@@ -92,7 +92,7 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type
     this->defer_queue_.push_back(std::move(item));
     return;
   }
-#endif
+#endif /* not ESPHOME_SINGLE_CORE */
 
   // Get fresh timestamp for new timer/interval - ensures accurate scheduling
   const auto now = this->millis_64_(millis());  // Fresh millis() call
@@ -123,7 +123,7 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type
     ESP_LOGD(TAG, "set_%s(name='%s/%s', %s=%" PRIu32 ", offset=%" PRIu32 ")", type_str, item->get_source(),
              name_cstr ? name_cstr : "(null)", type_str, delay, static_cast<uint32_t>(item->next_execution_ - now));
   }
-#endif
+#endif /* ESPHOME_DEBUG_SCHEDULER */
 
   LockGuard guard{this->lock_};
   // If name is provided, do atomic cancel-and-add
@@ -231,7 +231,7 @@ optional<uint32_t> HOT Scheduler::next_schedule_in(uint32_t now) {
   return item->next_execution_ - now_64;
 }
 
 void HOT Scheduler::call(uint32_t now) {
-#if !defined(USE_ESP8266) && !defined(USE_RP2040)
+#ifndef ESPHOME_SINGLE_CORE
   // Process defer queue first to guarantee FIFO execution order for deferred items.
   // Previously, defer() used the heap which gave undefined order for equal timestamps,
   // causing race conditions on multi-core systems (ESP32, BK7200).
@@ -239,8 +239,7 @@ void HOT Scheduler::call(uint32_t now) {
   // - Deferred items (delay=0) go directly to defer_queue_ in set_timer_common_
   // - Items execute in exact order they were deferred (FIFO guarantee)
   // - No deferred items exist in to_add_, so processing order doesn't affect correctness
-  // ESP8266 and RP2040 don't use this queue - they fall back to the heap-based approach
-  // (ESP8266: single-core, RP2040: empty mutex implementation).
+  // Single-core platforms don't use this queue and fall back to the heap-based approach.
   //
   // Note: Items cancelled via cancel_item_locked_() are marked with remove=true but still
   // processed here. They are removed from the queue normally via pop_front() but skipped
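The FIFO guarantee described above comes from the deque plus a pop-one-run-one loop. A condensed sketch of that pattern (editor's illustration with simplified names; ESPHome uses its own Mutex/LockGuard types and stores std::unique_ptr<SchedulerItem> rather than std::function):

```cpp
#include <deque>
#include <functional>
#include <mutex>

struct DeferQueue {
  std::mutex lock;
  std::deque<std::function<void()>> queue;

  // Producer side (any thread): enqueue under the lock -> strict FIFO order.
  void defer(std::function<void()> fn) {
    std::lock_guard<std::mutex> guard(lock);
    queue.push_back(std::move(fn));
  }

  // Consumer side (loop task): pop one item at a time and run it with the
  // lock released, so a callback can safely defer() more work.
  void process() {
    for (;;) {
      std::function<void()> fn;
      {
        std::lock_guard<std::mutex> guard(lock);
        if (queue.empty())
          return;
        fn = std::move(queue.front());
        queue.pop_front();
      }
      fn();  // run outside the lock
    }
  }
};
```

Releasing the lock before invoking the callback matters: a callback may itself call defer(), which takes the same lock.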
@@ -262,7 +261,7 @@ void HOT Scheduler::call(uint32_t now) {
       this->execute_item_(item.get(), now);
     }
   }
-#endif
+#endif /* not ESPHOME_SINGLE_CORE */
 
   // Convert the fresh timestamp from main loop to 64-bit for scheduler operations
   const auto now_64 = this->millis_64_(now);  // 'now' from parameter - fresh from Application::loop()
@@ -274,13 +273,15 @@ void HOT Scheduler::call(uint32_t now) {
     if (now_64 - last_print > 2000) {
       last_print = now_64;
       std::vector<std::unique_ptr<SchedulerItem>> old_items;
-#if !defined(USE_ESP8266) && !defined(USE_RP2040) && !defined(USE_LIBRETINY)
-      ESP_LOGD(TAG, "Items: count=%zu, now=%" PRIu64 " (%u, %" PRIu32 ")", this->items_.size(), now_64,
-               this->millis_major_, this->last_millis_.load(std::memory_order_relaxed));
-#else
-      ESP_LOGD(TAG, "Items: count=%zu, now=%" PRIu64 " (%u, %" PRIu32 ")", this->items_.size(), now_64,
+#ifdef ESPHOME_MULTI_CORE_ATOMICS
+      const auto last_dbg = this->last_millis_.load(std::memory_order_relaxed);
+      const auto major_dbg = this->millis_major_.load(std::memory_order_relaxed);
+      ESP_LOGD(TAG, "Items: count=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(), now_64,
+               major_dbg, last_dbg);
+#else /* not ESPHOME_MULTI_CORE_ATOMICS */
+      ESP_LOGD(TAG, "Items: count=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(), now_64,
                this->millis_major_, this->last_millis_);
-#endif
+#endif /* else ESPHOME_MULTI_CORE_ATOMICS */
       while (!this->empty_()) {
         std::unique_ptr<SchedulerItem> item;
         {
@@ -305,7 +306,7 @@ void HOT Scheduler::call(uint32_t now) {
         std::make_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
       }
     }
-#endif  // ESPHOME_DEBUG_SCHEDULER
+#endif /* ESPHOME_DEBUG_SCHEDULER */
 
   // If we have too many items to remove
   if (this->to_remove_ > MAX_LOGICALLY_DELETED_ITEMS) {
@@ -352,7 +353,7 @@ void HOT Scheduler::call(uint32_t now) {
       ESP_LOGV(TAG, "Running %s '%s/%s' with interval=%" PRIu32 " next_execution=%" PRIu64 " (now=%" PRIu64 ")",
               item->get_type_str(), item->get_source(), item_name ? item_name : "(null)", item->interval,
               item->next_execution_, now_64);
-#endif
+#endif /* ESPHOME_DEBUG_SCHEDULER */
 
       // Warning: During callback(), a lot of stuff can happen, including:
       //  - timeouts/intervals get added, potentially invalidating vector pointers
@@ -460,7 +461,7 @@ bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_c
   size_t total_cancelled = 0;
 
   // Check all containers for matching items
-#if !defined(USE_ESP8266) && !defined(USE_RP2040)
+#ifndef ESPHOME_SINGLE_CORE
   // Only check defer queue for timeouts (intervals never go there)
   if (type == SchedulerItem::TIMEOUT) {
     for (auto &item : this->defer_queue_) {
@@ -470,7 +471,7 @@ bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_c
       }
     }
   }
-#endif
+#endif /* not ESPHOME_SINGLE_CORE */
 
   // Cancel items in the main heap
   for (auto &item : this->items_) {
@@ -495,24 +496,54 @@ bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_c
 uint64_t Scheduler::millis_64_(uint32_t now) {
   // THREAD SAFETY NOTE:
-  // This function can be called from multiple threads simultaneously on ESP32/LibreTiny.
-  // On single-threaded platforms (ESP8266, RP2040), atomics are not needed.
+  // This function has three implementations, based on the preprocessor flags
+  // - ESPHOME_SINGLE_CORE - Runs on single-core platforms (ESP8266, RP2040, etc.)
+  // - ESPHOME_MULTI_CORE_NO_ATOMICS - Runs on multi-core platforms without atomics (LibreTiny)
+  // - ESPHOME_MULTI_CORE_ATOMICS - Runs on multi-core platforms with atomics (ESP32, HOST, etc.)
+  //
+  // Make sure all changes are synchronized if you edit this function.
   //
   // IMPORTANT: Always pass fresh millis() values to this function. The implementation
   // handles out-of-order timestamps between threads, but minimizing time differences
   // helps maintain accuracy.
   //
-  // The implementation handles the 32-bit rollover (every 49.7 days) by:
-  // 1. Using a lock when detecting rollover to ensure atomic update
-  // 2. Restricting normal updates to forward movement within the same epoch
-  // This prevents race conditions at the rollover boundary without requiring
-  // 64-bit atomics or locking on every call.
-#ifdef USE_LIBRETINY
-  // LibreTiny: Multi-threaded but lacks atomic operation support
-  // TODO: If LibreTiny ever adds atomic support, remove this entire block and
-  // let it fall through to the atomic-based implementation below
-  // We need to use a lock when near the rollover boundary to prevent races
+#ifdef ESPHOME_SINGLE_CORE
+  // This is the single core implementation.
+  //
+  // Single-core platforms have no concurrency, so this is a simple implementation
+  // that just tracks 32-bit rollover (every 49.7 days) without any locking or atomics.
+  uint16_t major = this->millis_major_;
+  uint32_t last = this->last_millis_;
+
+  // Check for rollover
+  if (now < last && (last - now) > HALF_MAX_UINT32) {
+    this->millis_major_++;
+    major++;
+#ifdef ESPHOME_DEBUG_SCHEDULER
+    ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
+#endif /* ESPHOME_DEBUG_SCHEDULER */
+  }
+
+  // Only update if time moved forward
+  if (now > last) {
+    this->last_millis_ = now;
+  }
+
+  // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
+  return now + (static_cast<uint64_t>(major) << 32);
+#endif  // ESPHOME_SINGLE_CORE
+
+#ifdef ESPHOME_MULTI_CORE_NO_ATOMICS
+  // This is the multi core no atomics implementation.
+  //
+  // Without atomics, this implementation uses locks more aggressively:
+  // 1. Always locks when near the rollover boundary (within 10 seconds)
+  // 2. Always locks when detecting a large backwards jump
+  // 3. Updates without lock in normal forward progression (accepting minor races)
+  // This is less efficient but necessary without atomic operations.
+  uint16_t major = this->millis_major_;
   uint32_t last = this->last_millis_;
 
   // Define a safe window around the rollover point (10 seconds)
@@ -531,9 +562,10 @@ uint64_t Scheduler::millis_64_(uint32_t now) {
     if (now < last && (last - now) > HALF_MAX_UINT32) {
       // True rollover detected (happens every ~49.7 days)
       this->millis_major_++;
+      major++;
 #ifdef ESPHOME_DEBUG_SCHEDULER
       ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
-#endif
+#endif /* ESPHOME_DEBUG_SCHEDULER */
     }
     // Update last_millis_ while holding lock
     this->last_millis_ = now;
@@ -549,58 +581,71 @@ uint64_t Scheduler::millis_64_(uint32_t now) {
   // If now <= last and we're not near rollover, don't update
   // This minimizes backwards time movement
-#elif !defined(USE_ESP8266) && !defined(USE_RP2040)
-  // Multi-threaded platforms with atomic support (ESP32)
-  uint32_t last = this->last_millis_.load(std::memory_order_relaxed);
-  // If we might be near a rollover (large backwards jump), take the lock for the entire operation
-  // This ensures rollover detection and last_millis_ update are atomic together
-  if (now < last && (last - now) > HALF_MAX_UINT32) {
-    // Potential rollover - need lock for atomic rollover detection + update
-    LockGuard guard{this->lock_};
-    // Re-read with lock held
-    last = this->last_millis_.load(std::memory_order_relaxed);
-    if (now < last && (last - now) > HALF_MAX_UINT32) {
-      // True rollover detected (happens every ~49.7 days)
-      this->millis_major_++;
-#ifdef ESPHOME_DEBUG_SCHEDULER
-      ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
-#endif
-    }
-    // Update last_millis_ while holding lock to prevent races
-    this->last_millis_.store(now, std::memory_order_relaxed);
-  } else {
-    // Normal case: Try lock-free update, but only allow forward movement within same epoch
-    // This prevents accidentally moving backwards across a rollover boundary
-    while (now > last && (now - last) < HALF_MAX_UINT32) {
-      if (this->last_millis_.compare_exchange_weak(last, now, std::memory_order_relaxed)) {
-        break;
-      }
-      // last is automatically updated by compare_exchange_weak if it fails
-    }
-  }
-#else
-  // Single-threaded platforms (ESP8266, RP2040): No atomics needed
-  uint32_t last = this->last_millis_;
-  // Check for rollover
-  if (now < last && (last - now) > HALF_MAX_UINT32) {
-    this->millis_major_++;
-#ifdef ESPHOME_DEBUG_SCHEDULER
-    ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
-#endif
-  }
-  // Only update if time moved forward
-  if (now > last) {
-    this->last_millis_ = now;
-  }
-#endif
   // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
-  return now + (static_cast<uint64_t>(this->millis_major_) << 32);
+  return now + (static_cast<uint64_t>(major) << 32);
+#endif  // ESPHOME_MULTI_CORE_NO_ATOMICS
+
+#ifdef ESPHOME_MULTI_CORE_ATOMICS
+  // This is the multi core with atomics implementation.
+  //
+  // Uses atomic operations with acquire/release semantics to ensure coherent
+  // reads of millis_major_ and last_millis_ across cores. Features:
+  // 1. Epoch-coherency retry loop to handle concurrent updates
+  // 2. Lock only taken for actual rollover detection and update
+  // 3. Lock-free CAS updates for normal forward time progression
+  // 4. Memory ordering ensures cores see consistent time values
+  for (;;) {
+    uint16_t major = this->millis_major_.load(std::memory_order_acquire);
+    /*
+     * Acquire so that if we later decide **not** to take the lock we still
+     * observe a `millis_major_` value coherent with the loaded `last_millis_`.
+     * The acquire load ensures any later read of `millis_major_` sees its
+     * corresponding increment.
+     */
+    uint32_t last = this->last_millis_.load(std::memory_order_acquire);
+
+    // If we might be near a rollover (large backwards jump), take the lock for the entire operation
+    // This ensures rollover detection and last_millis_ update are atomic together
+    if (now < last && (last - now) > HALF_MAX_UINT32) {
+      // Potential rollover - need lock for atomic rollover detection + update
+      LockGuard guard{this->lock_};
+      // Re-read with lock held; mutex already provides ordering
+      last = this->last_millis_.load(std::memory_order_relaxed);
+      if (now < last && (last - now) > HALF_MAX_UINT32) {
+        // True rollover detected (happens every ~49.7 days)
+        this->millis_major_.fetch_add(1, std::memory_order_relaxed);
+        major++;
+#ifdef ESPHOME_DEBUG_SCHEDULER
+        ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
+#endif /* ESPHOME_DEBUG_SCHEDULER */
+      }
+      /*
+       * Update last_millis_ while holding the lock to prevent races
+       * Publish the new low-word *after* bumping `millis_major_` (done above)
+       * so readers never see a mismatched pair.
+       */
+      this->last_millis_.store(now, std::memory_order_release);
+    } else {
+      // Normal case: Try lock-free update, but only allow forward movement within same epoch
+      // This prevents accidentally moving backwards across a rollover boundary
+      while (now > last && (now - last) < HALF_MAX_UINT32) {
+        if (this->last_millis_.compare_exchange_weak(last, now,
+                                                     std::memory_order_release,    // success
+                                                     std::memory_order_relaxed)) {  // failure
+          break;
+        }
+        // CAS failure means no data was published; relaxed is fine
+        // last is automatically updated by compare_exchange_weak if it fails
+      }
+    }
+
+    uint16_t major_end = this->millis_major_.load(std::memory_order_relaxed);
+    if (major_end == major)
+      return now + (static_cast<uint64_t>(major) << 32);
+  }
+#endif  // ESPHOME_MULTI_CORE_ATOMICS
 }
 
 bool HOT Scheduler::SchedulerItem::cmp(const std::unique_ptr<SchedulerItem> &a,
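Stripped of locking and memory ordering, all three branches of millis_64_() perform the same epoch arithmetic. A standalone sketch (editor's illustration; HALF_MAX_UINT32 is assumed to be 2^31 as in the scheduler source, and here the new epoch's timestamp is stored on rollover, as the locked branches do):

```cpp
#include <cstdint>

static constexpr uint32_t HALF_MAX_UINT32 = 0x80000000UL;

uint64_t to_millis_64(uint32_t now, uint32_t &last, uint16_t &major) {
  if (now < last && (last - now) > HALF_MAX_UINT32) {
    major++;     // true 32-bit wrap (~ every 49.7 days)
    last = now;  // start the new epoch
  } else if (now > last) {
    last = now;  // normal forward movement within the epoch
  }
  // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
  return now + (static_cast<uint64_t>(major) << 32);
}
```

The half-range test is what distinguishes a genuine wrap from a slightly stale timestamp produced by another thread: a stale value lags last by a few milliseconds, while a wrapped one appears to have jumped backwards by nearly the full 32-bit range. For example, with major = 0, last = 0xFFFFF000, and now = 5, the backwards jump exceeds 2^31, so major becomes 1 and the function returns 0x100000005.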

View File

@@ -1,10 +1,11 @@
 #pragma once
 
+#include "esphome/core/defines.h"
 #include <vector>
 #include <memory>
 #include <cstring>
 #include <deque>
 
-#if !defined(USE_ESP8266) && !defined(USE_RP2040) && !defined(USE_LIBRETINY)
+#ifdef ESPHOME_MULTI_CORE_ATOMICS
 #include <atomic>
 #endif
@@ -204,23 +205,40 @@ class Scheduler {
   Mutex lock_;
   std::vector<std::unique_ptr<SchedulerItem>> items_;
   std::vector<std::unique_ptr<SchedulerItem>> to_add_;
-#if !defined(USE_ESP8266) && !defined(USE_RP2040)
-  // ESP8266 and RP2040 don't need the defer queue because:
-  // ESP8266: Single-core with no preemptive multitasking
-  // RP2040: Currently has empty mutex implementation in ESPHome
-  // Both platforms save 40 bytes of RAM by excluding this
+#ifndef ESPHOME_SINGLE_CORE
+  // Single-core platforms don't need the defer queue and save 40 bytes of RAM
   std::deque<std::unique_ptr<SchedulerItem>> defer_queue_;  // FIFO queue for defer() calls
-#endif
+#endif /* ESPHOME_SINGLE_CORE */
+  uint32_t to_remove_{0};
 
-#if !defined(USE_ESP8266) && !defined(USE_RP2040) && !defined(USE_LIBRETINY)
-  // Multi-threaded platforms with atomic support: last_millis_ needs atomic for lock-free updates
+#ifdef ESPHOME_MULTI_CORE_ATOMICS
+  /*
+   * Multi-threaded platforms with atomic support: last_millis_ needs atomic for lock-free updates
+   *
+   * MEMORY-ORDERING NOTE
+   * --------------------
+   * `last_millis_` and `millis_major_` form a single 64-bit timestamp split in half.
+   * Writers publish `last_millis_` with memory_order_release and readers use
+   * memory_order_acquire. This ensures that once a reader sees the new low word,
+   * it also observes the corresponding increment of `millis_major_`.
+   */
   std::atomic<uint32_t> last_millis_{0};
-#else
+#else /* not ESPHOME_MULTI_CORE_ATOMICS */
   // Platforms without atomic support or single-threaded platforms
   uint32_t last_millis_{0};
-#endif
+#endif /* else ESPHOME_MULTI_CORE_ATOMICS */
 
-  // millis_major_ is protected by lock when incrementing
+  /*
+   * Upper 16 bits of the 64-bit millis counter. Incremented only while holding
+   * `lock_`; read concurrently. Atomic (relaxed) avoids a formal data race.
+   * Ordering relative to `last_millis_` is provided by its release store and the
+   * corresponding acquire loads.
+   */
+#ifdef ESPHOME_MULTI_CORE_ATOMICS
+  std::atomic<uint16_t> millis_major_{0};
+#else /* not ESPHOME_MULTI_CORE_ATOMICS */
   uint16_t millis_major_{0};
-  uint32_t to_remove_{0};
+#endif /* else ESPHOME_MULTI_CORE_ATOMICS */
 };
 
 }  // namespace esphome
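The memory-ordering note above describes a classic split-counter publication protocol. A self-contained sketch of the release/acquire pairing and the reader-side epoch check (editor's illustration; the variable names are invented, and the real scheduler folds the writer logic into millis_64_() and takes a lock around the rollover path):

```cpp
#include <atomic>
#include <cstdint>

std::atomic<uint32_t> low{0};   // plays the role of last_millis_
std::atomic<uint16_t> high{0};  // plays the role of millis_major_

void publish_rollover(uint32_t new_low) {
  high.fetch_add(1, std::memory_order_relaxed);   // bump the epoch first...
  low.store(new_low, std::memory_order_release);  // ...then publish the low word
}

uint64_t read_time() {
  for (;;) {
    uint16_t h1 = high.load(std::memory_order_acquire);
    uint32_t l = low.load(std::memory_order_acquire);  // pairs with the release store
    // Re-check the epoch: if it moved while we read the low word, retry,
    // mirroring the epoch-coherency loop in millis_64_().
    uint16_t h2 = high.load(std::memory_order_relaxed);
    if (h1 == h2)
      return l + (static_cast<uint64_t>(h1) << 32);
  }
}
```

Because the writer publishes the low word with release only after bumping the epoch, a reader that acquires the new low word is guaranteed to observe the matching (or newer) high word; the h1 == h2 retry handles the window where the epoch changes mid-read.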

View File

@@ -137,6 +137,10 @@ def should_run_clang_tidy(branch: str | None = None) -> bool:
        - This ensures all C++ code is checked, including tests, examples, etc.
        - Examples: esphome/core/component.cpp, tests/custom/my_component.h
 
+    3. The .clang-tidy.hash file itself changed
+       - This indicates the configuration has been updated and clang-tidy should run
+       - Ensures that PRs updating the clang-tidy configuration are properly validated
+
     If the hash check fails for any reason, clang-tidy runs as a safety measure to ensure
     code quality is maintained.
@@ -160,6 +164,12 @@ def should_run_clang_tidy(branch: str | None = None) -> bool:
         # If hash check fails, run clang-tidy to be safe
         return True
 
+    # Check if .clang-tidy.hash file itself was changed
+    # This handles the case where the hash was properly updated in the PR
+    files = changed_files(branch)
+    if ".clang-tidy.hash" in files:
+        return True
+
     return _any_changed_file_endswith(branch, CPP_FILE_EXTENSIONS)

View File

@@ -6,7 +6,7 @@ import json
 import os
 import subprocess
 import sys
-from unittest.mock import Mock, patch
+from unittest.mock import Mock, call, patch
 
 import pytest
@@ -262,6 +262,8 @@ def test_should_run_integration_tests_component_dependency() -> None:
         (0, [], True),  # Hash changed - need full scan
         (1, ["esphome/core.cpp"], True),  # C++ file changed
         (1, ["README.md"], False),  # No C++ files changed
+        (1, [".clang-tidy.hash"], True),  # Hash file itself changed
+        (1, ["platformio.ini", ".clang-tidy.hash"], True),  # Config + hash changed
     ],
 )
 def test_should_run_clang_tidy(
@@ -277,11 +279,26 @@ def test_should_run_clang_tidy(
         result = determine_jobs.should_run_clang_tidy()
         assert result == expected_result
 
-    # Test with hash check failing (exception)
-    if check_returncode != 0:
-        with patch("subprocess.run", side_effect=Exception("Failed")):
-            result = determine_jobs.should_run_clang_tidy()
-            assert result is True  # Fail safe - run clang-tidy
+
+def test_should_run_clang_tidy_hash_check_exception() -> None:
+    """Test should_run_clang_tidy when hash check fails with exception."""
+    # When hash check fails, clang-tidy should run as a safety measure
+    with (
+        patch.object(determine_jobs, "changed_files", return_value=["README.md"]),
+        patch("subprocess.run", side_effect=Exception("Hash check failed")),
+    ):
+        result = determine_jobs.should_run_clang_tidy()
+        assert result is True  # Fail safe - run clang-tidy
+
+    # Even with C++ files, exception should trigger clang-tidy
+    with (
+        patch.object(
+            determine_jobs, "changed_files", return_value=["esphome/core.cpp"]
+        ),
+        patch("subprocess.run", side_effect=Exception("Hash check failed")),
+    ):
+        result = determine_jobs.should_run_clang_tidy()
+        assert result is True
 
 
 def test_should_run_clang_tidy_with_branch() -> None:
@@ -291,7 +308,9 @@ def test_should_run_clang_tidy_with_branch() -> None:
     with patch("subprocess.run") as mock_run:
         mock_run.return_value = Mock(returncode=1)  # Hash unchanged
         determine_jobs.should_run_clang_tidy("release")
-        mock_changed.assert_called_once_with("release")
+        # changed_files() is called twice now - once for the hash check, once for the .clang-tidy.hash check
+        assert mock_changed.call_count == 2
+        mock_changed.assert_has_calls([call("release"), call("release")])
 
 
 @pytest.mark.parametrize(