Mirror of https://github.com/esphome/esphome.git (synced 2025-08-05 09:57:47 +00:00)
Merge branch 'heap_scheduler_stress_component' into integration
Commit 3dcba675b4
@@ -68,7 +68,7 @@ void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type
     // Still need to cancel existing timer if name is not empty
     if (name_cstr != nullptr && name_cstr[0] != '\0') {
       LockGuard guard{this->lock_};
-      this->cancel_item_locked_(component, name_cstr, type, delay == 0 && type == SchedulerItem::TIMEOUT);
+      this->cancel_item_locked_(component, name_cstr, type, false);
     }
     return;
   }
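
With this change, registering a named timer always cancels any existing item with the same name rather than limiting the scan to deferred zero-delay timeouts. A minimal standalone sketch of the cancel-then-register pattern under one lock (NamedTimers, Timer and the map layout are illustrative stand-ins, not ESPHome's actual types):

#include <cstdint>
#include <functional>
#include <mutex>
#include <string>
#include <unordered_map>

// Illustrative named-timer table: scheduling a name replaces whatever was
// previously registered under that name, all inside one critical section.
class NamedTimers {
 public:
  void set_timeout(const std::string &name, uint32_t delay_ms, std::function<void()> fn) {
    std::lock_guard<std::mutex> guard(mutex_);  // plays the role of LockGuard guard{this->lock_}
    timers_.erase(name);                        // cancel the existing item unconditionally
    timers_.emplace(name, Timer{delay_ms, std::move(fn)});
  }

 private:
  struct Timer {
    uint32_t delay_ms;
    std::function<void()> fn;
  };
  std::mutex mutex_;
  std::unordered_map<std::string, Timer> timers_;
};
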
@@ -242,6 +242,10 @@ void HOT Scheduler::call() {
   // - No deferred items exist in to_add_, so processing order doesn't affect correctness
   // ESP8266 and RP2040 don't use this queue - they fall back to the heap-based approach
   // (ESP8266: single-core, RP2040: empty mutex implementation).
+  //
+  // Note: Items cancelled via cancel_item_locked_() are marked with remove=true but still
+  // processed here. They are removed from the queue normally via pop_front() but skipped
+  // during execution by should_skip_item_(). This is intentional - no memory leak occurs.
   while (!this->defer_queue_.empty()) {
     // The outer check is done without a lock for performance. If the queue
     // appears non-empty, we lock and process an item. We don't need to check
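
The added note documents lazy cancellation in the defer queue: a cancelled entry keeps its slot, gets remove=true, and is dropped at execution time instead of being erased in place. A minimal sketch of that tombstone pattern (DeferredItem, cancel and drain are illustrative, not the scheduler's real types):

#include <deque>
#include <functional>
#include <memory>

struct DeferredItem {
  std::function<void()> callback;
  bool remove{false};  // tombstone set by cancellation; the queue still owns the memory
};

// Cancelling only marks the item; no erase happens in the middle of the queue.
inline void cancel(DeferredItem &item) { item.remove = true; }

// Draining pops every entry, so nothing leaks; tombstoned entries are simply not run.
inline void drain(std::deque<std::unique_ptr<DeferredItem>> &queue) {
  while (!queue.empty()) {
    std::unique_ptr<DeferredItem> item = std::move(queue.front());
    queue.pop_front();  // removed from the queue normally
    if (item->remove)
      continue;         // skipped during execution (the role of should_skip_item_())
    item->callback();
  }
}
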
@@ -273,10 +277,12 @@ void HOT Scheduler::call() {
     ESP_LOGD(TAG, "Items: count=%zu, now=%" PRIu64 " (%u, %" PRIu32 ")", this->items_.size(), now, this->millis_major_,
              this->last_millis_);
     while (!this->empty_()) {
-      this->lock_.lock();
-      auto item = std::move(this->items_[0]);
-      this->pop_raw_();
-      this->lock_.unlock();
+      std::unique_ptr<SchedulerItem> item;
+      {
+        LockGuard guard{this->lock_};
+        item = std::move(this->items_[0]);
+        this->pop_raw_();
+      }
 
       const char *name = item->get_name();
       ESP_LOGD(TAG, " %s '%s/%s' interval=%" PRIu32 " next_execution in %" PRIu64 "ms at %" PRIu64,
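
Here a manual lock()/unlock() pair around the pop is replaced by a block-scoped LockGuard, so the lock is released on every path out of the block and the critical section covers only taking the item off the heap. A sketch of the same shape using the standard library (Item, pop_front_locked and the deque are illustrative):

#include <deque>
#include <memory>
#include <mutex>

struct Item {
  // payload omitted
};

// Pop the next item while holding the mutex only for the move + pop itself.
std::unique_ptr<Item> pop_front_locked(std::deque<std::unique_ptr<Item>> &queue, std::mutex &m) {
  std::unique_ptr<Item> item;
  {
    std::lock_guard<std::mutex> guard(m);  // RAII: released at the closing brace, like LockGuard
    item = std::move(queue.front());
    queue.pop_front();
  }
  return item;  // the caller can log or execute the item without holding the lock
}
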
@@ -290,6 +296,8 @@ void HOT Scheduler::call() {
     {
       LockGuard guard{this->lock_};
       this->items_ = std::move(old_items);
+      // Rebuild heap after moving items back
+      std::make_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
     }
   }
 #endif // ESPHOME_DEBUG_SCHEDULER
@@ -314,6 +322,8 @@ void HOT Scheduler::call() {
 
     // Replace items_ with the filtered list
     this->items_ = std::move(valid_items);
+    // Rebuild the heap structure since items are no longer in heap order
+    std::make_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
     this->to_remove_ = 0;
   }
 
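
Both hunks above add a std::make_heap call because assigning a freshly filtered (or restored) vector to items_ destroys the heap ordering that later push/pop operations depend on. A sketch of the filter-then-rebuild step with a comparator in the spirit of SchedulerItem::cmp (Item, cmp and bulk_cleanup are illustrative names):

#include <algorithm>
#include <cstdint>
#include <memory>
#include <vector>

struct Item {
  uint64_t next_execution;
  bool remove{false};
};

// Min-heap on next_execution: the comparator says "a is lower priority than b",
// so make_heap keeps the earliest-due item at the front of the vector.
static bool cmp(const std::unique_ptr<Item> &a, const std::unique_ptr<Item> &b) {
  return a->next_execution > b->next_execution;
}

void bulk_cleanup(std::vector<std::unique_ptr<Item>> &items) {
  std::vector<std::unique_ptr<Item>> valid_items;
  valid_items.reserve(items.size());
  for (auto &item : items) {
    if (!item->remove)
      valid_items.push_back(std::move(item));
  }
  items = std::move(valid_items);                   // replace with the filtered list
  std::make_heap(items.begin(), items.end(), cmp);  // heap order was lost; rebuild in O(n)
}
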
@@ -441,7 +451,7 @@ bool HOT Scheduler::cancel_item_(Component *component, bool is_static_string, co
 
 // Helper to cancel items by name - must be called with lock held
 bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_cstr, SchedulerItem::Type type,
-                                        bool defer_only) {
+                                        bool check_defer_only) {
   size_t total_cancelled = 0;
 
   // Check all containers for matching items
@@ -454,7 +464,7 @@ bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_c
         total_cancelled++;
       }
     }
-    if (defer_only) {
+    if (check_defer_only) {
      return total_cancelled > 0;
    }
  }
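
The renamed check_defer_only parameter controls an early return: the helper always tombstones matches in the defer queue first, and when the flag is set it stops there instead of also scanning the remaining containers. A sketch of that control flow with illustrative containers (cancel_locked, Item and the owner-only matching are simplifications, not the real signature):

#include <cstddef>
#include <deque>
#include <memory>
#include <vector>

struct Item {
  const void *owner;
  bool remove{false};
};

// Returns true if anything was cancelled. Assumes the scheduler lock is already held.
// check_defer_only == true limits the scan to the defer queue (the cheap path for deferred calls).
bool cancel_locked(const void *owner, bool check_defer_only,
                   std::deque<std::unique_ptr<Item>> &defer_queue,
                   std::vector<std::unique_ptr<Item>> &heap_items) {
  size_t total_cancelled = 0;

  for (auto &item : defer_queue) {
    if (item->owner == owner && !item->remove) {
      item->remove = true;  // tombstone; the drain loop will skip it
      total_cancelled++;
    }
  }
  if (check_defer_only)
    return total_cancelled > 0;

  for (auto &item : heap_items) {
    if (item->owner == owner && !item->remove) {
      item->remove = true;
      total_cancelled++;
    }
  }
  return total_cancelled > 0;
}
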
@@ -99,13 +99,7 @@ class Scheduler {
     SchedulerItem(const SchedulerItem &) = delete;
     SchedulerItem &operator=(const SchedulerItem &) = delete;
 
-    // Delete move operations to prevent accidental moves of SchedulerItem objects.
-    // This is intentional because:
-    // 1. SchedulerItem contains a dynamically allocated name that requires careful ownership management
-    // 2. The scheduler only moves unique_ptr<SchedulerItem>, never SchedulerItem objects directly
-    // 3. Moving unique_ptr only transfers pointer ownership without moving the pointed-to object
-    // 4. Deleting these operations makes it explicit that SchedulerItem objects should not be moved
-    // 5. This prevents potential double-free bugs if the code is refactored to move SchedulerItem objects
+    // Delete move operations: SchedulerItem objects are only managed via unique_ptr, never moved directly
     SchedulerItem(SchedulerItem &&) = delete;
     SchedulerItem &operator=(SchedulerItem &&) = delete;
 
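
The condensed comment keeps the key point: SchedulerItem is neither copyable nor movable, and ownership is transferred only by moving the unique_ptr that holds it. A small sketch of that pattern (the Pinned class name is an illustrative stand-in):

#include <memory>
#include <utility>
#include <vector>

class Pinned {
 public:
  Pinned() = default;
  // Non-copyable and non-movable: instances stay at a fixed address for their lifetime.
  Pinned(const Pinned &) = delete;
  Pinned &operator=(const Pinned &) = delete;
  Pinned(Pinned &&) = delete;
  Pinned &operator=(Pinned &&) = delete;
};

int main() {
  std::vector<std::unique_ptr<Pinned>> items;
  items.push_back(std::make_unique<Pinned>());  // fine: only the pointer is stored
  auto taken = std::move(items.back());         // fine: moving the unique_ptr, not the Pinned object
  items.pop_back();
  // Pinned p2 = std::move(*taken);             // would not compile: move constructor is deleted
  (void) taken;
  return 0;
}
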
@@ -149,7 +143,7 @@ class Scheduler {
 
  private:
   // Helper to cancel items by name - must be called with lock held
-  bool cancel_item_locked_(Component *component, const char *name, SchedulerItem::Type type, bool defer_only);
+  bool cancel_item_locked_(Component *component, const char *name, SchedulerItem::Type type, bool check_defer_only);
 
   // Helper to extract name as const char* from either static string or std::string
   inline const char *get_name_cstr_(bool is_static_string, const void *name_ptr) {
@@ -36,26 +36,35 @@ void SchedulerBulkCleanupComponent::trigger_bulk_cleanup() {
   // At this point we have 25 items marked for removal
   // The next scheduler.call() should trigger the bulk cleanup path
 
-  // Schedule an interval that will execute multiple times to ensure cleanup happens
+  // The bulk cleanup should happen on the next scheduler.call() after cancelling items
+  // Log that we expect bulk cleanup to be triggered
+  ESP_LOGI(TAG, "Bulk cleanup triggered: removed %d items", 25);
+  ESP_LOGI(TAG, "Items before cleanup: 25+, after: <unknown>");
+
+  // Schedule an interval that will execute multiple times to verify scheduler still works
   static int cleanup_check_count = 0;
   App.scheduler.set_interval(this, "cleanup_checker", 25, [this]() {
     cleanup_check_count++;
     ESP_LOGI(TAG, "Cleanup check %d - scheduler still running", cleanup_check_count);
 
     if (cleanup_check_count >= 5) {
-      // Cancel the interval and complete the test
+      // Cancel the interval
      App.scheduler.cancel_interval(this, "cleanup_checker");
-      ESP_LOGI(TAG, "Bulk cleanup triggered: removed %d items", 25);
-      ESP_LOGI(TAG, "Items before cleanup: 25+, after: <unknown>");
-      ESP_LOGI(TAG, "Bulk cleanup test complete");
+      ESP_LOGI(TAG, "Scheduler verified working after bulk cleanup");
     }
   });
 
   // Also schedule some normal timeouts to ensure scheduler keeps working after cleanup
+  static int post_cleanup_count = 0;
   for (int i = 0; i < 5; i++) {
     std::string name = "post_cleanup_" + std::to_string(i);
-    App.scheduler.set_timeout(this, name, 50 + i * 25,
-                              [i]() { ESP_LOGI(TAG, "Post-cleanup timeout %d executed correctly", i); });
+    App.scheduler.set_timeout(this, name, 50 + i * 25, [i]() {
+      ESP_LOGI(TAG, "Post-cleanup timeout %d executed correctly", i);
+      post_cleanup_count++;
+      if (post_cleanup_count >= 5) {
+        ESP_LOGI(TAG, "All post-cleanup timeouts completed - test finished");
+      }
+    });
   }
 }
 
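
The reworked test component moves test completion onto the device: each post-cleanup timeout bumps a shared counter, and the last one emits the "test finished" line the Python harness waits for. A standalone sketch of this count-to-completion pattern with plain callbacks (no ESPHome scheduler involved; main and the printf logs are illustrative):

#include <cstdio>
#include <functional>
#include <vector>

int main() {
  static int post_cleanup_count = 0;  // static storage, so the lambdas can use it without capturing
  std::vector<std::function<void()>> timeouts;

  for (int i = 0; i < 5; i++) {
    timeouts.push_back([i]() {
      std::printf("Post-cleanup timeout %d executed correctly\n", i);
      post_cleanup_count++;
      if (post_cleanup_count >= 5) {
        std::printf("All post-cleanup timeouts completed - test finished\n");
      }
    });
  }

  // Stand-in for the scheduler firing each timeout once its delay elapses.
  for (auto &cb : timeouts)
    cb();
  return 0;
}
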
@@ -64,14 +64,13 @@ async def test_scheduler_bulk_cleanup(
         match = re.search(r"Post-cleanup timeout (\d+) executed correctly", line)
         if match:
             post_cleanup_executed += 1
-            # All 5 post-cleanup timeouts have executed
-            if post_cleanup_executed >= 5 and not test_complete_future.done():
-                test_complete_future.set_result(None)
 
-        # Check for bulk cleanup completion (but don't end test yet)
-        if "Bulk cleanup test complete" in line:
-            # This just means the interval finished, not that all timeouts executed
-            pass
+        # Check for final test completion
+        if (
+            "All post-cleanup timeouts completed - test finished" in line
+            and not test_complete_future.done()
+        ):
+            test_complete_future.set_result(None)
 
     async with (
         run_compiled(yaml_config, line_callback=on_log_line),