Mirror of https://github.com/esphome/esphome.git (synced 2025-08-09 11:57:46 +00:00)

Commit fe1e25b5c7: Merge remote-tracking branch 'upstream/integration' into integration
@@ -177,7 +177,11 @@ async def to_code(config):
        # and plaintext disabled. Only a factory reset can remove it.
        cg.add_define("USE_API_PLAINTEXT")
        cg.add_define("USE_API_NOISE")
        cg.add_library("esphome/noise-c", "0.1.6")
        cg.add_library(
            None,
            None,
            "https://github.com/esphome/noise-c.git#libsodium_update",
        )
    else:
        cg.add_define("USE_API_PLAINTEXT")
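For orientation, a minimal hedged sketch (not part of this commit) of how C++ code can branch on the codegen defines set above; the surrounding comments are illustrative only:

#ifdef USE_API_NOISE
// Noise-encrypted API frame handling is compiled in when this define is set.
#endif
#ifdef USE_API_PLAINTEXT
// Plaintext API frame handling is compiled in when this define is set.
#endif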
@@ -1,7 +1,6 @@
#ifdef USE_ESP32

#include "ble.h"
#include "ble_event_pool.h"

#include "esphome/core/application.h"
#include "esphome/core/helpers.h"
@@ -12,8 +12,8 @@
#include "esphome/core/helpers.h"

#include "ble_event.h"
#include "ble_event_pool.h"
#include "queue.h"
#include "esphome/core/lock_free_queue.h"
#include "esphome/core/event_pool.h"

#ifdef USE_ESP32
@@ -148,8 +148,8 @@ class ESP32BLE : public Component {
  std::vector<BLEStatusEventHandler *> ble_status_event_handlers_;
  BLEComponentState state_{BLE_COMPONENT_STATE_OFF};

  LockFreeQueue<BLEEvent, MAX_BLE_QUEUE_SIZE> ble_events_;
  BLEEventPool<MAX_BLE_QUEUE_SIZE> ble_event_pool_;
  esphome::LockFreeQueue<BLEEvent, MAX_BLE_QUEUE_SIZE> ble_events_;
  esphome::EventPool<BLEEvent, MAX_BLE_QUEUE_SIZE> ble_event_pool_;
  BLEAdvertising *advertising_{};
  esp_ble_io_cap_t io_cap_{ESP_IO_CAP_NONE};
  uint32_t advertising_cycle_time_{};
@@ -134,13 +134,13 @@ class BLEEvent {
  }

  // Destructor to clean up heap allocations
  ~BLEEvent() { this->cleanup_heap_data(); }
  ~BLEEvent() { this->release(); }

  // Default constructor for pre-allocation in pool
  BLEEvent() : type_(GAP) {}

  // Clean up any heap-allocated data
  void cleanup_heap_data() {
  // Invoked on return to EventPool - clean up any heap-allocated data
  void release() {
    if (this->type_ == GAP) {
      return;
    }

@@ -161,19 +161,19 @@

  // Load new event data for reuse (replaces previous event data)
  void load_gap_event(esp_gap_ble_cb_event_t e, esp_ble_gap_cb_param_t *p) {
    this->cleanup_heap_data();
    this->release();
    this->type_ = GAP;
    this->init_gap_data_(e, p);
  }

  void load_gattc_event(esp_gattc_cb_event_t e, esp_gatt_if_t i, esp_ble_gattc_cb_param_t *p) {
    this->cleanup_heap_data();
    this->release();
    this->type_ = GATTC;
    this->init_gattc_data_(e, i, p);
  }

  void load_gatts_event(esp_gatts_cb_event_t e, esp_gatt_if_t i, esp_ble_gatts_cb_param_t *p) {
    this->cleanup_heap_data();
    this->release();
    this->type_ = GATTS;
    this->init_gatts_data_(e, i, p);
  }
@@ -1,72 +0,0 @@
#pragma once

#ifdef USE_ESP32

#include <atomic>
#include <cstddef>
#include "ble_event.h"
#include "queue.h"
#include "esphome/core/helpers.h"

namespace esphome {
namespace esp32_ble {

// BLE Event Pool - On-demand pool of BLEEvent objects to avoid heap fragmentation
// Events are allocated on first use and reused thereafter, growing to peak usage
template<uint8_t SIZE> class BLEEventPool {
 public:
  BLEEventPool() : total_created_(0) {}

  ~BLEEventPool() {
    // Clean up any remaining events in the free list
    BLEEvent *event;
    while ((event = this->free_list_.pop()) != nullptr) {
      delete event;
    }
  }

  // Allocate an event from the pool
  // Returns nullptr if pool is full
  BLEEvent *allocate() {
    // Try to get from free list first
    BLEEvent *event = this->free_list_.pop();
    if (event != nullptr)
      return event;

    // Need to create a new event
    if (this->total_created_ >= SIZE) {
      // Pool is at capacity
      return nullptr;
    }

    // Use internal RAM for better performance
    RAMAllocator<BLEEvent> allocator(RAMAllocator<BLEEvent>::ALLOC_INTERNAL);
    event = allocator.allocate(1);

    if (event == nullptr) {
      // Memory allocation failed
      return nullptr;
    }

    // Placement new to construct the object
    new (event) BLEEvent();
    this->total_created_++;
    return event;
  }

  // Return an event to the pool for reuse
  void release(BLEEvent *event) {
    if (event != nullptr) {
      this->free_list_.push(event);
    }
  }

 private:
  LockFreeQueue<BLEEvent, SIZE> free_list_;  // Free events ready for reuse
  uint8_t total_created_;                    // Total events created (high water mark)
};

}  // namespace esp32_ble
}  // namespace esphome

#endif
@@ -1,85 +0,0 @@
#pragma once

#ifdef USE_ESP32

#include <atomic>
#include <cstddef>

/*
 * BLE events come in from a separate Task (thread) in the ESP32 stack. Rather
 * than using mutex-based locking, this lock-free queue allows the BLE
 * task to enqueue events without blocking. The main loop() then processes
 * these events at a safer time.
 *
 * This is a Single-Producer Single-Consumer (SPSC) lock-free ring buffer.
 * The BLE task is the only producer, and the main loop() is the only consumer.
 */

namespace esphome {
namespace esp32_ble {

template<class T, uint8_t SIZE> class LockFreeQueue {
 public:
  LockFreeQueue() : head_(0), tail_(0), dropped_count_(0) {}

  bool push(T *element) {
    if (element == nullptr)
      return false;

    uint8_t current_tail = tail_.load(std::memory_order_relaxed);
    uint8_t next_tail = (current_tail + 1) % SIZE;

    if (next_tail == head_.load(std::memory_order_acquire)) {
      // Buffer full
      dropped_count_.fetch_add(1, std::memory_order_relaxed);
      return false;
    }

    buffer_[current_tail] = element;
    tail_.store(next_tail, std::memory_order_release);
    return true;
  }

  T *pop() {
    uint8_t current_head = head_.load(std::memory_order_relaxed);

    if (current_head == tail_.load(std::memory_order_acquire)) {
      return nullptr;  // Empty
    }

    T *element = buffer_[current_head];
    head_.store((current_head + 1) % SIZE, std::memory_order_release);
    return element;
  }

  size_t size() const {
    uint8_t tail = tail_.load(std::memory_order_acquire);
    uint8_t head = head_.load(std::memory_order_acquire);
    return (tail - head + SIZE) % SIZE;
  }

  uint16_t get_and_reset_dropped_count() { return dropped_count_.exchange(0, std::memory_order_relaxed); }

  void increment_dropped_count() { dropped_count_.fetch_add(1, std::memory_order_relaxed); }

  bool empty() const { return head_.load(std::memory_order_acquire) == tail_.load(std::memory_order_acquire); }

  bool full() const {
    uint8_t next_tail = (tail_.load(std::memory_order_relaxed) + 1) % SIZE;
    return next_tail == head_.load(std::memory_order_acquire);
  }

 protected:
  T *buffer_[SIZE];
  // Atomic: written by producer (push/increment), read+reset by consumer (get_and_reset)
  std::atomic<uint16_t> dropped_count_;  // 65535 max - more than enough for drop tracking
  // Atomic: written by consumer (pop), read by producer (push) to check if full
  std::atomic<uint8_t> head_;
  // Atomic: written by producer (push), read by consumer (pop) to check if empty
  std::atomic<uint8_t> tail_;
};

}  // namespace esp32_ble
}  // namespace esphome

#endif
@@ -413,9 +413,7 @@ std::string WebServer::sensor_json(sensor::Sensor *obj, float value, JsonDetail
    }
    set_json_icon_state_value(root, obj, "sensor-" + obj->get_object_id(), state, value, start_config);
    if (start_config == DETAIL_ALL) {
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
      if (!obj->get_unit_of_measurement().empty())
        root["uom"] = obj->get_unit_of_measurement();
    }

@@ -459,9 +457,7 @@ std::string WebServer::text_sensor_json(text_sensor::TextSensor *obj, const std:
  return json::build_json([this, obj, value, start_config](JsonObject root) {
    set_json_icon_state_value(root, obj, "text_sensor-" + obj->get_object_id(), value, value, start_config);
    if (start_config == DETAIL_ALL) {
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -513,9 +509,7 @@ std::string WebServer::switch_json(switch_::Switch *obj, bool value, JsonDetail
    set_json_icon_state_value(root, obj, "switch-" + obj->get_object_id(), value ? "ON" : "OFF", value, start_config);
    if (start_config == DETAIL_ALL) {
      root["assumed_state"] = obj->assumed_state();
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -555,9 +549,7 @@ std::string WebServer::button_json(button::Button *obj, JsonDetail start_config)
  return json::build_json([this, obj, start_config](JsonObject root) {
    set_json_id(root, obj, "button-" + obj->get_object_id(), start_config);
    if (start_config == DETAIL_ALL) {
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -599,9 +591,7 @@ std::string WebServer::binary_sensor_json(binary_sensor::BinarySensor *obj, bool
    set_json_icon_state_value(root, obj, "binary_sensor-" + obj->get_object_id(), value ? "ON" : "OFF", value,
                              start_config);
    if (start_config == DETAIL_ALL) {
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -686,9 +676,7 @@ std::string WebServer::fan_json(fan::Fan *obj, JsonDetail start_config) {
    if (obj->get_traits().supports_oscillation())
      root["oscillation"] = obj->oscillating;
    if (start_config == DETAIL_ALL) {
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -808,9 +796,7 @@ std::string WebServer::light_json(light::LightState *obj, JsonDetail start_confi
      for (auto const &option : obj->get_effects()) {
        opt.add(option->get_name());
      }
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -895,9 +881,7 @@ std::string WebServer::cover_json(cover::Cover *obj, JsonDetail start_config) {
    if (obj->get_traits().get_supports_tilt())
      root["tilt"] = obj->tilt;
    if (start_config == DETAIL_ALL) {
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -962,9 +946,7 @@ std::string WebServer::number_json(number::Number *obj, float value, JsonDetail
      root["mode"] = (int) obj->traits.get_mode();
      if (!obj->traits.get_unit_of_measurement().empty())
        root["uom"] = obj->traits.get_unit_of_measurement();
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
    if (std::isnan(value)) {
      root["value"] = "\"NaN\"";

@@ -1037,9 +1019,7 @@ std::string WebServer::date_json(datetime::DateEntity *obj, JsonDetail start_con
    root["value"] = value;
    root["state"] = value;
    if (start_config == DETAIL_ALL) {
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -1101,9 +1081,7 @@ std::string WebServer::time_json(datetime::TimeEntity *obj, JsonDetail start_con
    root["value"] = value;
    root["state"] = value;
    if (start_config == DETAIL_ALL) {
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -1166,9 +1144,7 @@ std::string WebServer::datetime_json(datetime::DateTimeEntity *obj, JsonDetail s
    root["value"] = value;
    root["state"] = value;
    if (start_config == DETAIL_ALL) {
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -1233,9 +1209,7 @@ std::string WebServer::text_json(text::Text *obj, const std::string &value, Json
    root["value"] = value;
    if (start_config == DETAIL_ALL) {
      root["mode"] = (int) obj->traits.get_mode();
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -1295,9 +1269,7 @@ std::string WebServer::select_json(select::Select *obj, const std::string &value
      for (auto &option : obj->traits.get_options()) {
        opt.add(option);
      }
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -1418,9 +1390,7 @@ std::string WebServer::climate_json(climate::Climate *obj, JsonDetail start_conf
        for (auto const &custom_preset : traits.get_supported_custom_presets())
          opt.add(custom_preset);
      }
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }

    bool has_state = false;

@@ -1517,9 +1487,7 @@ std::string WebServer::lock_json(lock::Lock *obj, lock::LockState value, JsonDet
    set_json_icon_state_value(root, obj, "lock-" + obj->get_object_id(), lock::lock_state_to_string(value), value,
                              start_config);
    if (start_config == DETAIL_ALL) {
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -1595,9 +1563,7 @@ std::string WebServer::valve_json(valve::Valve *obj, JsonDetail start_config) {
    if (obj->get_traits().get_supports_position())
      root["position"] = obj->position;
    if (start_config == DETAIL_ALL) {
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -1669,9 +1635,7 @@ std::string WebServer::alarm_control_panel_json(alarm_control_panel::AlarmContro
    set_json_icon_state_value(root, obj, "alarm-control-panel-" + obj->get_object_id(),
                              PSTR_LOCAL(alarm_control_panel_state_to_string(value)), value, start_config);
    if (start_config == DETAIL_ALL) {
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -1720,9 +1684,7 @@ std::string WebServer::event_json(event::Event *obj, const std::string &event_ty
        event_types.add(event_type);
      }
      root["device_class"] = obj->get_device_class();
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -1790,9 +1752,7 @@ std::string WebServer::update_json(update::UpdateEntity *obj, JsonDetail start_c
      root["title"] = obj->update_info.title;
      root["summary"] = obj->update_info.summary;
      root["release_url"] = obj->update_info.release_url;
#ifdef USE_WEBSERVER_SORTING
      this->add_sorting_info_(root, obj);
#endif
    }
  });
}

@@ -2102,6 +2062,17 @@ void WebServer::handleRequest(AsyncWebServerRequest *request) {

bool WebServer::isRequestHandlerTrivial() const { return false; }

void WebServer::add_sorting_info_(JsonObject &root, EntityBase *entity) {
#ifdef USE_WEBSERVER_SORTING
  if (this->sorting_entitys_.find(entity) != this->sorting_entitys_.end()) {
    root["sorting_weight"] = this->sorting_entitys_[entity].weight;
    if (this->sorting_groups_.find(this->sorting_entitys_[entity].group_id) != this->sorting_groups_.end()) {
      root["sorting_group"] = this->sorting_groups_[this->sorting_entitys_[entity].group_id].name;
    }
  }
#endif
}

#ifdef USE_WEBSERVER_SORTING
void WebServer::add_entity_config(EntityBase *entity, float weight, uint64_t group) {
  this->sorting_entitys_[entity] = SortingComponents{weight, group};

@@ -2110,15 +2081,6 @@ void WebServer::add_entity_config(EntityBase *entity, float weight, uint64_t gro
void WebServer::add_sorting_group(uint64_t group_id, const std::string &group_name, float weight) {
  this->sorting_groups_[group_id] = SortingGroup{group_name, weight};
}

void WebServer::add_sorting_info_(JsonObject &root, EntityBase *entity) {
  if (this->sorting_entitys_.find(entity) != this->sorting_entitys_.end()) {
    root["sorting_weight"] = this->sorting_entitys_[entity].weight;
    if (this->sorting_groups_.find(this->sorting_entitys_[entity].group_id) != this->sorting_groups_.end()) {
      root["sorting_group"] = this->sorting_groups_[this->sorting_entitys_[entity].group_id].name;
    }
  }
}
#endif

void WebServer::schedule_(std::function<void()> &&f) {
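The pattern behind these web server hunks: rather than wrapping every add_sorting_info_ call site in #ifdef USE_WEBSERVER_SORTING, the guard moves inside the helper so the call sites stay unconditional. A simplified sketch of the idea (not literal ESPHome code):

// Call site: always compiled, no per-site guard needed.
if (start_config == DETAIL_ALL) {
  this->add_sorting_info_(root, obj);
}

// Helper: compiles to an empty body when sorting support is disabled.
void WebServer::add_sorting_info_(JsonObject &root, EntityBase *entity) {
#ifdef USE_WEBSERVER_SORTING
  // look up the entity's weight and group and add them to the JSON object
#endif
}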
@@ -487,9 +487,7 @@ class WebServer : public Controller, public Component, public AsyncWebHandler {
  bool include_internal_{false};

 protected:
#ifdef USE_WEBSERVER_SORTING
  void add_sorting_info_(JsonObject &root, EntityBase *entity);
#endif
  void schedule_(std::function<void()> &&f);
  web_server_base::WebServerBase *base_;
#ifdef USE_ARDUINO
@@ -338,6 +338,7 @@ AsyncEventSourceResponse::AsyncEventSourceResponse(const AsyncWebServerRequest *
  std::string message = ws->get_config_json();
  this->try_send_nodefer(message.c_str(), "ping", millis(), 30000);

#ifdef USE_WEBSERVER_SORTING
  for (auto &group : ws->sorting_groups_) {
    message = json::build_json([group](JsonObject root) {
      root["name"] = group.second.name;

@@ -348,6 +349,7 @@ AsyncEventSourceResponse::AsyncEventSourceResponse(const AsyncWebServerRequest *
    // since the only thing in the send buffer at this point is the initial ping/config
    this->try_send_nodefer(message.c_str(), "sorting_group");
  }
#endif

  this->entities_iterator_->begin(ws->include_internal_);
esphome/core/event_pool.h (new file, 81 lines)

@@ -0,0 +1,81 @@
#pragma once

#if defined(USE_ESP32) || defined(USE_LIBRETINY)

#include <atomic>
#include <cstddef>
#include "esphome/core/helpers.h"
#include "esphome/core/lock_free_queue.h"

namespace esphome {

// Event Pool - On-demand pool of objects to avoid heap fragmentation
// Events are allocated on first use and reused thereafter, growing to peak usage
// @tparam T The type of objects managed by the pool (must have a release() method)
// @tparam SIZE The maximum number of objects in the pool (1-255, limited by uint8_t)
template<class T, uint8_t SIZE> class EventPool {
 public:
  EventPool() : total_created_(0) {}

  ~EventPool() {
    // Clean up any remaining events in the free list
    // IMPORTANT: This destructor assumes no concurrent access. The EventPool must not
    // be destroyed while any thread might still call allocate() or release().
    // In practice, this is typically ensured by destroying the pool only during
    // component shutdown when all producer/consumer threads have been stopped.
    T *event;
    RAMAllocator<T> allocator(RAMAllocator<T>::ALLOC_INTERNAL);
    while ((event = this->free_list_.pop()) != nullptr) {
      // Call destructor
      event->~T();
      // Deallocate using RAMAllocator
      allocator.deallocate(event, 1);
    }
  }

  // Allocate an event from the pool
  // Returns nullptr if pool is full
  T *allocate() {
    // Try to get from free list first
    T *event = this->free_list_.pop();
    if (event != nullptr)
      return event;

    // Need to create a new event
    if (this->total_created_ >= SIZE) {
      // Pool is at capacity
      return nullptr;
    }

    // Use internal RAM for better performance
    RAMAllocator<T> allocator(RAMAllocator<T>::ALLOC_INTERNAL);
    event = allocator.allocate(1);

    if (event == nullptr) {
      // Memory allocation failed
      return nullptr;
    }

    // Placement new to construct the object
    new (event) T();
    this->total_created_++;
    return event;
  }

  // Return an event to the pool for reuse
  void release(T *event) {
    if (event != nullptr) {
      // Clean up the event's allocated memory
      event->release();
      this->free_list_.push(event);
    }
  }

 private:
  LockFreeQueue<T, SIZE> free_list_;  // Free events ready for reuse
  uint8_t total_created_;             // Total events created (high water mark, max 255)
};

}  // namespace esphome

#endif  // defined(USE_ESP32) || defined(USE_LIBRETINY)
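A hedged usage sketch of EventPool with a minimal pooled type; MyEvent and the surrounding code are illustrative, not part of this commit:

#include "esphome/core/event_pool.h"

// Any pooled type must provide release(), which EventPool::release() calls
// before putting the object back on the free list.
struct MyEvent {
  int payload{0};
  void release() { payload = 0; }  // drop any heap data / reset state here
};

esphome::EventPool<MyEvent, 16> pool;

void example() {
  MyEvent *ev = pool.allocate();  // returns nullptr once 16 events are live
  if (ev == nullptr)
    return;
  ev->payload = 42;
  // ... hand the event to a consumer ...
  pool.release(ev);  // calls ev->release(), then recycles the object
}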
esphome/core/lock_free_queue.h (new file, 132 lines)

@@ -0,0 +1,132 @@
#pragma once

#if defined(USE_ESP32) || defined(USE_LIBRETINY)

#include <atomic>
#include <cstddef>

#if defined(USE_ESP32)
#include <freertos/FreeRTOS.h>
#include <freertos/task.h>
#elif defined(USE_LIBRETINY)
#include <FreeRTOS.h>
#include <task.h>
#endif

/*
 * Lock-free queue for single-producer single-consumer scenarios.
 * This allows one thread to push items and another to pop them without
 * blocking each other.
 *
 * This is a Single-Producer Single-Consumer (SPSC) lock-free ring buffer.
 * Available on platforms with FreeRTOS support (ESP32, LibreTiny).
 *
 * Common use cases:
 * - BLE events: BLE task produces, main loop consumes
 * - MQTT messages: main task produces, MQTT thread consumes
 *
 * @tparam T The type of elements stored in the queue (must be a pointer type)
 * @tparam SIZE The maximum number of elements (1-255, limited by uint8_t indices)
 */

namespace esphome {

template<class T, uint8_t SIZE> class LockFreeQueue {
 public:
  LockFreeQueue() : head_(0), tail_(0), dropped_count_(0), task_to_notify_(nullptr) {}

  bool push(T *element) {
    if (element == nullptr)
      return false;

    uint8_t current_tail = tail_.load(std::memory_order_relaxed);
    uint8_t next_tail = (current_tail + 1) % SIZE;

    // Read head before incrementing tail
    uint8_t head_before = head_.load(std::memory_order_acquire);

    if (next_tail == head_before) {
      // Buffer full
      dropped_count_.fetch_add(1, std::memory_order_relaxed);
      return false;
    }

    // Check if queue was empty before push
    bool was_empty = (current_tail == head_before);

    buffer_[current_tail] = element;
    tail_.store(next_tail, std::memory_order_release);

    // Notify optimization: only notify if we need to
    if (task_to_notify_ != nullptr) {
      if (was_empty) {
        // Queue was empty - consumer might be going to sleep, must notify
        xTaskNotifyGive(task_to_notify_);
      } else {
        // Queue wasn't empty - check if consumer has caught up to previous tail
        uint8_t head_after = head_.load(std::memory_order_acquire);
        if (head_after == current_tail) {
          // Consumer just caught up to where tail was - might go to sleep, must notify
          // Note: There's a benign race here - between reading head_after and calling
          // xTaskNotifyGive(), the consumer could advance further. This would result
          // in an unnecessary wake-up, but is harmless and extremely rare in practice.
          xTaskNotifyGive(task_to_notify_);
        }
        // Otherwise: consumer is still behind, no need to notify
      }
    }

    return true;
  }

  T *pop() {
    uint8_t current_head = head_.load(std::memory_order_relaxed);

    if (current_head == tail_.load(std::memory_order_acquire)) {
      return nullptr;  // Empty
    }

    T *element = buffer_[current_head];
    head_.store((current_head + 1) % SIZE, std::memory_order_release);
    return element;
  }

  size_t size() const {
    uint8_t tail = tail_.load(std::memory_order_acquire);
    uint8_t head = head_.load(std::memory_order_acquire);
    return (tail - head + SIZE) % SIZE;
  }

  uint16_t get_and_reset_dropped_count() { return dropped_count_.exchange(0, std::memory_order_relaxed); }

  void increment_dropped_count() { dropped_count_.fetch_add(1, std::memory_order_relaxed); }

  bool empty() const { return head_.load(std::memory_order_acquire) == tail_.load(std::memory_order_acquire); }

  bool full() const {
    uint8_t next_tail = (tail_.load(std::memory_order_relaxed) + 1) % SIZE;
    return next_tail == head_.load(std::memory_order_acquire);
  }

  // Set the FreeRTOS task handle to notify when items are pushed to the queue
  // This enables efficient wake-up of a consumer task that's waiting for data
  // @param task The FreeRTOS task handle to notify, or nullptr to disable notifications
  void set_task_to_notify(TaskHandle_t task) { task_to_notify_ = task; }

 protected:
  T *buffer_[SIZE];
  // Atomic: written by producer (push/increment), read+reset by consumer (get_and_reset)
  std::atomic<uint16_t> dropped_count_;  // 65535 max - more than enough for drop tracking
  // Atomic: written by consumer (pop), read by producer (push) to check if full
  // Using uint8_t limits queue size to 255 elements but saves memory and ensures
  // atomic operations are efficient on all platforms
  std::atomic<uint8_t> head_;
  // Atomic: written by producer (push), read by consumer (pop) to check if empty
  std::atomic<uint8_t> tail_;
  // Task handle for notification (optional)
  TaskHandle_t task_to_notify_;
};

}  // namespace esphome

#endif  // defined(USE_ESP32) || defined(USE_LIBRETINY)
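A hedged sketch of the producer/consumer pairing this queue is designed for, using standard FreeRTOS task notifications (ulTaskNotifyTake is the receiving counterpart of the xTaskNotifyGive call in push()); the task and type names are illustrative:

#include "esphome/core/lock_free_queue.h"

struct Message {
  // ... payload ...
};

esphome::LockFreeQueue<Message, 32> queue;

// Consumer task: register itself for notifications, then block until woken.
void consumer_task(void *param) {
  queue.set_task_to_notify(xTaskGetCurrentTaskHandle());
  for (;;) {
    // Sleep until the producer notifies; pdTRUE clears the notification count.
    ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
    Message *msg;
    while ((msg = queue.pop()) != nullptr) {
      // ... process msg ...
    }
  }
}

// Producer (another task): push() notifies the consumer only when it may be asleep.
void produce(Message *msg) {
  if (!queue.push(msg)) {
    // Queue full - msg was not enqueued; push() already counted the drop.
  }
}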