Merge branch 'integration' into memory_api

J. Nick Koston 2025-07-18 22:11:40 -10:00
commit e2d509d63d
4 changed files with 75 additions and 31 deletions

View File

@@ -1632,6 +1632,10 @@ ProtoWriteBuffer APIConnection::allocate_batch_message_buffer(uint16_t size) {
}
void APIConnection::process_batch_() {
// Ensure PacketInfo remains trivially destructible for our placement new approach
static_assert(std::is_trivially_destructible<PacketInfo>::value,
"PacketInfo must remain trivially destructible with this placement-new approach");
if (this->deferred_batch_.empty()) {
this->flags_.batch_scheduled = false;
return;
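
Side note on the static_assert added above: a minimal, standalone sketch (struct names here are illustrative, not from the diff) of what the check guards against. Abandoning placement-new'd objects without destructor calls is only safe while the type stays trivially destructible; adding an owning member such as a std::string would silently break that.

#include <cstdint>
#include <string>
#include <type_traits>

struct PodPacket {            // fine to abandon without ~PodPacket()
  uint8_t type;
  uint16_t offset;
  uint16_t size;
};
struct OwningPacket {         // would need explicit destructor calls
  std::string name;
};

static_assert(std::is_trivially_destructible<PodPacket>::value,
              "safe for the placement-new-without-destroy pattern");
static_assert(!std::is_trivially_destructible<OwningPacket>::value,
              "this is the kind of change the assert is meant to catch");
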
@@ -1669,9 +1673,12 @@ void APIConnection::process_batch_() {
return;
}
// Pre-allocate storage for packet info
std::vector<PacketInfo> packet_info;
packet_info.reserve(num_items);
size_t packets_to_process = std::min(num_items, MAX_PACKETS_PER_BATCH);
// Stack-allocated array for packet info
alignas(PacketInfo) char packet_info_storage[MAX_PACKETS_PER_BATCH * sizeof(PacketInfo)];
PacketInfo *packet_info = reinterpret_cast<PacketInfo *>(packet_info_storage);
size_t packet_count = 0;
// Cache these values to avoid repeated virtual calls
const uint8_t header_padding = this->helper_->frame_header_padding();
@@ -1703,8 +1710,8 @@ void APIConnection::process_batch_() {
// The actual message data follows after the header padding
uint32_t current_offset = 0;
// Process items and encode directly to buffer
for (size_t i = 0; i < this->deferred_batch_.size(); i++) {
// Process items and encode directly to buffer (up to our limit)
for (size_t i = 0; i < packets_to_process; i++) {
const auto &item = this->deferred_batch_[i];
// Try to encode message
// The creator will calculate overhead to determine if the message fits
@@ -1718,7 +1725,9 @@ void APIConnection::process_batch_() {
// Message was encoded successfully
// payload_size is header_padding + actual payload size + footer_size
uint16_t proto_payload_size = payload_size - header_padding - footer_size;
packet_info.emplace_back(item.message_type, current_offset, proto_payload_size);
// Use placement new to construct PacketInfo in pre-allocated stack array
// This avoids default-constructing all MAX_PACKETS_PER_BATCH elements
new (&packet_info[packet_count++]) PacketInfo(item.message_type, current_offset, proto_payload_size);
// Update tracking variables
items_processed++;
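
Putting the pieces from the hunks above together, a self-contained sketch of the pattern (hypothetical names, arbitrary capacity): raw aligned stack storage, a running count of constructed elements, and placement new so only the entries actually used are ever constructed, with no heap allocation.

#include <cstddef>
#include <cstdint>
#include <new>

struct Info {
  uint32_t offset;
  uint16_t type;
  uint16_t size;
};

constexpr size_t kMaxItems = 32;

size_t encode_batch() {
  alignas(Info) char storage[kMaxItems * sizeof(Info)];  // no heap, no default construction
  Info *items = reinterpret_cast<Info *>(storage);
  size_t count = 0;
  for (size_t i = 0; i < kMaxItems; i++) {
    // ...attempt to encode message i; continue/break on failure...
    new (&items[count++]) Info{/*offset=*/0, /*type=*/1, /*size=*/0};  // construct in place
  }
  // Trivially destructible, so nothing to tear down when `storage` goes out of scope.
  return count;
}
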
@@ -1744,8 +1753,8 @@ void APIConnection::process_batch_() {
}
// Send all collected packets
APIError err =
this->helper_->write_protobuf_packets(ProtoWriteBuffer{&this->parent_->get_shared_buffer_ref()}, packet_info);
APIError err = this->helper_->write_protobuf_packets(ProtoWriteBuffer{&this->parent_->get_shared_buffer_ref()},
std::span<const PacketInfo>(packet_info, packet_count));
if (err != APIError::OK && err != APIError::WOULD_BLOCK) {
on_fatal_error();
ESP_LOGW(TAG, "%s: Batch write failed %s errno=%d", this->get_client_combined_info().c_str(), api_error_to_str(err),
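
For completeness, a short sketch of the std::span handoff used in this hunk (assumes a C++20 <span>; send_all is a placeholder, not the real helper): the span wraps only the constructed prefix of the stack array, passing a pointer plus a length with no copy.

#include <cstddef>
#include <cstdint>
#include <span>

struct Info { uint16_t type; };     // stand-in element type

void send_all(std::span<const Info> infos) {
  (void) infos;  // placeholder: a real implementation would serialize each entry
}

void example(const Info *items, size_t count) {
  send_all(std::span<const Info>(items, count));  // view over items[0..count)
}
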

View File

@@ -19,7 +19,17 @@ namespace api {
// Keepalive timeout in milliseconds
static constexpr uint32_t KEEPALIVE_TIMEOUT_MS = 60000;
// Maximum number of entities to process in a single batch during initial state/info sending
static constexpr size_t MAX_INITIAL_PER_BATCH = 20;
// This was increased from 20 to 24 after removing the unique_id field from entity info messages,
// which reduced message sizes allowing more entities per batch without exceeding packet limits
static constexpr size_t MAX_INITIAL_PER_BATCH = 24;
// Maximum number of packets to process in a single batch (platform-dependent)
// This limit exists to prevent stack overflow from the PacketInfo array in process_batch_
// Each PacketInfo is 8 bytes, so 64 * 8 = 512 bytes, 32 * 8 = 256 bytes
#if defined(USE_ESP32) || defined(USE_HOST)
static constexpr size_t MAX_PACKETS_PER_BATCH = 64; // ESP32 has 8KB+ stack, HOST has plenty
#else
static constexpr size_t MAX_PACKETS_PER_BATCH = 32; // ESP8266/RP2040/etc have smaller stacks
#endif
class APIConnection : public APIServerConnection {
public:
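
The stack-budget arithmetic in the comment can be checked at compile time. A hedged sketch (the field layout below is an assumed 8-byte stand-in, not PacketInfo's real definition):

#include <cstddef>
#include <cstdint>

struct PacketInfoSketch {   // assumed layout adding up to 8 bytes
  uint32_t offset;
  uint16_t message_type;
  uint16_t payload_size;
};

static_assert(sizeof(PacketInfoSketch) == 8, "matches the 8-byte figure above");
static_assert(64 * sizeof(PacketInfoSketch) == 512, "ESP32/host: 512 bytes of stack");
static_assert(32 * sizeof(PacketInfoSketch) == 256, "smaller-stack platforms: 256 bytes");
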

View File

@@ -128,46 +128,53 @@ void ESP32BLETracker::loop() {
uint8_t write_idx = this->ring_write_index_.load(std::memory_order_acquire);
while (read_idx != write_idx) {
// Process one result at a time directly from ring buffer
BLEScanResult &scan_result = this->scan_ring_buffer_[read_idx];
// Calculate how many contiguous results we can process in one batch
// If write > read: process all results from read to write
// If write <= read (wraparound): process from read to end of buffer first
size_t batch_size = (write_idx > read_idx) ? (write_idx - read_idx) : (SCAN_RESULT_BUFFER_SIZE - read_idx);
// Process the batch for raw advertisements
if (this->raw_advertisements_) {
for (auto *listener : this->listeners_) {
listener->parse_devices(&scan_result, 1);
listener->parse_devices(&this->scan_ring_buffer_[read_idx], batch_size);
}
for (auto *client : this->clients_) {
client->parse_devices(&scan_result, 1);
client->parse_devices(&this->scan_ring_buffer_[read_idx], batch_size);
}
}
// Process individual results for parsed advertisements
if (this->parse_advertisements_) {
#ifdef USE_ESP32_BLE_DEVICE
ESPBTDevice device;
device.parse_scan_rst(scan_result);
for (size_t i = 0; i < batch_size; i++) {
BLEScanResult &scan_result = this->scan_ring_buffer_[read_idx + i];
ESPBTDevice device;
device.parse_scan_rst(scan_result);
bool found = false;
for (auto *listener : this->listeners_) {
if (listener->parse_device(device))
found = true;
}
bool found = false;
for (auto *listener : this->listeners_) {
if (listener->parse_device(device))
found = true;
}
for (auto *client : this->clients_) {
if (client->parse_device(device)) {
found = true;
if (!connecting && client->state() == ClientState::DISCOVERED) {
promote_to_connecting = true;
for (auto *client : this->clients_) {
if (client->parse_device(device)) {
found = true;
if (!connecting && client->state() == ClientState::DISCOVERED) {
promote_to_connecting = true;
}
}
}
}
if (!found && !this->scan_continuous_) {
this->print_bt_device_info(device);
if (!found && !this->scan_continuous_) {
this->print_bt_device_info(device);
}
}
#endif // USE_ESP32_BLE_DEVICE
}
// Move to next entry in ring buffer
read_idx = (read_idx + 1) % SCAN_RESULT_BUFFER_SIZE;
// Update read index for entire batch
read_idx = (read_idx + batch_size) % SCAN_RESULT_BUFFER_SIZE;
// Store with release to ensure reads complete before index update
this->ring_read_index_.store(read_idx, std::memory_order_release);
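
A standalone sketch of the consumer-side batching used here (single-producer/single-consumer ring; buffer size, element type, and the handler callback are illustrative): take the contiguous run up to either the writer's index or the end of the array, hand it off in one call, then publish the new read index; a wrapped remainder is picked up on the next pass of the while loop.

#include <atomic>
#include <cstddef>
#include <cstdint>

constexpr size_t kRingSize = 32;

struct Item { uint8_t bytes[8]; };

Item ring[kRingSize];
std::atomic<uint8_t> write_index{0};  // advanced by the producer
std::atomic<uint8_t> read_index{0};   // advanced by the consumer

void consume(void (*handle_batch)(const Item *, size_t)) {
  uint8_t read = read_index.load(std::memory_order_relaxed);
  uint8_t write = write_index.load(std::memory_order_acquire);  // see the producer's writes
  while (read != write) {
    // Contiguous run: up to the writer, or to the end of the array on wraparound.
    size_t batch = (write > read) ? (write - read) : (kRingSize - read);
    handle_batch(&ring[read], batch);
    read = static_cast<uint8_t>((read + batch) % kRingSize);
    // Release so the producer only reuses slots after we are done reading them.
    read_index.store(read, std::memory_order_release);
  }
}
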

View File

@@ -4,7 +4,13 @@ from esphome import pins
import esphome.codegen as cg
from esphome.components import binary_sensor
import esphome.config_validation as cv
from esphome.const import CONF_ID, CONF_NAME, CONF_NUMBER, CONF_PIN
from esphome.const import (
CONF_ALLOW_OTHER_USES,
CONF_ID,
CONF_NAME,
CONF_NUMBER,
CONF_PIN,
)
from esphome.core import CORE
from .. import gpio_ns
@@ -76,6 +82,18 @@ async def to_code(config):
)
use_interrupt = False
# Check if pin is shared with other components (allow_other_uses)
# When a pin is shared, interrupts can interfere with other components
# (e.g., duty_cycle sensor) that need to monitor the pin's state changes
if use_interrupt and config[CONF_PIN].get(CONF_ALLOW_OTHER_USES, False):
_LOGGER.info(
"GPIO binary_sensor '%s': Disabling interrupts because pin %s is shared with other components. "
"The sensor will use polling mode for compatibility with other pin uses.",
config.get(CONF_NAME, config[CONF_ID]),
config[CONF_PIN][CONF_NUMBER],
)
use_interrupt = False
cg.add(var.set_use_interrupt(use_interrupt))
if use_interrupt:
cg.add(var.set_interrupt_type(config[CONF_INTERRUPT_TYPE]))