Mirror of https://github.com/esphome/esphome.git
Synced 2025-08-19 00:29:26 +00:00

Compare commits (1 commit): libretiny_... ... esp32_ble_...

Commit 8a9d30c8d3
@@ -11,7 +11,7 @@ ci:
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    # Ruff version.
    rev: v0.12.9
    rev: v0.12.8
    hooks:
      # Run the linter.
      - id: ruff
@@ -289,26 +289,16 @@ uint16_t APIConnection::encode_message_to_buffer(ProtoMessage &msg, uint8_t mess
    return 0;  // Doesn't fit
  }

  // Allocate buffer space - pass payload size, allocation functions add header/footer space
  ProtoWriteBuffer buffer = is_single ? conn->allocate_single_message_buffer(calculated_size)
                                      : conn->allocate_batch_message_buffer(calculated_size);

  // Get buffer size after allocation (which includes header padding)
  std::vector<uint8_t> &shared_buf = conn->parent_->get_shared_buffer_ref();

  if (is_single || conn->flags_.batch_first_message) {
    // Single message or first batch message
    conn->prepare_first_message_buffer(shared_buf, header_padding, total_calculated_size);
    if (conn->flags_.batch_first_message) {
      conn->flags_.batch_first_message = false;
    }
  } else {
    // Batch message second or later
    // Add padding for previous message footer + this message header
    size_t current_size = shared_buf.size();
    shared_buf.reserve(current_size + total_calculated_size);
    shared_buf.resize(current_size + footer_size + header_padding);
  }
  size_t size_before_encode = shared_buf.size();

  // Encode directly into buffer
  size_t size_before_encode = shared_buf.size();
  msg.encode({&shared_buf});
  msg.encode(buffer);

  // Calculate actual encoded size (not including header that was already added)
  size_t actual_payload_size = shared_buf.size() - size_before_encode;

@@ -1630,6 +1620,14 @@ bool APIConnection::schedule_batch_() {
  return true;
}

ProtoWriteBuffer APIConnection::allocate_single_message_buffer(uint16_t size) { return this->create_buffer(size); }

ProtoWriteBuffer APIConnection::allocate_batch_message_buffer(uint16_t size) {
  ProtoWriteBuffer result = this->prepare_message_buffer(size, this->flags_.batch_first_message);
  this->flags_.batch_first_message = false;
  return result;
}

void APIConnection::process_batch_() {
  // Ensure PacketInfo remains trivially destructible for our placement new approach
  static_assert(std::is_trivially_destructible<PacketInfo>::value,

@@ -1737,7 +1735,7 @@ void APIConnection::process_batch_() {
    }
    remaining_size -= payload_size;
    // Calculate where the next message's header padding will start
    // Current buffer size + footer space for this message
    // Current buffer size + footer space (that prepare_message_buffer will add for this message)
    current_offset = shared_buf.size() + footer_size;
  }
@@ -252,21 +252,44 @@ class APIConnection : public APIServerConnection {

    // Get header padding size - used for both reserve and insert
    uint8_t header_padding = this->helper_->frame_header_padding();

    // Get shared buffer from parent server
    std::vector<uint8_t> &shared_buf = this->parent_->get_shared_buffer_ref();
    this->prepare_first_message_buffer(shared_buf, header_padding,
                                       reserve_size + header_padding + this->helper_->frame_footer_size());
    return {&shared_buf};
  }

  void prepare_first_message_buffer(std::vector<uint8_t> &shared_buf, size_t header_padding, size_t total_size) {
    shared_buf.clear();
    // Reserve space for header padding + message + footer
    // - Header padding: space for protocol headers (7 bytes for Noise, 6 for Plaintext)
    // - Footer: space for MAC (16 bytes for Noise, 0 for Plaintext)
    shared_buf.reserve(total_size);
    shared_buf.reserve(reserve_size + header_padding + this->helper_->frame_footer_size());
    // Resize to add header padding so message encoding starts at the correct position
    shared_buf.resize(header_padding);
    return {&shared_buf};
  }

  // Prepare buffer for next message in batch
  ProtoWriteBuffer prepare_message_buffer(uint16_t message_size, bool is_first_message) {
    // Get reference to shared buffer (it maintains state between batch messages)
    std::vector<uint8_t> &shared_buf = this->parent_->get_shared_buffer_ref();

    if (is_first_message) {
      shared_buf.clear();
    }

    size_t current_size = shared_buf.size();

    // Calculate padding to add:
    // - First message: just header padding
    // - Subsequent messages: footer for previous message + header padding for this message
    size_t padding_to_add = is_first_message
                                ? this->helper_->frame_header_padding()
                                : this->helper_->frame_header_padding() + this->helper_->frame_footer_size();

    // Reserve space for padding + message
    shared_buf.reserve(current_size + padding_to_add + message_size);

    // Resize to add the padding bytes
    shared_buf.resize(current_size + padding_to_add);

    return {&shared_buf};
  }

  bool try_to_clear_buffer(bool log_out_of_space);

@@ -274,6 +297,10 @@ class APIConnection : public APIServerConnection {

  std::string get_client_combined_info() const { return this->client_info_.get_combined_info(); }

  // Buffer allocator methods for batch processing
  ProtoWriteBuffer allocate_single_message_buffer(uint16_t size);
  ProtoWriteBuffer allocate_batch_message_buffer(uint16_t size);

 protected:
  // Helper function to handle authentication completion
  void complete_authentication_();
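The reserve arithmetic in prepare_first_message_buffer and prepare_message_buffer above can be modeled in isolation. The standalone C++ sketch below is illustrative only: the constants and names are assumptions taken from the comments above (7-byte Noise / 6-byte Plaintext header, 16-byte Noise MAC footer), not the ESPHome API.

#include <cstdint>
#include <cstdio>
#include <vector>

// Assumed sizes, mirroring the comments above (Noise framing).
constexpr size_t HEADER_PADDING = 7;  // protocol header bytes reserved before each payload
constexpr size_t FOOTER_SIZE = 16;    // MAC bytes appended after each payload

// Reserve space for one message of `payload_size` bytes in a shared batch buffer.
// First message: clear the buffer and add header padding only.
// Later messages: add footer space for the previous message plus this header.
void reserve_message_slot(std::vector<uint8_t> &buf, size_t payload_size, bool first) {
  if (first)
    buf.clear();
  const size_t padding = first ? HEADER_PADDING : HEADER_PADDING + FOOTER_SIZE;
  buf.reserve(buf.size() + padding + payload_size);
  buf.resize(buf.size() + padding);  // the payload is encoded starting at this position
}

int main() {
  std::vector<uint8_t> buf;
  reserve_message_slot(buf, 40, true);   // first message in the batch
  buf.resize(buf.size() + 40);           // pretend 40 payload bytes were encoded
  reserve_message_slot(buf, 20, false);  // second message: previous footer + new header
  buf.resize(buf.size() + 20);
  printf("bytes used before the final footer: %zu\n", buf.size());  // 7 + 40 + 16 + 7 + 20 = 90
  return 0;
}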
@@ -133,7 +133,7 @@ void BluetoothConnection::loop() {

  // Check if we should disable the loop
  // - For V3_WITH_CACHE: Services are never sent, disable after INIT state
  // - For V3_WITHOUT_CACHE: Disable only after service discovery is complete
  // - For other connections: Disable only after service discovery is complete
  //   (send_service_ == DONE_SENDING_SERVICES, which is only set after services are sent)
  if (this->state_ != espbt::ClientState::INIT && (this->connection_type_ == espbt::ConnectionType::V3_WITH_CACHE ||
                                                   this->send_service_ == DONE_SENDING_SERVICES)) {

@@ -160,7 +160,10 @@ void BluetoothConnection::send_service_for_discovery_() {
  if (this->send_service_ >= this->service_count_) {
    this->send_service_ = DONE_SENDING_SERVICES;
    this->proxy_->send_gatt_services_done(this->address_);
    this->release_services();
    if (this->connection_type_ == espbt::ConnectionType::V3_WITH_CACHE ||
        this->connection_type_ == espbt::ConnectionType::V3_WITHOUT_CACHE) {
      this->release_services();
    }
    return;
  }
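The loop-disable condition above reduces to a small predicate. A minimal sketch follows, using illustrative stand-in enums rather than the real espbt types.

// Stand-ins for espbt::ClientState and espbt::ConnectionType (illustrative only).
enum class ClientState { INIT, CONNECTED };
enum class ConnectionType { V3_WITH_CACHE, V3_WITHOUT_CACHE, V1 };

// Mirrors the condition above: once past INIT, the loop can stop either because
// services are never sent (V3_WITH_CACHE) or because service discovery finished.
bool can_disable_loop(ClientState state, ConnectionType type, bool services_done) {
  return state != ClientState::INIT &&
         (type == ConnectionType::V3_WITH_CACHE || services_done);
}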
@@ -8,7 +8,6 @@
#include <cinttypes>
#include <vector>
#include <string>
#include <memory>

namespace esphome {
namespace esp32 {

@@ -157,23 +156,20 @@ class ESP32Preferences : public ESPPreferences {
    return failed == 0;
  }
  bool is_changed(const uint32_t nvs_handle, const NVSData &to_save) {
    NVSData stored_data{};
    size_t actual_len;
    esp_err_t err = nvs_get_blob(nvs_handle, to_save.key.c_str(), nullptr, &actual_len);
    if (err != 0) {
      ESP_LOGV(TAG, "nvs_get_blob('%s'): %s - the key might not be set yet", to_save.key.c_str(), esp_err_to_name(err));
      return true;
    }
    // Check size first before allocating memory
    if (actual_len != to_save.data.size()) {
      return true;
    }
    auto stored_data = std::make_unique<uint8_t[]>(actual_len);
    err = nvs_get_blob(nvs_handle, to_save.key.c_str(), stored_data.get(), &actual_len);
    stored_data.data.resize(actual_len);
    err = nvs_get_blob(nvs_handle, to_save.key.c_str(), stored_data.data.data(), &actual_len);
    if (err != 0) {
      ESP_LOGV(TAG, "nvs_get_blob('%s') failed: %s", to_save.key.c_str(), esp_err_to_name(err));
      return true;
    }
    return memcmp(to_save.data.data(), stored_data.get(), to_save.data.size()) != 0;
    return to_save.data != stored_data.data;
  }

  bool reset() override {
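The vector-based comparison above follows a common NVS pattern: query the blob length first, bail out early on a size mismatch, and only then read and compare the contents. A minimal sketch under those assumptions is shown below; the function and variable names are illustrative, not the ESPHome class, and only the standard ESP-IDF nvs_get_blob call is used.

#include <string>
#include <vector>

#include <nvs.h>

// Returns true when the blob stored under `key` differs from `new_data`
// (or cannot be read), i.e. when a write is actually needed.
bool blob_changed(nvs_handle_t handle, const std::string &key, const std::vector<uint8_t> &new_data) {
  size_t stored_len = 0;
  if (nvs_get_blob(handle, key.c_str(), nullptr, &stored_len) != ESP_OK)
    return true;  // key not set yet (or read error): treat as changed
  if (stored_len != new_data.size())
    return true;  // different size, no need to read the contents
  std::vector<uint8_t> stored(stored_len);
  if (nvs_get_blob(handle, key.c_str(), stored.data(), &stored_len) != ESP_OK)
    return true;
  return stored != new_data;  // element-wise comparison via std::vector
}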
@@ -3,7 +3,8 @@
#ifdef USE_ESP32

#include <cstddef>  // for offsetof
#include <cstring>  // for memcpy
#include <vector>

#include <esp_gap_ble_api.h>
#include <esp_gattc_api.h>
#include <esp_gatts_api.h>

@@ -63,7 +64,7 @@ static_assert(offsetof(esp_ble_gap_cb_param_t, read_rssi_cmpl.remote_addr) == si

// Received GAP, GATTC and GATTS events are only queued, and get processed in the main loop().
// This class stores each event with minimal memory usage.
// GAP events (99% of traffic) don't have the heap allocation overhead.
// GAP events (99% of traffic) don't have the vector overhead.
// GATTC/GATTS events use heap allocation for their param and data.
//
// Event flow:

@@ -144,18 +145,16 @@ class BLEEvent {
  }
  if (this->type_ == GATTC) {
    delete this->event_.gattc.gattc_param;
    delete[] this->event_.gattc.data;
    delete this->event_.gattc.data;
    this->event_.gattc.gattc_param = nullptr;
    this->event_.gattc.data = nullptr;
    this->event_.gattc.data_len = 0;
    return;
  }
  if (this->type_ == GATTS) {
    delete this->event_.gatts.gatts_param;
    delete[] this->event_.gatts.data;
    delete this->event_.gatts.data;
    this->event_.gatts.gatts_param = nullptr;
    this->event_.gatts.data = nullptr;
    this->event_.gatts.data_len = 0;
  }
}

@@ -210,19 +209,17 @@ class BLEEvent {
    esp_gattc_cb_event_t gattc_event;
    esp_gatt_if_t gattc_if;
    esp_ble_gattc_cb_param_t *gattc_param;  // Heap-allocated
    uint8_t *data;      // Heap-allocated raw buffer (manually managed)
    uint16_t data_len;  // Track size separately
  } gattc;
    std::vector<uint8_t> *data;  // Heap-allocated
  } gattc;  // 16 bytes (pointers only)

  // NOLINTNEXTLINE(readability-identifier-naming)
  struct gatts_event {
    esp_gatts_cb_event_t gatts_event;
    esp_gatt_if_t gatts_if;
    esp_ble_gatts_cb_param_t *gatts_param;  // Heap-allocated
    uint8_t *data;      // Heap-allocated raw buffer (manually managed)
    uint16_t data_len;  // Track size separately
  } gatts;
} event_;  // 80 bytes
    std::vector<uint8_t> *data;  // Heap-allocated
  } gatts;  // 16 bytes (pointers only)
} event_;  // 80 bytes

ble_event_t type_;

@@ -322,7 +319,6 @@ class BLEEvent {
  if (p == nullptr) {
    this->event_.gattc.gattc_param = nullptr;
    this->event_.gattc.data = nullptr;
    this->event_.gattc.data_len = 0;
    return;  // Invalid event, but we can't log in header file
  }

@@ -340,29 +336,16 @@ class BLEEvent {
  // We must copy this data to ensure it remains valid when the event is processed later.
  switch (e) {
    case ESP_GATTC_NOTIFY_EVT:
      this->event_.gattc.data_len = p->notify.value_len;
      if (p->notify.value_len > 0) {
        this->event_.gattc.data = new uint8_t[p->notify.value_len];
        memcpy(this->event_.gattc.data, p->notify.value, p->notify.value_len);
      } else {
        this->event_.gattc.data = nullptr;
      }
      this->event_.gattc.gattc_param->notify.value = this->event_.gattc.data;
      this->event_.gattc.data = new std::vector<uint8_t>(p->notify.value, p->notify.value + p->notify.value_len);
      this->event_.gattc.gattc_param->notify.value = this->event_.gattc.data->data();
      break;
    case ESP_GATTC_READ_CHAR_EVT:
    case ESP_GATTC_READ_DESCR_EVT:
      this->event_.gattc.data_len = p->read.value_len;
      if (p->read.value_len > 0) {
        this->event_.gattc.data = new uint8_t[p->read.value_len];
        memcpy(this->event_.gattc.data, p->read.value, p->read.value_len);
      } else {
        this->event_.gattc.data = nullptr;
      }
      this->event_.gattc.gattc_param->read.value = this->event_.gattc.data;
      this->event_.gattc.data = new std::vector<uint8_t>(p->read.value, p->read.value + p->read.value_len);
      this->event_.gattc.gattc_param->read.value = this->event_.gattc.data->data();
      break;
    default:
      this->event_.gattc.data = nullptr;
      this->event_.gattc.data_len = 0;
      break;
  }
}

@@ -375,7 +358,6 @@ class BLEEvent {
  if (p == nullptr) {
    this->event_.gatts.gatts_param = nullptr;
    this->event_.gatts.data = nullptr;
    this->event_.gatts.data_len = 0;
    return;  // Invalid event, but we can't log in header file
  }

@@ -393,18 +375,11 @@ class BLEEvent {
  // We must copy this data to ensure it remains valid when the event is processed later.
  switch (e) {
    case ESP_GATTS_WRITE_EVT:
      this->event_.gatts.data_len = p->write.len;
      if (p->write.len > 0) {
        this->event_.gatts.data = new uint8_t[p->write.len];
        memcpy(this->event_.gatts.data, p->write.value, p->write.len);
      } else {
        this->event_.gatts.data = nullptr;
      }
      this->event_.gatts.gatts_param->write.value = this->event_.gatts.data;
      this->event_.gatts.data = new std::vector<uint8_t>(p->write.value, p->write.value + p->write.len);
      this->event_.gatts.gatts_param->write.value = this->event_.gatts.data->data();
      break;
    default:
      this->event_.gatts.data = nullptr;
      this->event_.gatts.data_len = 0;
      break;
  }
}
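The copy-into-vector pattern above can be shown in reduced form: the payload is duplicated into a heap-allocated std::vector and the stored param's value pointer is re-aimed at that copy, so it stays valid after the stack's original callback buffer is reused. The struct and field names below are illustrative stand-ins, not the real BLEEvent layout.

#include <cstdint>
#include <vector>

// Stand-in for the relevant part of a GATTC notify param (illustrative only).
struct NotifyParam {
  const uint8_t *value;
  uint16_t value_len;
};

struct StoredGattcEvent {
  NotifyParam *param{nullptr};          // heap-allocated copy of the callback param
  std::vector<uint8_t> *data{nullptr};  // heap-allocated copy of the payload

  void load(const NotifyParam &src) {
    this->param = new NotifyParam(src);
    this->data = new std::vector<uint8_t>(src.value, src.value + src.value_len);
    this->param->value = this->data->data();  // point the stored param at the owned copy
  }
  ~StoredGattcEvent() {
    delete this->param;
    delete this->data;  // plain delete, matching the vector-based cleanup above
  }
};

int main() {
  uint8_t raw[3] = {1, 2, 3};
  NotifyParam p{raw, 3};
  StoredGattcEvent ev;
  ev.load(p);
  raw[0] = 99;  // the stored copy is unaffected by changes to the original buffer
  return ev.data->at(0) == 1 ? 0 : 1;
}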
@@ -159,8 +159,7 @@ void BLEClientBase::disconnect() {
    return;
  }
  if (this->state_ == espbt::ClientState::CONNECTING || this->conn_id_ == UNSET_CONN_ID) {
    ESP_LOGW(TAG, "[%d] [%s] Disconnecting before connected, disconnect scheduled.", this->connection_index_,
             this->address_str_.c_str());
    this->log_warning_("Disconnect before connected, disconnect scheduled.");
    this->want_disconnect_ = true;
    return;
  }

@@ -172,13 +171,11 @@ void BLEClientBase::unconditional_disconnect() {
  ESP_LOGI(TAG, "[%d] [%s] Disconnecting (conn_id: %d).", this->connection_index_, this->address_str_.c_str(),
           this->conn_id_);
  if (this->state_ == espbt::ClientState::DISCONNECTING) {
    ESP_LOGE(TAG, "[%d] [%s] Tried to disconnect while already disconnecting.", this->connection_index_,
             this->address_str_.c_str());
    this->log_error_("Already disconnecting");
    return;
  }
  if (this->conn_id_ == UNSET_CONN_ID) {
    ESP_LOGE(TAG, "[%d] [%s] No connection ID set, cannot disconnect.", this->connection_index_,
             this->address_str_.c_str());
    this->log_error_("conn id unset, cannot disconnect");
    return;
  }
  auto err = esp_ble_gattc_close(this->gattc_if_, this->conn_id_);

@@ -234,6 +231,18 @@ void BLEClientBase::log_connection_params_(const char *param_type) {
  ESP_LOGD(TAG, "[%d] [%s] %s conn params", this->connection_index_, this->address_str_.c_str(), param_type);
}

void BLEClientBase::log_error_(const char *message) {
  ESP_LOGE(TAG, "[%d] [%s] %s", this->connection_index_, this->address_str_.c_str(), message);
}

void BLEClientBase::log_error_(const char *message, int code) {
  ESP_LOGE(TAG, "[%d] [%s] %s=%d", this->connection_index_, this->address_str_.c_str(), message, code);
}

void BLEClientBase::log_warning_(const char *message) {
  ESP_LOGW(TAG, "[%d] [%s] %s", this->connection_index_, this->address_str_.c_str(), message);
}

void BLEClientBase::restore_medium_conn_params_() {
  // Restore to medium connection parameters after initial connection phase
  // This balances performance with bandwidth usage for normal operation

@@ -264,8 +273,7 @@ bool BLEClientBase::gattc_event_handler(esp_gattc_cb_event_t event, esp_gatt_if_
                 this->app_id);
        this->gattc_if_ = esp_gattc_if;
      } else {
        ESP_LOGE(TAG, "[%d] [%s] gattc app registration failed id=%d code=%d", this->connection_index_,
                 this->address_str_.c_str(), param->reg.app_id, param->reg.status);
        this->log_error_("gattc app registration failed status", param->reg.status);
        this->status_ = param->reg.status;
        this->mark_failed();
      }

@@ -281,8 +289,7 @@ bool BLEClientBase::gattc_event_handler(esp_gattc_cb_event_t event, esp_gatt_if_
        // This should not happen but lets log it in case it does
        // because it means we have a bad assumption about how the
        // ESP BT stack works.
        ESP_LOGE(TAG, "[%d] [%s] Got ESP_GATTC_OPEN_EVT while in %s state, status=%d", this->connection_index_,
                 this->address_str_.c_str(), espbt::client_state_to_string(this->state_), param->open.status);
        this->log_error_("ESP_GATTC_OPEN_EVT wrong state status", param->open.status);
      }
      if (param->open.status != ESP_GATT_OK && param->open.status != ESP_GATT_ALREADY_OPEN) {
        this->log_gattc_warning_("Connection open", param->open.status);

@@ -307,7 +314,7 @@ bool BLEClientBase::gattc_event_handler(esp_gattc_cb_event_t event, esp_gatt_if_
        this->state_ = espbt::ClientState::ESTABLISHED;
        break;
      }
      ESP_LOGD(TAG, "[%d] [%s] Searching for services", this->connection_index_, this->address_str_.c_str());
      this->log_event_("Searching for services");
      esp_ble_gattc_search_service(esp_gattc_if, param->cfg_mtu.conn_id, nullptr);
      break;
    }

@@ -332,8 +339,7 @@ bool BLEClientBase::gattc_event_handler(esp_gattc_cb_event_t event, esp_gatt_if_
      // Check if we were disconnected while waiting for service discovery
      if (param->disconnect.reason == ESP_GATT_CONN_TERMINATE_PEER_USER &&
          this->state_ == espbt::ClientState::CONNECTED) {
        ESP_LOGW(TAG, "[%d] [%s] Disconnected by remote during service discovery", this->connection_index_,
                 this->address_str_.c_str());
        this->log_warning_("Remote closed during discovery");
      } else {
        ESP_LOGD(TAG, "[%d] [%s] ESP_GATTC_DISCONNECT_EVT, reason 0x%02x", this->connection_index_,
                 this->address_str_.c_str(), param->disconnect.reason);

@@ -506,16 +512,14 @@ void BLEClientBase::gap_event_handler(esp_gap_ble_cb_event_t event, esp_ble_gap_
        return;
      esp_bd_addr_t bd_addr;
      memcpy(bd_addr, param->ble_security.auth_cmpl.bd_addr, sizeof(esp_bd_addr_t));
      ESP_LOGI(TAG, "[%d] [%s] auth complete. remote BD_ADDR: %s", this->connection_index_, this->address_str_.c_str(),
      ESP_LOGI(TAG, "[%d] [%s] auth complete addr: %s", this->connection_index_, this->address_str_.c_str(),
               format_hex(bd_addr, 6).c_str());
      if (!param->ble_security.auth_cmpl.success) {
        ESP_LOGE(TAG, "[%d] [%s] auth fail reason = 0x%x", this->connection_index_, this->address_str_.c_str(),
                 param->ble_security.auth_cmpl.fail_reason);
        this->log_error_("auth fail reason", param->ble_security.auth_cmpl.fail_reason);
      } else {
        this->paired_ = true;
        ESP_LOGD(TAG, "[%d] [%s] auth success. address type = %d auth mode = %d", this->connection_index_,
                 this->address_str_.c_str(), param->ble_security.auth_cmpl.addr_type,
                 param->ble_security.auth_cmpl.auth_mode);
        ESP_LOGD(TAG, "[%d] [%s] auth success type = %d mode = %d", this->connection_index_, this->address_str_.c_str(),
                 param->ble_security.auth_cmpl.addr_type, param->ble_security.auth_cmpl.auth_mode);
      }
      break;
@@ -137,6 +137,10 @@ class BLEClientBase : public espbt::ESPBTClient, public Component {
  void log_gattc_warning_(const char *operation, esp_gatt_status_t status);
  void log_gattc_warning_(const char *operation, esp_err_t err);
  void log_connection_params_(const char *param_type);
  // Compact error logging helpers to reduce flash usage
  void log_error_(const char *message);
  void log_error_(const char *message, int code);
  void log_warning_(const char *message);
};

}  // namespace esphome::esp32_ble_client
@@ -5,7 +5,7 @@
#include "esphome/core/preferences.h"
#include <flashdb.h>
#include <cstring>
#include <memory>
#include <vector>
#include <string>

namespace esphome {

@@ -139,29 +139,21 @@ class LibreTinyPreferences : public ESPPreferences {
  }

  bool is_changed(const fdb_kvdb_t db, const NVSData &to_save) {
    NVSData stored_data{};
    struct fdb_kv kv;
    fdb_kv_t kvp = fdb_kv_get_obj(db, to_save.key.c_str(), &kv);
    if (kvp == nullptr) {
      ESP_LOGV(TAG, "fdb_kv_get_obj('%s'): nullptr - the key might not be set yet", to_save.key.c_str());
      return true;
    }

    // Check size first - if different, data has changed
    if (kv.value_len != to_save.data.size()) {
      return true;
    }

    // Allocate buffer on heap to avoid stack allocation for large data
    auto stored_data = std::make_unique<uint8_t[]>(kv.value_len);
    fdb_blob_make(&blob, stored_data.get(), kv.value_len);
    stored_data.data.resize(kv.value_len);
    fdb_blob_make(&blob, stored_data.data.data(), kv.value_len);
    size_t actual_len = fdb_kv_get_blob(db, to_save.key.c_str(), &blob);
    if (actual_len != kv.value_len) {
      ESP_LOGV(TAG, "fdb_kv_get_blob('%s') len mismatch: %u != %u", to_save.key.c_str(), actual_len, kv.value_len);
      return true;
    }

    // Compare the actual data
    return memcmp(to_save.data.data(), stored_data.get(), kv.value_len) != 0;
    return to_save.data != stored_data.data;
  }

  bool reset() override {
@@ -813,7 +813,7 @@ std::string WebServer::cover_state_json_generator(WebServer *web_server, void *s
  return web_server->cover_json((cover::Cover *) (source), DETAIL_STATE);
}
std::string WebServer::cover_all_json_generator(WebServer *web_server, void *source) {
  return web_server->cover_json((cover::Cover *) (source), DETAIL_ALL);
  return web_server->cover_json((cover::Cover *) (source), DETAIL_STATE);
}
std::string WebServer::cover_json(cover::Cover *obj, JsonDetail start_config) {
  return json::build_json([this, obj, start_config](JsonObject root) {
@@ -375,16 +375,11 @@ async def to_code(config):
    var = cg.new_Pvariable(config[CONF_ID])
    cg.add(var.set_use_address(config[CONF_USE_ADDRESS]))

    # Track if any network uses Enterprise authentication
    has_eap = False

    def add_sta(ap, network):
        ip_config = network.get(CONF_MANUAL_IP, config.get(CONF_MANUAL_IP))
        cg.add(var.add_sta(wifi_network(network, ap, ip_config)))

    for network in config.get(CONF_NETWORKS, []):
        if CONF_EAP in network:
            has_eap = True
        cg.with_local_variable(network[CONF_ID], WiFiAP(), add_sta, network)

    if CONF_AP in config:

@@ -401,10 +396,6 @@ async def to_code(config):
            add_idf_sdkconfig_option("CONFIG_ESP_WIFI_SOFTAP_SUPPORT", False)
            add_idf_sdkconfig_option("CONFIG_LWIP_DHCPS", False)

    # Disable Enterprise WiFi support if no EAP is configured
    if CORE.is_esp32 and CORE.using_esp_idf and not has_eap:
        add_idf_sdkconfig_option("CONFIG_ESP_WIFI_ENTERPRISE_SUPPORT", False)

    cg.add(var.set_reboot_timeout(config[CONF_REBOOT_TIMEOUT]))
    cg.add(var.set_power_save_mode(config[CONF_POWER_SAVE_MODE]))
    cg.add(var.set_fast_connect(config[CONF_FAST_CONNECT]))
@@ -803,10 +803,6 @@ class EsphomeCore:
            raise TypeError(
                f"Library {library} must be instance of Library, not {type(library)}"
            )

        if not library.name:
            raise ValueError(f"The library for {library.repository} must have a name")

        short_name = (
            library.name if "/" not in library.name else library.name.split("/")[-1]
        )
@@ -475,16 +475,11 @@ bool Application::register_socket_fd(int fd) {
  if (fd < 0)
    return false;

#ifndef USE_ESP32
  // Only check on non-ESP32 platforms
  // On ESP32 (both Arduino and ESP-IDF), CONFIG_LWIP_MAX_SOCKETS is always <= FD_SETSIZE by design
  // (LWIP_SOCKET_OFFSET = FD_SETSIZE - CONFIG_LWIP_MAX_SOCKETS per lwipopts.h)
  // Other platforms may not have this guarantee
  if (fd >= FD_SETSIZE) {
    ESP_LOGE(TAG, "fd %d exceeds FD_SETSIZE %d", fd, FD_SETSIZE);
    ESP_LOGE(TAG, "Cannot monitor socket fd %d: exceeds FD_SETSIZE (%d)", fd, FD_SETSIZE);
    ESP_LOGE(TAG, "Socket will not be monitored for data - may cause performance issues!");
    return false;
  }
#endif

  this->socket_fds_.push_back(fd);
  this->socket_fds_changed_ = true;
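For context on the guard above: select()-style monitoring stores descriptors in an fd_set, which can only represent fds below FD_SETSIZE, so larger descriptors must be rejected up front. A minimal host-side sketch of the same check follows (illustrative names, not the ESPHome Application class).

#include <cstdio>
#include <vector>

#include <sys/select.h>

// Track a socket fd for select()-based monitoring, rejecting descriptors that
// fd_set cannot represent (FD_SET with fd >= FD_SETSIZE is undefined behavior).
bool track_fd(std::vector<int> &fds, int fd) {
  if (fd < 0)
    return false;
  if (fd >= FD_SETSIZE) {
    fprintf(stderr, "fd %d exceeds FD_SETSIZE %d, not monitored\n", fd, FD_SETSIZE);
    return false;
  }
  fds.push_back(fd);
  return true;
}

int main() {
  std::vector<int> fds;
  track_fd(fds, 3);           // accepted
  track_fd(fds, FD_SETSIZE);  // rejected
  printf("monitored fds: %zu\n", fds.size());
  return 0;
}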
@@ -80,16 +80,13 @@ def replace_file_content(text, pattern, repl):
    return content_new, count


def storage_should_clean(old: StorageJSON | None, new: StorageJSON) -> bool:
def storage_should_clean(old: StorageJSON, new: StorageJSON) -> bool:
    if old is None:
        return True

    if old.src_version != new.src_version:
        return True
    if old.build_path != new.build_path:
        return True
    # Check if any components have been removed
    return bool(old.loaded_integrations - new.loaded_integrations)
    return old.build_path != new.build_path


def storage_should_update_cmake_cache(old: StorageJSON, new: StorageJSON) -> bool:

@@ -103,7 +100,7 @@ def storage_should_update_cmake_cache(old: StorageJSON, new: StorageJSON) -> boo
    return False


def update_storage_json() -> None:
def update_storage_json():
    path = storage_path()
    old = StorageJSON.load(path)
    new = StorageJSON.from_esphome_core(CORE, old)

@@ -111,14 +108,7 @@ def update_storage_json() -> None:
        return

    if storage_should_clean(old, new):
        if old is not None and old.loaded_integrations - new.loaded_integrations:
            removed = old.loaded_integrations - new.loaded_integrations
            _LOGGER.info(
                "Components removed (%s), cleaning build files...",
                ", ".join(sorted(removed)),
            )
        else:
            _LOGGER.info("Core config or version changed, cleaning build files...")
        _LOGGER.info("Core config, version changed, cleaning build files...")
        clean_build()
    elif storage_should_update_cmake_cache(old, new):
        _LOGGER.info("Integrations changed, cleaning cmake cache...")
@@ -1,6 +1,6 @@
pylint==3.3.8
flake8==7.3.0  # also change in .pre-commit-config.yaml when updating
ruff==0.12.9  # also change in .pre-commit-config.yaml when updating
ruff==0.12.8  # also change in .pre-commit-config.yaml when updating
pyupgrade==3.20.0  # also change in .pre-commit-config.yaml when updating
pre-commit
@@ -1,220 +0,0 @@
"""Test writer module functionality."""

from collections.abc import Callable
from typing import Any
from unittest.mock import MagicMock, patch

import pytest

from esphome.storage_json import StorageJSON
from esphome.writer import storage_should_clean, update_storage_json


@pytest.fixture
def create_storage() -> Callable[..., StorageJSON]:
    """Factory fixture to create StorageJSON instances."""

    def _create(
        loaded_integrations: list[str] | None = None, **kwargs: Any
    ) -> StorageJSON:
        return StorageJSON(
            storage_version=kwargs.get("storage_version", 1),
            name=kwargs.get("name", "test"),
            friendly_name=kwargs.get("friendly_name", "Test Device"),
            comment=kwargs.get("comment"),
            esphome_version=kwargs.get("esphome_version", "2025.1.0"),
            src_version=kwargs.get("src_version", 1),
            address=kwargs.get("address", "test.local"),
            web_port=kwargs.get("web_port", 80),
            target_platform=kwargs.get("target_platform", "ESP32"),
            build_path=kwargs.get("build_path", "/build"),
            firmware_bin_path=kwargs.get("firmware_bin_path", "/firmware.bin"),
            loaded_integrations=set(loaded_integrations or []),
            loaded_platforms=kwargs.get("loaded_platforms", set()),
            no_mdns=kwargs.get("no_mdns", False),
            framework=kwargs.get("framework", "arduino"),
            core_platform=kwargs.get("core_platform", "esp32"),
        )

    return _create


def test_storage_should_clean_when_old_is_none(
    create_storage: Callable[..., StorageJSON],
) -> None:
    """Test that clean is triggered when old storage is None."""
    new = create_storage(loaded_integrations=["api", "wifi"])
    assert storage_should_clean(None, new) is True


def test_storage_should_clean_when_src_version_changes(
    create_storage: Callable[..., StorageJSON],
) -> None:
    """Test that clean is triggered when src_version changes."""
    old = create_storage(loaded_integrations=["api", "wifi"], src_version=1)
    new = create_storage(loaded_integrations=["api", "wifi"], src_version=2)
    assert storage_should_clean(old, new) is True


def test_storage_should_clean_when_build_path_changes(
    create_storage: Callable[..., StorageJSON],
) -> None:
    """Test that clean is triggered when build_path changes."""
    old = create_storage(loaded_integrations=["api", "wifi"], build_path="/build1")
    new = create_storage(loaded_integrations=["api", "wifi"], build_path="/build2")
    assert storage_should_clean(old, new) is True


def test_storage_should_clean_when_component_removed(
    create_storage: Callable[..., StorageJSON],
) -> None:
    """Test that clean is triggered when a component is removed."""
    old = create_storage(
        loaded_integrations=["api", "wifi", "bluetooth_proxy", "esp32_ble_tracker"]
    )
    new = create_storage(loaded_integrations=["api", "wifi", "esp32_ble_tracker"])
    assert storage_should_clean(old, new) is True


def test_storage_should_clean_when_multiple_components_removed(
    create_storage: Callable[..., StorageJSON],
) -> None:
    """Test that clean is triggered when multiple components are removed."""
    old = create_storage(
        loaded_integrations=["api", "wifi", "ota", "web_server", "logger"]
    )
    new = create_storage(loaded_integrations=["api", "wifi", "logger"])
    assert storage_should_clean(old, new) is True


def test_storage_should_not_clean_when_nothing_changes(
    create_storage: Callable[..., StorageJSON],
) -> None:
    """Test that clean is not triggered when nothing changes."""
    old = create_storage(loaded_integrations=["api", "wifi", "logger"])
    new = create_storage(loaded_integrations=["api", "wifi", "logger"])
    assert storage_should_clean(old, new) is False


def test_storage_should_not_clean_when_component_added(
    create_storage: Callable[..., StorageJSON],
) -> None:
    """Test that clean is not triggered when a component is only added."""
    old = create_storage(loaded_integrations=["api", "wifi"])
    new = create_storage(loaded_integrations=["api", "wifi", "ota"])
    assert storage_should_clean(old, new) is False


def test_storage_should_not_clean_when_other_fields_change(
    create_storage: Callable[..., StorageJSON],
) -> None:
    """Test that clean is not triggered when non-relevant fields change."""
    old = create_storage(
        loaded_integrations=["api", "wifi"],
        friendly_name="Old Name",
        esphome_version="2024.12.0",
    )
    new = create_storage(
        loaded_integrations=["api", "wifi"],
        friendly_name="New Name",
        esphome_version="2025.1.0",
    )
    assert storage_should_clean(old, new) is False


def test_storage_edge_case_empty_integrations(
    create_storage: Callable[..., StorageJSON],
) -> None:
    """Test edge case when old has integrations but new has none."""
    old = create_storage(loaded_integrations=["api", "wifi"])
    new = create_storage(loaded_integrations=[])
    assert storage_should_clean(old, new) is True


def test_storage_edge_case_from_empty_integrations(
    create_storage: Callable[..., StorageJSON],
) -> None:
    """Test edge case when old has no integrations but new has some."""
    old = create_storage(loaded_integrations=[])
    new = create_storage(loaded_integrations=["api", "wifi"])
    assert storage_should_clean(old, new) is False


@patch("esphome.writer.clean_build")
@patch("esphome.writer.StorageJSON")
@patch("esphome.writer.storage_path")
@patch("esphome.writer.CORE")
def test_update_storage_json_logging_when_old_is_none(
    mock_core: MagicMock,
    mock_storage_path: MagicMock,
    mock_storage_json_class: MagicMock,
    mock_clean_build: MagicMock,
    create_storage: Callable[..., StorageJSON],
    caplog: pytest.LogCaptureFixture,
) -> None:
    """Test that update_storage_json doesn't crash when old storage is None.

    This is a regression test for the AttributeError that occurred when
    old was None and we tried to access old.loaded_integrations.
    """
    # Setup mocks
    mock_storage_path.return_value = "/test/path"
    mock_storage_json_class.load.return_value = None  # Old storage is None

    new_storage = create_storage(loaded_integrations=["api", "wifi"])
    new_storage.save = MagicMock()  # Mock the save method
    mock_storage_json_class.from_esphome_core.return_value = new_storage

    # Call the function - should not raise AttributeError
    with caplog.at_level("INFO"):
        update_storage_json()

    # Verify clean_build was called
    mock_clean_build.assert_called_once()

    # Verify the correct log message was used (not the component removal message)
    assert "Core config or version changed, cleaning build files..." in caplog.text
    assert "Components removed" not in caplog.text

    # Verify save was called
    new_storage.save.assert_called_once_with("/test/path")


@patch("esphome.writer.clean_build")
@patch("esphome.writer.StorageJSON")
@patch("esphome.writer.storage_path")
@patch("esphome.writer.CORE")
def test_update_storage_json_logging_components_removed(
    mock_core: MagicMock,
    mock_storage_path: MagicMock,
    mock_storage_json_class: MagicMock,
    mock_clean_build: MagicMock,
    create_storage: Callable[..., StorageJSON],
    caplog: pytest.LogCaptureFixture,
) -> None:
    """Test that update_storage_json logs removed components correctly."""
    # Setup mocks
    mock_storage_path.return_value = "/test/path"

    old_storage = create_storage(loaded_integrations=["api", "wifi", "bluetooth_proxy"])
    new_storage = create_storage(loaded_integrations=["api", "wifi"])
    new_storage.save = MagicMock()  # Mock the save method

    mock_storage_json_class.load.return_value = old_storage
    mock_storage_json_class.from_esphome_core.return_value = new_storage

    # Call the function
    with caplog.at_level("INFO"):
        update_storage_json()

    # Verify clean_build was called
    mock_clean_build.assert_called_once()

    # Verify the correct log message was used with component names
    assert (
        "Components removed (bluetooth_proxy), cleaning build files..." in caplog.text
    )
    assert "Core config or version changed" not in caplog.text

    # Verify save was called
    new_storage.save.assert_called_once_with("/test/path")