Compare commits

..

52 Commits

Author SHA1 Message Date
J. Nick Koston
dd0699305e [esp32_ble] Refactor to use CORE.data instead of module-level globals 2025-10-13 18:08:52 -10:00
dependabot[bot]
fe07c34246 Bump aioesphomeapi from 41.14.0 to 41.16.0 (#11215)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-14 00:00:45 +00:00
dependabot[bot]
c652aa375a Bump pylint from 3.3.9 to 4.0.0 (#11211)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-13 13:10:46 -10:00
J. Nick Koston
9fb254fdc2 Fix log retrieval with FQDN when mDNS is disabled (#11202) 2025-10-14 10:23:44 +13:00
Jonathan Swoboda
3df4dbd3a6 [core] Properly clean the build dir in the HA addon (#11208) 2025-10-13 17:12:45 -04:00
J. Nick Koston
6372099df3 [http_request] Pass parameters by const reference to reduce flash usage (#11184) 2025-10-14 09:53:11 +13:00
J. Nick Koston
8d8fcfeda2 [core] Add make_name_with_suffix helper to optimize string concatenation (#11176)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-10-14 09:39:38 +13:00
J. Nick Koston
0f356fcc79 [core] Optimize looping_components_ with FixedVector to save flash (#11183) 2025-10-14 09:20:43 +13:00
dependabot[bot]
aec60d122b Bump esphome-dashboard from 20251009.0 to 20251013.0 (#11212)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-13 09:34:12 -10:00
J. Nick Koston
c10f68ef0c [mdns] Conditionally store services to reduce RAM usage by 200-464 bytes (#11180) 2025-10-14 07:24:57 +13:00
J. Nick Koston
bcc424afed [web_server] Reduce code duplication in JSON generation with helper functions (#11117) 2025-10-14 07:21:19 +13:00
J. Nick Koston
be2c859df3 [web_server] Consolidate duplicate client connection checks (saves 288 bytes of flash) (#11116) 2025-10-14 07:01:47 +13:00
Jesse Hills
59f728488e [media_player.speaker] Dynamic auto load (#11084)
Co-authored-by: J. Nick Koston <nick@koston.org>
2025-10-13 12:58:30 +13:00
Jesse Hills
04a0de556d Merge branch 'beta' into dev 2025-10-13 10:56:08 +13:00
Jesse Hills
13cfa30c67 Merge pull request #11199 from esphome/bump-2025.10.0b2
2025.10.0b2
2025-10-13 10:55:34 +13:00
Jesse Hills
da1959ab5d Bump version to 2025.10.0b2 2025-10-13 08:49:29 +13:00
J. Nick Koston
2b42903e9c [usb_host] Fix transfer slot exhaustion at high data rates and add configurable max_transfer_requests (#11174)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: pre-commit-ci-lite[bot] <117423508+pre-commit-ci-lite[bot]@users.noreply.github.com>
2025-10-13 08:49:29 +13:00
J. Nick Koston
742c9cbb53 [esp32_improv] Fix state not transitioning to PROVISIONED when WiFi configured via captive portal (#11181) 2025-10-13 08:49:29 +13:00
J. Nick Koston
e4bc465a3d [ota] Increase handshake timeout to 20s now that auth is non-blocking (#11186) 2025-10-13 08:49:29 +13:00
J. Nick Koston
5cec0941f8 [wifi] Fix missed string literal in flash on ESP8266 (#11187) 2025-10-13 08:49:29 +13:00
J. Nick Koston
72a7aeb430 [ci] Dynamic runner allocation: 8 for releases, 4 for dev (#11191) 2025-10-13 08:49:29 +13:00
J. Nick Koston
53e6b28092 [mipi_rgb] Fix pin conflicts introduced by shared SPI bus in #11134 (#11185) 2025-10-13 08:49:28 +13:00
dependabot[bot]
7f3c7bb5c6 Bump aioesphomeapi from 41.13.0 to 41.14.0 (#11188)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-13 08:49:28 +13:00
Jonathan Swoboda
c02c0b2a96 [esp32] Change Arduino dev & latest to 3.3.2 (#11169) 2025-10-13 08:49:28 +13:00
J. Nick Koston
5f5092e29f [ci] Filter out components without tests from CI test jobs (#11134 followup) (#11178) 2025-10-13 08:49:28 +13:00
J. Nick Koston
2864bf1674 Group component tests to reduce CI time (#11134) 2025-10-13 08:49:28 +13:00
J. Nick Koston
132e949927 [mdns] Restore mdns_txt_record() public API for external components (#11158) 2025-10-13 08:49:28 +13:00
J. Nick Koston
8fa44e471d [esp32_ble] Partial revert of #10862 - Fix GATT client notifications (#11171) 2025-10-13 08:49:28 +13:00
J. Nick Koston
ccedcfb600 [json] Fix PSRAM allocator dangling pointer crash (#11165) 2025-10-13 08:49:28 +13:00
J. Nick Koston
8b0ec0afe3 [ci][tests] Remove all redundant ESP32-C3 Arduino tests (#11154) 2025-10-13 08:49:28 +13:00
J. Nick Koston
dca29ed89b [datetime][ci][tests] Replace test.all.yaml with minimal platform cover (#11151) 2025-10-13 08:49:28 +13:00
J. Nick Koston
728726e29e [ci][tests] Remove redundant ESP32-C3 Arduino tests for non-variant-specific components (#11152) 2025-10-13 08:49:28 +13:00
J. Nick Koston
79f4ca20b8 [opentherm][ci][tests] Remove redundant ESP32 Arduino tests and simplify conditionals (#11149) 2025-10-13 08:49:28 +13:00
J. Nick Koston
3eca72e0b8 [ci][logger][tests] Remove redundant ESP32 Arduino test files (#11144) 2025-10-13 08:49:28 +13:00
J. Nick Koston
22c0f55cef [ci][debug][tests] Remove redundant ESP32 variant Arduino test files (#11146) 2025-10-13 08:49:28 +13:00
J. Nick Koston
fd8ecc9608 [ci][time][tests] Remove redundant ESP32 Arduino test files (#11147) 2025-10-13 08:49:27 +13:00
J. Nick Koston
ac96a59d58 [network][ci][tests] Remove redundant ESP32 Arduino test files (#11148) 2025-10-13 08:49:27 +13:00
J. Nick Koston
dceed992d8 [esp32_ble_beacon, esp32_ble_tracker] Remove unused Arduino includes and redundant tests (#11140) 2025-10-13 08:49:27 +13:00
J. Nick Koston
b0c66c1c09 [ci][mdns][tests] Remove redundant ESP32 Arduino test files (#11143) 2025-10-13 08:49:27 +13:00
J. Nick Koston
8f04a5b944 [esp32] Update migration warning for Arduino-as-IDF-component transition (#11142) 2025-10-13 08:49:27 +13:00
Jonathan Swoboda
e6c21df30b [esp32] Update IDF 5.5 and Arduino 3.3 to use 55.03.31-1 (#11120) 2025-10-13 08:49:27 +13:00
J. Nick Koston
842cb9033a [mdns] Store TXT record values in flash to reduce heap usage (#11114) 2025-10-13 08:49:27 +13:00
J. Nick Koston
a2cb415dfa [ci][improv_serial][tests] Remove redundant ESP32 Arduino test files (#11138) 2025-10-13 08:49:27 +13:00
J. Nick Koston
1fac193535 [ci][ethernet][tests] Remove redundant Arduino tests for ethernet PHYs (#11137) 2025-10-13 08:49:27 +13:00
J. Nick Koston
34632f78cf [ci][tests] Remove redundant ESP32 Arduino test files (#11136) 2025-10-13 08:49:27 +13:00
J. Nick Koston
b93c60e85a [canbus][mcp23xxx_base] Mark virtual methods as pure virtual to fix linker errors (#11133) 2025-10-13 08:49:27 +13:00
dependabot[bot]
60dc055509 Bump esphome-dashboard from 20250904.0 to 20251009.0 (#11123)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-13 08:49:27 +13:00
J. Nick Koston
1f13d44c1b [usb_host] Fix transfer slot exhaustion at high data rates and add configurable max_transfer_requests (#11174)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: pre-commit-ci-lite[bot] <117423508+pre-commit-ci-lite[bot]@users.noreply.github.com>
2025-10-13 08:04:30 +13:00
J. Nick Koston
9ebfa9aaa8 [esp32_improv] Fix state not transitioning to PROVISIONED when WiFi configured via captive portal (#11181) 2025-10-13 07:30:58 +13:00
J. Nick Koston
6bc9ed0810 [ota] Increase handshake timeout to 20s now that auth is non-blocking (#11186) 2025-10-13 07:27:43 +13:00
J. Nick Koston
9b6e8b4b41 [wifi] Fix missed string literal in flash on ESP8266 (#11187) 2025-10-13 07:26:28 +13:00
J. Nick Koston
cad747c672 [ci] Dynamic runner allocation: 8 for releases, 4 for dev (#11191) 2025-10-13 07:25:35 +13:00
57 changed files with 522 additions and 499 deletions

View File

@@ -178,7 +178,6 @@ jobs:
python-linters: ${{ steps.determine.outputs.python-linters }}
changed-components: ${{ steps.determine.outputs.changed-components }}
changed-components-with-tests: ${{ steps.determine.outputs.changed-components-with-tests }}
directly-changed-components-with-tests: ${{ steps.determine.outputs.directly-changed-components-with-tests }}
component-test-count: ${{ steps.determine.outputs.component-test-count }}
steps:
- name: Check out code from GitHub
@@ -207,7 +206,6 @@ jobs:
echo "python-linters=$(echo "$output" | jq -r '.python_linters')" >> $GITHUB_OUTPUT
echo "changed-components=$(echo "$output" | jq -c '.changed_components')" >> $GITHUB_OUTPUT
echo "changed-components-with-tests=$(echo "$output" | jq -c '.changed_components_with_tests')" >> $GITHUB_OUTPUT
echo "directly-changed-components-with-tests=$(echo "$output" | jq -c '.directly_changed_components_with_tests')" >> $GITHUB_OUTPUT
echo "component-test-count=$(echo "$output" | jq -r '.component_test_count')" >> $GITHUB_OUTPUT
integration-tests:
@@ -360,13 +358,48 @@ jobs:
# yamllint disable-line rule:line-length
if: always()
test-build-components:
name: Component test ${{ matrix.file }}
runs-on: ubuntu-24.04
needs:
- common
- determine-jobs
if: github.event_name == 'pull_request' && fromJSON(needs.determine-jobs.outputs.component-test-count) > 0 && fromJSON(needs.determine-jobs.outputs.component-test-count) < 100
strategy:
fail-fast: false
max-parallel: 2
matrix:
file: ${{ fromJson(needs.determine-jobs.outputs.changed-components-with-tests) }}
steps:
- name: Cache apt packages
uses: awalsh128/cache-apt-pkgs-action@acb598e5ddbc6f68a970c5da0688d2f3a9f04d05 # v1.5.3
with:
packages: libsdl2-dev
version: 1.0
- name: Check out code from GitHub
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Restore Python
uses: ./.github/actions/restore-python
with:
python-version: ${{ env.DEFAULT_PYTHON }}
cache-key: ${{ needs.common.outputs.cache-key }}
- name: Validate config for ${{ matrix.file }}
run: |
. venv/bin/activate
python3 script/test_build_components.py -e config -c ${{ matrix.file }}
- name: Compile config for ${{ matrix.file }}
run: |
. venv/bin/activate
python3 script/test_build_components.py -e compile -c ${{ matrix.file }}
test-build-components-splitter:
name: Split components for intelligent grouping (40 weighted per batch)
runs-on: ubuntu-24.04
needs:
- common
- determine-jobs
if: github.event_name == 'pull_request' && fromJSON(needs.determine-jobs.outputs.component-test-count) > 0
if: github.event_name == 'pull_request' && fromJSON(needs.determine-jobs.outputs.component-test-count) >= 100
outputs:
matrix: ${{ steps.split.outputs.components }}
steps:
@@ -384,10 +417,9 @@ jobs:
# Use intelligent splitter that groups components with same bus configs
components='${{ needs.determine-jobs.outputs.changed-components-with-tests }}'
directly_changed='${{ needs.determine-jobs.outputs.directly-changed-components-with-tests }}'
echo "Splitting components intelligently..."
output=$(python3 script/split_components_for_ci.py --components "$components" --directly-changed "$directly_changed" --batch-size 40 --output github)
output=$(python3 script/split_components_for_ci.py --components "$components" --batch-size 40 --output github)
echo "$output" >> $GITHUB_OUTPUT
@@ -398,10 +430,10 @@ jobs:
- common
- determine-jobs
- test-build-components-splitter
if: github.event_name == 'pull_request' && fromJSON(needs.determine-jobs.outputs.component-test-count) > 0
if: github.event_name == 'pull_request' && fromJSON(needs.determine-jobs.outputs.component-test-count) >= 100
strategy:
fail-fast: false
max-parallel: 5
max-parallel: ${{ (github.base_ref == 'beta' || github.base_ref == 'release') && 8 || 4 }}
matrix:
components: ${{ fromJson(needs.test-build-components-splitter.outputs.matrix) }}
steps:
@@ -445,34 +477,18 @@ jobs:
# Convert space-separated components to comma-separated for Python script
components_csv=$(echo "${{ matrix.components }}" | tr ' ' ',')
# Only isolate directly changed components when targeting dev branch
# For beta/release branches, group everything for faster CI
#
# WHY ISOLATE DIRECTLY CHANGED COMPONENTS?
# - Isolated tests run WITHOUT --testing-mode, enabling full validation
# - This catches pin conflicts and other issues in directly changed code
# - Grouped tests use --testing-mode to allow config merging (disables some checks)
# - Dependencies are safe to group since they weren't modified in this PR
if [ "${{ github.base_ref }}" = "beta" ] || [ "${{ github.base_ref }}" = "release" ]; then
directly_changed_csv=""
echo "Testing components: $components_csv"
echo "Target branch: ${{ github.base_ref }} - grouping all components"
else
directly_changed_csv=$(echo '${{ needs.determine-jobs.outputs.directly-changed-components-with-tests }}' | jq -r 'join(",")')
echo "Testing components: $components_csv"
echo "Target branch: ${{ github.base_ref }} - isolating directly changed components: $directly_changed_csv"
fi
echo "Testing components: $components_csv"
echo ""
# Run config validation with grouping and isolation
python3 script/test_build_components.py -e config -c "$components_csv" -f --isolate "$directly_changed_csv"
# Run config validation with grouping
python3 script/test_build_components.py -e config -c "$components_csv" -f
echo ""
echo "Config validation passed! Starting compilation..."
echo ""
# Run compilation with grouping and isolation
python3 script/test_build_components.py -e compile -c "$components_csv" -f --isolate "$directly_changed_csv"
# Run compilation with grouping
python3 script/test_build_components.py -e compile -c "$components_csv" -f
pre-commit-ci-lite:
name: pre-commit.ci lite
@@ -505,6 +521,7 @@ jobs:
- integration-tests
- clang-tidy
- determine-jobs
- test-build-components
- test-build-components-splitter
- test-build-components-split
- pre-commit-ci-lite

View File

@@ -268,8 +268,10 @@ def has_ip_address() -> bool:
def has_resolvable_address() -> bool:
"""Check if CORE.address is resolvable (via mDNS or is an IP address)."""
return has_mdns() or has_ip_address()
"""Check if CORE.address is resolvable (via mDNS, DNS, or is an IP address)."""
# Any address (IP, mDNS hostname, or regular DNS hostname) is resolvable
# The resolve_ip_address() function in helpers.py handles all types via AsyncResolver
return CORE.address is not None
def mqtt_get_ip(config: ConfigType, username: str, password: str, client_id: str):
@@ -578,11 +580,12 @@ def show_logs(config: ConfigType, args: ArgsProtocol, devices: list[str]) -> int
if has_api():
addresses_to_use: list[str] | None = None
if port_type == "NETWORK" and (has_mdns() or is_ip_address(port)):
if port_type == "NETWORK":
# Network addresses (IPs, mDNS names, or regular DNS hostnames) can be used
# The resolve_ip_address() function in helpers.py handles all types
addresses_to_use = devices
elif port_type in ("NETWORK", "MQTT", "MQTTIP") and has_mqtt_ip_lookup():
# Only use MQTT IP lookup if the first condition didn't match
# (for MQTT/MQTTIP types, or for NETWORK when mdns/ip check fails)
elif port_type in ("MQTT", "MQTTIP") and has_mqtt_ip_lookup():
# Use MQTT IP lookup for MQTT/MQTTIP types
addresses_to_use = mqtt_get_ip(
config, args.username, args.password, args.client_id
)

View File

@@ -1,5 +1,3 @@
// useless comment
#pragma once
#ifdef USE_ESP32

View File

@@ -1,5 +1,3 @@
// changed
#include "api_connection.h"
#ifdef USE_API
#ifdef USE_API_NOISE

View File

@@ -107,8 +107,13 @@ class BTLoggers(Enum):
"""ESP32 WiFi provisioning over Bluetooth"""
# Set to track which loggers are needed by components
_required_loggers: set[BTLoggers] = set()
# Key for storing required loggers in CORE.data
ESP32_BLE_REQUIRED_LOGGERS_KEY = "esp32_ble_required_loggers"
def _get_required_loggers() -> set[BTLoggers]:
"""Get the set of required Bluetooth loggers from CORE.data."""
return CORE.data.setdefault(ESP32_BLE_REQUIRED_LOGGERS_KEY, set())
def register_bt_logger(*loggers: BTLoggers) -> None:
@@ -117,12 +122,13 @@ def register_bt_logger(*loggers: BTLoggers) -> None:
Args:
*loggers: One or more BTLoggers enum members
"""
required_loggers = _get_required_loggers()
for logger in loggers:
if not isinstance(logger, BTLoggers):
raise TypeError(
f"Logger must be a BTLoggers enum member, got {type(logger)}"
)
_required_loggers.add(logger)
required_loggers.add(logger)
CONF_BLE_ID = "ble_id"
@@ -396,8 +402,9 @@ async def to_code(config):
# Apply logger settings if log disabling is enabled
if config.get(CONF_DISABLE_BT_LOGS, False):
# Disable all Bluetooth loggers that are not required
required_loggers = _get_required_loggers()
for logger in BTLoggers:
if logger not in _required_loggers:
if logger not in required_loggers:
add_idf_sdkconfig_option(f"{logger.value}_NONE", True)
# Set BLE connection establishment timeout to match aioesphomeapi/bleak-retry-connector

View File

@@ -213,8 +213,11 @@ bool ESP32BLE::ble_setup_() {
if (this->name_.has_value()) {
name = this->name_.value();
if (App.is_name_add_mac_suffix_enabled()) {
name += "-";
name += get_mac_address().substr(6);
// MAC address suffix length (last 6 characters of 12-char MAC address string)
constexpr size_t mac_address_suffix_len = 6;
const std::string mac_addr = get_mac_address();
const char *mac_suffix_ptr = mac_addr.c_str() + mac_address_suffix_len;
name = make_name_with_suffix(name, '-', mac_suffix_ptr, mac_address_suffix_len);
}
} else {
name = App.get_name();
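Note on the helper used in this hunk: the new make_name_with_suffix call (from #11176) is only visible here through its call sites, in the ethernet hunk further down, and in the mqtt hunk. The sketch below is one plausible shape inferred from those call sites — reserve the final length once, then append — and is purely illustrative; the actual helper added by #11176 is not part of this diff and may differ.

#include <cstddef>
#include <cstdio>
#include <string>

// Hypothetical signature inferred from the call sites above; not the real helper.
std::string make_name_with_suffix(const std::string &name, char sep, const char *suffix, size_t suffix_len) {
  std::string result = name;
  result.reserve(name.size() + 1 + suffix_len);  // grow once instead of per '+' temporary
  result += sep;
  result.append(suffix, suffix_len);
  return result;
}

int main() {
  std::string mac = "a4cf12ab34cd";
  // Mirrors the BLE call site above: append the last 6 characters of the MAC address.
  std::string name = make_name_with_suffix("livingroom", '-', mac.c_str() + 6, 6);
  printf("%s\n", name.c_str());  // livingroom-ab34cd
}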

View File

@@ -1,4 +1,3 @@
// a
#pragma once
#include "esphome/core/automation.h"

View File

@@ -143,6 +143,7 @@ void ESP32ImprovComponent::loop() {
#else
this->set_state_(improv::STATE_AUTHORIZED);
#endif
this->check_wifi_connection_();
break;
}
case improv::STATE_AUTHORIZED: {
@@ -156,31 +157,12 @@ void ESP32ImprovComponent::loop() {
if (!this->check_identify_()) {
this->set_status_indicator_state_((now % 1000) < 500);
}
this->check_wifi_connection_();
break;
}
case improv::STATE_PROVISIONING: {
this->set_status_indicator_state_((now % 200) < 100);
if (wifi::global_wifi_component->is_connected()) {
wifi::global_wifi_component->save_wifi_sta(this->connecting_sta_.get_ssid(),
this->connecting_sta_.get_password());
this->connecting_sta_ = {};
this->cancel_timeout("wifi-connect-timeout");
this->set_state_(improv::STATE_PROVISIONED);
std::vector<std::string> urls = {ESPHOME_MY_LINK};
#ifdef USE_WEBSERVER
for (auto &ip : wifi::global_wifi_component->wifi_sta_ip_addresses()) {
if (ip.is_ip4()) {
std::string webserver_url = "http://" + ip.str() + ":" + to_string(USE_WEBSERVER_PORT);
urls.push_back(webserver_url);
break;
}
}
#endif
std::vector<uint8_t> data = improv::build_rpc_response(improv::WIFI_SETTINGS, urls);
this->send_response_(data);
this->stop();
}
this->check_wifi_connection_();
break;
}
case improv::STATE_PROVISIONED: {
@@ -392,6 +374,36 @@ void ESP32ImprovComponent::on_wifi_connect_timeout_() {
wifi::global_wifi_component->clear_sta();
}
void ESP32ImprovComponent::check_wifi_connection_() {
if (!wifi::global_wifi_component->is_connected()) {
return;
}
if (this->state_ == improv::STATE_PROVISIONING) {
wifi::global_wifi_component->save_wifi_sta(this->connecting_sta_.get_ssid(), this->connecting_sta_.get_password());
this->connecting_sta_ = {};
this->cancel_timeout("wifi-connect-timeout");
std::vector<std::string> urls = {ESPHOME_MY_LINK};
#ifdef USE_WEBSERVER
for (auto &ip : wifi::global_wifi_component->wifi_sta_ip_addresses()) {
if (ip.is_ip4()) {
std::string webserver_url = "http://" + ip.str() + ":" + to_string(USE_WEBSERVER_PORT);
urls.push_back(webserver_url);
break;
}
}
#endif
std::vector<uint8_t> data = improv::build_rpc_response(improv::WIFI_SETTINGS, urls);
this->send_response_(data);
} else if (this->is_active() && this->state_ != improv::STATE_PROVISIONED) {
ESP_LOGD(TAG, "WiFi provisioned externally");
}
this->set_state_(improv::STATE_PROVISIONED);
this->stop();
}
void ESP32ImprovComponent::advertise_service_data_() {
uint8_t service_data[IMPROV_SERVICE_DATA_SIZE] = {};
service_data[0] = IMPROV_PROTOCOL_ID_1; // PR

View File

@@ -111,6 +111,7 @@ class ESP32ImprovComponent : public Component {
void send_response_(std::vector<uint8_t> &response);
void process_incoming_data_();
void on_wifi_connect_timeout_();
void check_wifi_connection_();
bool check_identify_();
void advertise_service_data_();
#if ESPHOME_LOG_LEVEL >= ESPHOME_LOG_LEVEL_DEBUG

View File

@@ -1,5 +1,3 @@
// useless comment
#include "ota_esphome.h"
#ifdef USE_OTA
#ifdef USE_OTA_PASSWORD
@@ -31,7 +29,7 @@ namespace esphome {
static const char *const TAG = "esphome.ota";
static constexpr uint16_t OTA_BLOCK_SIZE = 8192;
static constexpr size_t OTA_BUFFER_SIZE = 1024; // buffer size for OTA data transfer
static constexpr uint32_t OTA_SOCKET_TIMEOUT_HANDSHAKE = 10000; // milliseconds for initial handshake
static constexpr uint32_t OTA_SOCKET_TIMEOUT_HANDSHAKE = 20000; // milliseconds for initial handshake
static constexpr uint32_t OTA_SOCKET_TIMEOUT_DATA = 90000; // milliseconds for data transfer
#ifdef USE_OTA_PASSWORD

View File

@@ -691,7 +691,9 @@ void EthernetComponent::set_manual_ip(const ManualIP &manual_ip) { this->manual_
std::string EthernetComponent::get_use_address() const {
if (this->use_address_.empty()) {
return App.get_name() + ".local";
// ".local" suffix length for mDNS hostnames
constexpr size_t mdns_local_suffix_len = 5;
return make_name_with_suffix(App.get_name(), '.', "local", mdns_local_suffix_len);
}
return this->use_address_;
}

View File

@@ -167,8 +167,8 @@ class HttpRequestComponent : public Component {
}
protected:
virtual std::shared_ptr<HttpContainer> perform(std::string url, std::string method, std::string body,
std::list<Header> request_headers,
virtual std::shared_ptr<HttpContainer> perform(const std::string &url, const std::string &method,
const std::string &body, const std::list<Header> &request_headers,
std::set<std::string> collect_headers) = 0;
const char *useragent_{nullptr};
bool follow_redirects_{};

View File

@@ -14,8 +14,9 @@ namespace http_request {
static const char *const TAG = "http_request.arduino";
std::shared_ptr<HttpContainer> HttpRequestArduino::perform(std::string url, std::string method, std::string body,
std::list<Header> request_headers,
std::shared_ptr<HttpContainer> HttpRequestArduino::perform(const std::string &url, const std::string &method,
const std::string &body,
const std::list<Header> &request_headers,
std::set<std::string> collect_headers) {
if (!network::is_connected()) {
this->status_momentary_error("failed", 1000);

View File

@@ -31,8 +31,8 @@ class HttpContainerArduino : public HttpContainer {
class HttpRequestArduino : public HttpRequestComponent {
protected:
std::shared_ptr<HttpContainer> perform(std::string url, std::string method, std::string body,
std::list<Header> request_headers,
std::shared_ptr<HttpContainer> perform(const std::string &url, const std::string &method, const std::string &body,
const std::list<Header> &request_headers,
std::set<std::string> collect_headers) override;
};

View File

@@ -17,8 +17,9 @@ namespace http_request {
static const char *const TAG = "http_request.host";
std::shared_ptr<HttpContainer> HttpRequestHost::perform(std::string url, std::string method, std::string body,
std::list<Header> request_headers,
std::shared_ptr<HttpContainer> HttpRequestHost::perform(const std::string &url, const std::string &method,
const std::string &body,
const std::list<Header> &request_headers,
std::set<std::string> response_headers) {
if (!network::is_connected()) {
this->status_momentary_error("failed", 1000);

View File

@@ -18,8 +18,8 @@ class HttpContainerHost : public HttpContainer {
class HttpRequestHost : public HttpRequestComponent {
public:
std::shared_ptr<HttpContainer> perform(std::string url, std::string method, std::string body,
std::list<Header> request_headers,
std::shared_ptr<HttpContainer> perform(const std::string &url, const std::string &method, const std::string &body,
const std::list<Header> &request_headers,
std::set<std::string> response_headers) override;
void set_ca_path(const char *ca_path) { this->ca_path_ = ca_path; }

View File

@@ -52,8 +52,9 @@ esp_err_t HttpRequestIDF::http_event_handler(esp_http_client_event_t *evt) {
return ESP_OK;
}
std::shared_ptr<HttpContainer> HttpRequestIDF::perform(std::string url, std::string method, std::string body,
std::list<Header> request_headers,
std::shared_ptr<HttpContainer> HttpRequestIDF::perform(const std::string &url, const std::string &method,
const std::string &body,
const std::list<Header> &request_headers,
std::set<std::string> collect_headers) {
if (!network::is_connected()) {
this->status_momentary_error("failed", 1000);

View File

@@ -37,8 +37,8 @@ class HttpRequestIDF : public HttpRequestComponent {
void set_buffer_size_tx(uint16_t buffer_size_tx) { this->buffer_size_tx_ = buffer_size_tx; }
protected:
std::shared_ptr<HttpContainer> perform(std::string url, std::string method, std::string body,
std::list<Header> request_headers,
std::shared_ptr<HttpContainer> perform(const std::string &url, const std::string &method, const std::string &body,
const std::list<Header> &request_headers,
std::set<std::string> collect_headers) override;
// if zero ESP-IDF will use DEFAULT_HTTP_BUF_SIZE
uint16_t buffer_size_rx_{};

View File

@@ -1,6 +1,6 @@
import esphome.codegen as cg
from esphome.components.esp32 import add_idf_component
from esphome.config_helpers import filter_source_files_from_platform
from esphome.config_helpers import filter_source_files_from_platform, get_logger_level
import esphome.config_validation as cv
from esphome.const import (
CONF_DISABLED,
@@ -125,6 +125,17 @@ def mdns_service(
)
def enable_mdns_storage():
"""Enable persistent storage of mDNS services in the MDNSComponent.
Called by external components (like OpenThread) that need access to
services after setup() completes via get_services().
Public API for external components. Do not remove.
"""
cg.add_define("USE_MDNS_STORE_SERVICES")
@coroutine_with_priority(CoroPriority.NETWORK_SERVICES)
async def to_code(config):
if config[CONF_DISABLED] is True:
@@ -150,6 +161,8 @@ async def to_code(config):
if config[CONF_SERVICES]:
cg.add_define("USE_MDNS_EXTRA_SERVICES")
# Extra services need to be stored persistently
enable_mdns_storage()
# Ensure at least 1 service (fallback service)
cg.add_define("MDNS_SERVICE_COUNT", max(1, service_count))
@@ -171,6 +184,10 @@ async def to_code(config):
# Ensure at least 1 to avoid zero-size array
cg.add_define("MDNS_DYNAMIC_TXT_COUNT", max(1, dynamic_txt_count))
# Enable storage if verbose logging is enabled (for dump_config)
if get_logger_level() in ("VERBOSE", "VERY_VERBOSE"):
enable_mdns_storage()
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)

View File

@@ -36,7 +36,7 @@ MDNS_STATIC_CONST_CHAR(SERVICE_TCP, "_tcp");
// Wrap build-time defines into flash storage
MDNS_STATIC_CONST_CHAR(VALUE_VERSION, ESPHOME_VERSION);
void MDNSComponent::compile_records_() {
void MDNSComponent::compile_records_(StaticVector<MDNSService, MDNS_SERVICE_COUNT> &services) {
this->hostname_ = App.get_name();
// IMPORTANT: The #ifdef blocks below must match COMPONENTS_WITH_MDNS_SERVICES
@@ -53,7 +53,7 @@ void MDNSComponent::compile_records_() {
MDNS_STATIC_CONST_CHAR(VALUE_BOARD, ESPHOME_BOARD);
if (api::global_api_server != nullptr) {
auto &service = this->services_.emplace_next();
auto &service = services.emplace_next();
service.service_type = MDNS_STR(SERVICE_ESPHOMELIB);
service.proto = MDNS_STR(SERVICE_TCP);
service.port = api::global_api_server->get_port();
@@ -146,7 +146,7 @@ void MDNSComponent::compile_records_() {
#ifdef USE_PROMETHEUS
MDNS_STATIC_CONST_CHAR(SERVICE_PROMETHEUS, "_prometheus-http");
auto &prom_service = this->services_.emplace_next();
auto &prom_service = services.emplace_next();
prom_service.service_type = MDNS_STR(SERVICE_PROMETHEUS);
prom_service.proto = MDNS_STR(SERVICE_TCP);
prom_service.port = USE_WEBSERVER_PORT;
@@ -155,7 +155,7 @@ void MDNSComponent::compile_records_() {
#ifdef USE_WEBSERVER
MDNS_STATIC_CONST_CHAR(SERVICE_HTTP, "_http");
auto &web_service = this->services_.emplace_next();
auto &web_service = services.emplace_next();
web_service.service_type = MDNS_STR(SERVICE_HTTP);
web_service.proto = MDNS_STR(SERVICE_TCP);
web_service.port = USE_WEBSERVER_PORT;
@@ -167,12 +167,17 @@ void MDNSComponent::compile_records_() {
// Publish "http" service if not using native API or any other services
// This is just to have *some* mDNS service so that .local resolution works
auto &fallback_service = this->services_.emplace_next();
auto &fallback_service = services.emplace_next();
fallback_service.service_type = MDNS_STR(SERVICE_HTTP);
fallback_service.proto = MDNS_STR(SERVICE_TCP);
fallback_service.port = USE_WEBSERVER_PORT;
fallback_service.txt_records.push_back({MDNS_STR(TXT_VERSION), MDNS_STR(VALUE_VERSION)});
#endif
#ifdef USE_MDNS_STORE_SERVICES
// Copy to member variable if storage is enabled (verbose logging, OpenThread, or extra services)
this->services_ = services;
#endif
}
void MDNSComponent::dump_config() {
@@ -180,7 +185,7 @@ void MDNSComponent::dump_config() {
"mDNS:\n"
" Hostname: %s",
this->hostname_.c_str());
#if ESPHOME_LOG_LEVEL >= ESPHOME_LOG_LEVEL_VERBOSE
#ifdef USE_MDNS_STORE_SERVICES
ESP_LOGV(TAG, " Services:");
for (const auto &service : this->services_) {
ESP_LOGV(TAG, " - %s, %s, %d", MDNS_STR_ARG(service.service_type), MDNS_STR_ARG(service.proto),

View File

@@ -55,7 +55,9 @@ class MDNSComponent : public Component {
void add_extra_service(MDNSService service) { this->services_.emplace_next() = std::move(service); }
#endif
#ifdef USE_MDNS_STORE_SERVICES
const StaticVector<MDNSService, MDNS_SERVICE_COUNT> &get_services() const { return this->services_; }
#endif
void on_shutdown() override;
@@ -71,9 +73,11 @@ class MDNSComponent : public Component {
StaticVector<std::string, MDNS_DYNAMIC_TXT_COUNT> dynamic_txt_values_;
protected:
#ifdef USE_MDNS_STORE_SERVICES
StaticVector<MDNSService, MDNS_SERVICE_COUNT> services_{};
#endif
std::string hostname_;
void compile_records_();
void compile_records_(StaticVector<MDNSService, MDNS_SERVICE_COUNT> &services);
};
} // namespace mdns
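The two hunks above change compile_records_() to fill a caller-owned buffer and keep a persistent member copy only when USE_MDNS_STORE_SERVICES is defined (verbose logging, OpenThread, or extra services). A standalone sketch of that pattern, with illustrative names rather than the component's real API:

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// #define STORE_SERVICES  // consumers that need the records after setup enable this

struct Service {
  const char *type;
  uint16_t port;
};

class Announcer {
 public:
  void setup() {
    // Records live on the stack; RAM is spent permanently only when a consumer asks for it.
    std::array<Service, 4> services{};
    size_t count = this->compile_records_(services);
    for (size_t i = 0; i < count; i++)
      printf("announce %s on port %u\n", services[i].type, (unsigned) services[i].port);
  }

 private:
  size_t compile_records_(std::array<Service, 4> &services) {
    size_t count = 0;
    services[count++] = {"_esphomelib._tcp", 6053};
    services[count++] = {"_http._tcp", 80};
#ifdef STORE_SERVICES
    this->services_ = services;  // persistent copy, only when requested
    this->count_ = count;
#endif
    return count;
  }
#ifdef STORE_SERVICES
  std::array<Service, 4> services_{};
  size_t count_{0};
#endif
};

int main() { Announcer().setup(); }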

View File

@@ -12,7 +12,8 @@ namespace mdns {
static const char *const TAG = "mdns";
void MDNSComponent::setup() {
this->compile_records_();
StaticVector<MDNSService, MDNS_SERVICE_COUNT> services;
this->compile_records_(services);
esp_err_t err = mdns_init();
if (err != ESP_OK) {
@@ -24,7 +25,7 @@ void MDNSComponent::setup() {
mdns_hostname_set(this->hostname_.c_str());
mdns_instance_name_set(this->hostname_.c_str());
for (const auto &service : this->services_) {
for (const auto &service : services) {
std::vector<mdns_txt_item_t> txt_records;
for (const auto &record : service.txt_records) {
mdns_txt_item_t it{};

View File

@@ -12,11 +12,12 @@ namespace esphome {
namespace mdns {
void MDNSComponent::setup() {
this->compile_records_();
StaticVector<MDNSService, MDNS_SERVICE_COUNT> services;
this->compile_records_(services);
MDNS.begin(this->hostname_.c_str());
for (const auto &service : this->services_) {
for (const auto &service : services) {
// Strip the leading underscore from the proto and service_type. While it is
// part of the wire protocol to have an underscore, and for example ESP-IDF
// expects the underscore to be there, the ESP8266 implementation always adds

View File

@@ -9,7 +9,9 @@
namespace esphome {
namespace mdns {
void MDNSComponent::setup() { this->compile_records_(); }
void MDNSComponent::setup() {
// Host platform doesn't have actual mDNS implementation
}
void MDNSComponent::on_shutdown() {}

View File

@@ -12,11 +12,12 @@ namespace esphome {
namespace mdns {
void MDNSComponent::setup() {
this->compile_records_();
StaticVector<MDNSService, MDNS_SERVICE_COUNT> services;
this->compile_records_(services);
MDNS.begin(this->hostname_.c_str());
for (const auto &service : this->services_) {
for (const auto &service : services) {
// Strip the leading underscore from the proto and service_type. While it is
// part of the wire protocol to have an underscore, and for example ESP-IDF
// expects the underscore to be there, the ESP8266 implementation always adds

View File

@@ -12,11 +12,12 @@ namespace esphome {
namespace mdns {
void MDNSComponent::setup() {
this->compile_records_();
StaticVector<MDNSService, MDNS_SERVICE_COUNT> services;
this->compile_records_(services);
MDNS.begin(this->hostname_.c_str());
for (const auto &service : this->services_) {
for (const auto &service : services) {
// Strip the leading underscore from the proto and service_type. While it is
// part of the wire protocol to have an underscore, and for example ESP-IDF
// expects the underscore to be there, the ESP8266 implementation always adds

View File

@@ -29,7 +29,8 @@ static const char *const TAG = "mqtt";
MQTTClientComponent::MQTTClientComponent() {
global_mqtt_client = this;
this->credentials_.client_id = App.get_name() + "-" + get_mac_address();
const std::string mac_addr = get_mac_address();
this->credentials_.client_id = make_name_with_suffix(App.get_name(), '-', mac_addr.c_str(), mac_addr.size());
}
// Connection

View File

@@ -5,7 +5,7 @@ from esphome.components.esp32 import (
add_idf_sdkconfig_option,
only_on_variant,
)
from esphome.components.mdns import MDNSComponent
from esphome.components.mdns import MDNSComponent, enable_mdns_storage
import esphome.config_validation as cv
from esphome.const import CONF_CHANNEL, CONF_ENABLE_IPV6, CONF_ID
import esphome.final_validate as fv
@@ -141,6 +141,9 @@ FINAL_VALIDATE_SCHEMA = _final_validate
async def to_code(config):
cg.add_define("USE_OPENTHREAD")
# OpenThread SRP needs access to mDNS services after setup
enable_mdns_storage()
ot = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(ot, config)

View File

@@ -63,6 +63,8 @@ SPIRAM_SPEEDS = {
def supported() -> bool:
if not CORE.is_esp32:
return False
variant = get_esp32_variant()
return variant in SPIRAM_MODES

View File

@@ -6,7 +6,7 @@ from pathlib import Path
from esphome import automation, external_files
import esphome.codegen as cg
from esphome.components import audio, esp32, media_player, speaker
from esphome.components import audio, esp32, media_player, psram, speaker
import esphome.config_validation as cv
from esphome.const import (
CONF_BUFFER_SIZE,
@@ -26,10 +26,21 @@ from esphome.const import (
from esphome.core import CORE, HexInt
from esphome.core.entity_helpers import inherit_property_from
from esphome.external_files import download_content
from esphome.types import ConfigType
_LOGGER = logging.getLogger(__name__)
AUTO_LOAD = ["audio", "psram"]
def AUTO_LOAD(config: ConfigType) -> list[str]:
load = ["audio"]
if (
not config
or config.get(CONF_TASK_STACK_IN_PSRAM)
or config.get(CONF_CODEC_SUPPORT_ENABLED)
):
return load + ["psram"]
return load
CODEOWNERS = ["@kahrendt", "@synesthesiam"]
DOMAIN = "media_player"
@@ -279,7 +290,9 @@ CONFIG_SCHEMA = cv.All(
cv.Optional(CONF_BUFFER_SIZE, default=1000000): cv.int_range(
min=4000, max=4000000
),
cv.Optional(CONF_CODEC_SUPPORT_ENABLED, default=True): cv.boolean,
cv.Optional(
CONF_CODEC_SUPPORT_ENABLED, default=psram.supported()
): cv.boolean,
cv.Optional(CONF_FILES): cv.ensure_list(MEDIA_FILE_TYPE_SCHEMA),
cv.Optional(CONF_TASK_STACK_IN_PSRAM, default=False): cv.boolean,
cv.Optional(CONF_VOLUME_INCREMENT, default=0.05): cv.percentage,

View File

@@ -9,6 +9,7 @@ from esphome.components.esp32 import (
import esphome.config_validation as cv
from esphome.const import CONF_DEVICES, CONF_ID
from esphome.cpp_types import Component
from esphome.types import ConfigType
AUTO_LOAD = ["bytebuffer"]
CODEOWNERS = ["@clydebarrow"]
@@ -20,6 +21,7 @@ USBClient = usb_host_ns.class_("USBClient", Component)
CONF_VID = "vid"
CONF_PID = "pid"
CONF_ENABLE_HUBS = "enable_hubs"
CONF_MAX_TRANSFER_REQUESTS = "max_transfer_requests"
def usb_device_schema(cls=USBClient, vid: int = None, pid: [int] = None) -> cv.Schema:
@@ -44,6 +46,9 @@ CONFIG_SCHEMA = cv.All(
{
cv.GenerateID(): cv.declare_id(USBHost),
cv.Optional(CONF_ENABLE_HUBS, default=False): cv.boolean,
cv.Optional(CONF_MAX_TRANSFER_REQUESTS, default=16): cv.int_range(
min=1, max=32
),
cv.Optional(CONF_DEVICES): cv.ensure_list(usb_device_schema()),
}
),
@@ -58,10 +63,14 @@ async def register_usb_client(config):
return var
async def to_code(config):
async def to_code(config: ConfigType) -> None:
add_idf_sdkconfig_option("CONFIG_USB_HOST_CONTROL_TRANSFER_MAX_SIZE", 1024)
if config.get(CONF_ENABLE_HUBS):
add_idf_sdkconfig_option("CONFIG_USB_HOST_HUBS_SUPPORTED", True)
max_requests = config[CONF_MAX_TRANSFER_REQUESTS]
cg.add_define("USB_HOST_MAX_REQUESTS", max_requests)
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
for device in config.get(CONF_DEVICES) or ():

View File

@@ -2,6 +2,7 @@
// Should not be needed, but it's required to pass CI clang-tidy checks
#if defined(USE_ESP32_VARIANT_ESP32S2) || defined(USE_ESP32_VARIANT_ESP32S3) || defined(USE_ESP32_VARIANT_ESP32P4)
#include "esphome/core/defines.h"
#include "esphome/core/component.h"
#include <vector>
#include "usb/usb_host.h"
@@ -16,23 +17,25 @@ namespace usb_host {
// THREADING MODEL:
// This component uses a dedicated USB task for event processing to prevent data loss.
// - USB Task (high priority): Handles USB events, executes transfer callbacks
// - Main Loop Task: Initiates transfers, processes completion events
// - USB Task (high priority): Handles USB events, executes transfer callbacks, releases transfer slots
// - Main Loop Task: Initiates transfers, processes device connect/disconnect events
//
// Thread-safe communication:
// - Lock-free queues for USB task -> main loop events (SPSC pattern)
// - Lock-free TransferRequest pool using atomic bitmask (MCSP pattern)
// - Lock-free TransferRequest pool using atomic bitmask (MCMP pattern - multi-consumer, multi-producer)
//
// TransferRequest pool access pattern:
// - get_trq_() [allocate]: Called from BOTH USB task and main loop threads
// * USB task: via USB UART input callbacks that restart transfers immediately
// * Main loop: for output transfers and flow-controlled input restarts
// - release_trq() [deallocate]: Called from main loop thread only
// - release_trq() [deallocate]: Called from BOTH USB task and main loop threads
// * USB task: immediately after transfer callback completes (critical for preventing slot exhaustion)
// * Main loop: when transfer submission fails
//
// The multi-threaded allocation is intentional for performance:
// - USB task can immediately restart input transfers without context switching
// The multi-threaded allocation/deallocation is intentional for performance:
// - USB task can immediately restart input transfers and release slots without context switching
// - Main loop controls backpressure by deciding when to restart after consuming data
// The atomic bitmask ensures thread-safe allocation without mutex blocking.
// The atomic bitmask ensures thread-safe allocation/deallocation without mutex blocking.
static const char *const TAG = "usb_host";
@@ -52,8 +55,17 @@ static const uint8_t USB_DIR_IN = 1 << 7;
static const uint8_t USB_DIR_OUT = 0;
static const size_t SETUP_PACKET_SIZE = 8;
static const size_t MAX_REQUESTS = 16; // maximum number of outstanding requests possible.
static_assert(MAX_REQUESTS <= 16, "MAX_REQUESTS must be <= 16 to fit in uint16_t bitmask");
static const size_t MAX_REQUESTS = USB_HOST_MAX_REQUESTS; // maximum number of outstanding requests possible.
static_assert(MAX_REQUESTS >= 1 && MAX_REQUESTS <= 32, "MAX_REQUESTS must be between 1 and 32");
// Select appropriate bitmask type for tracking allocation of TransferRequest slots.
// The bitmask must have at least as many bits as MAX_REQUESTS, so:
// - Use uint16_t for up to 16 requests (MAX_REQUESTS <= 16)
// - Use uint32_t for 17-32 requests (MAX_REQUESTS > 16)
// This is tied to the static_assert above, which enforces MAX_REQUESTS is between 1 and 32.
// If MAX_REQUESTS is increased above 32, this logic and the static_assert must be updated.
using trq_bitmask_t = std::conditional<(MAX_REQUESTS <= 16), uint16_t, uint32_t>::type;
static constexpr size_t USB_EVENT_QUEUE_SIZE = 32; // Size of event queue between USB task and main loop
static constexpr size_t USB_TASK_STACK_SIZE = 4096; // Stack size for USB task (same as ESP-IDF USB examples)
static constexpr UBaseType_t USB_TASK_PRIORITY = 5; // Higher priority than main loop (tskIDLE_PRIORITY + 5)
@@ -83,8 +95,6 @@ struct TransferRequest {
enum EventType : uint8_t {
EVENT_DEVICE_NEW,
EVENT_DEVICE_GONE,
EVENT_TRANSFER_COMPLETE,
EVENT_CONTROL_COMPLETE,
};
struct UsbEvent {
@@ -96,9 +106,6 @@ struct UsbEvent {
struct {
usb_device_handle_t handle;
} device_gone;
struct {
TransferRequest *trq;
} transfer;
} data;
// Required for EventPool - no cleanup needed for POD types
@@ -163,10 +170,9 @@ class USBClient : public Component {
uint16_t pid_{};
// Lock-free pool management using atomic bitmask (no dynamic allocation)
// Bit i = 1: requests_[i] is in use, Bit i = 0: requests_[i] is available
// Supports multiple concurrent consumers (both threads can allocate)
// Single producer for deallocation (main loop only)
// Limited to 16 slots by uint16_t size (enforced by static_assert)
std::atomic<uint16_t> trq_in_use_;
// Supports multiple concurrent consumers and producers (both threads can allocate/deallocate)
// Bitmask type automatically selected: uint16_t for <= 16 slots, uint32_t for 17-32 slots
std::atomic<trq_bitmask_t> trq_in_use_;
TransferRequest requests_[MAX_REQUESTS]{};
};
class USBHost : public Component {
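The comments in this header describe the reworked TransferRequest pool: bit i of an atomic bitmask marks slot i as in use, the mask type is chosen at compile time from the configured maximum, and both the USB task and the main loop may now allocate and release slots. A minimal standalone sketch of that technique (illustrative names, not the component's code):

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <type_traits>

constexpr size_t kMaxSlots = 20;  // illustrative; the component's default is 16
static_assert(kMaxSlots >= 1 && kMaxSlots <= 32, "must fit in a 32-bit mask");
using mask_t = std::conditional<(kMaxSlots <= 16), uint16_t, uint32_t>::type;

std::atomic<mask_t> in_use{0};  // bit i == 1 means slot i is busy

// Claim the first free slot, or return -1 if all slots are busy.
int claim_slot() {
  mask_t mask = in_use.load(std::memory_order_relaxed);
  size_t i = 0;
  while (i != kMaxSlots) {
    if (mask & (static_cast<mask_t>(1) << i)) {  // slot busy, try the next one
      i++;
      continue;
    }
    mask_t desired = mask | (static_cast<mask_t>(1) << i);
    if (in_use.compare_exchange_weak(mask, desired, std::memory_order_acquire,
                                     std::memory_order_relaxed)) {
      return static_cast<int>(i);  // claimed slot i
    }
    i = 0;  // CAS failed: mask now holds the current value, retry from slot 0
  }
  return -1;
}

// Release a slot from any thread by atomically clearing its bit.
void release_slot(int i) {
  mask_t bit = static_cast<mask_t>(1) << i;
  in_use.fetch_and(static_cast<mask_t>(~bit), std::memory_order_release);
}

int main() {
  int a = claim_slot();
  int b = claim_slot();
  printf("claimed %d and %d\n", a, b);
  release_slot(a);
  release_slot(b);
}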

View File

@@ -228,12 +228,6 @@ void USBClient::loop() {
case EVENT_DEVICE_GONE:
this->on_removed(event->data.device_gone.handle);
break;
case EVENT_TRANSFER_COMPLETE:
case EVENT_CONTROL_COMPLETE: {
auto *trq = event->data.transfer.trq;
this->release_trq(trq);
break;
}
}
// Return event to pool for reuse
this->event_pool.release(event);
@@ -313,25 +307,6 @@ void USBClient::on_removed(usb_device_handle_t handle) {
}
}
// Helper to queue transfer cleanup to main loop
static void queue_transfer_cleanup(TransferRequest *trq, EventType type) {
auto *client = trq->client;
// Allocate event from pool
UsbEvent *event = client->event_pool.allocate();
if (event == nullptr) {
// No events available - increment counter for periodic logging
client->event_queue.increment_dropped_count();
return;
}
event->type = type;
event->data.transfer.trq = trq;
// Push to lock-free queue (always succeeds since pool size == queue size)
client->event_queue.push(event);
}
// CALLBACK CONTEXT: USB task (called from usb_host_client_handle_events in USB task)
static void control_callback(const usb_transfer_t *xfer) {
auto *trq = static_cast<TransferRequest *>(xfer->context);
@@ -346,8 +321,9 @@ static void control_callback(const usb_transfer_t *xfer) {
trq->callback(trq->status);
}
// Queue cleanup to main loop
queue_transfer_cleanup(trq, EVENT_CONTROL_COMPLETE);
// Release transfer slot immediately in USB task
// The release_trq() uses thread-safe atomic operations
trq->client->release_trq(trq);
}
// THREAD CONTEXT: Called from both USB task and main loop threads (multi-consumer)
@@ -358,20 +334,20 @@ static void control_callback(const usb_transfer_t *xfer) {
// This multi-threaded access is intentional for performance - USB task can
// immediately restart transfers without waiting for main loop scheduling.
TransferRequest *USBClient::get_trq_() {
uint16_t mask = this->trq_in_use_.load(std::memory_order_relaxed);
trq_bitmask_t mask = this->trq_in_use_.load(std::memory_order_relaxed);
// Find first available slot (bit = 0) and try to claim it atomically
// We use a while loop to allow retrying the same slot after CAS failure
size_t i = 0;
while (i != MAX_REQUESTS) {
if (mask & (1U << i)) {
if (mask & (static_cast<trq_bitmask_t>(1) << i)) {
// Slot is in use, move to next slot
i++;
continue;
}
// Slot i appears available, try to claim it atomically
uint16_t desired = mask | (1U << i); // Set bit i to mark as in-use
trq_bitmask_t desired = mask | (static_cast<trq_bitmask_t>(1) << i); // Set bit i to mark as in-use
if (this->trq_in_use_.compare_exchange_weak(mask, desired, std::memory_order_acquire, std::memory_order_relaxed)) {
// Successfully claimed slot i - prepare the TransferRequest
@@ -386,7 +362,7 @@ TransferRequest *USBClient::get_trq_() {
i = 0;
}
ESP_LOGE(TAG, "All %d transfer slots in use", MAX_REQUESTS);
ESP_LOGE(TAG, "All %zu transfer slots in use", MAX_REQUESTS);
return nullptr;
}
void USBClient::disconnect() {
@@ -452,8 +428,11 @@ static void transfer_callback(usb_transfer_t *xfer) {
trq->callback(trq->status);
}
// Queue cleanup to main loop
queue_transfer_cleanup(trq, EVENT_TRANSFER_COMPLETE);
// Release transfer slot AFTER callback completes to prevent slot exhaustion
// This is critical for high-throughput transfers (e.g., USB UART at 115200 baud)
// The callback has finished accessing xfer->data_buffer, so it's safe to release
// The release_trq() uses thread-safe atomic operations
trq->client->release_trq(trq);
}
/**
* Performs a transfer input operation.
@@ -521,12 +500,12 @@ void USBClient::dump_config() {
" Product id %04X",
this->vid_, this->pid_);
}
// THREAD CONTEXT: Only called from main loop thread (single producer for deallocation)
// - Via event processing when handling EVENT_TRANSFER_COMPLETE/EVENT_CONTROL_COMPLETE
// - Directly when transfer submission fails
// THREAD CONTEXT: Called from both USB task and main loop threads
// - USB task: Immediately after transfer callback completes
// - Main loop: When transfer submission fails
//
// THREAD SAFETY: Lock-free using atomic AND to clear bit
// Single-producer pattern makes this simpler than allocation
// Thread-safe atomic operation allows multi-threaded deallocation
void USBClient::release_trq(TransferRequest *trq) {
if (trq == nullptr)
return;
@@ -540,8 +519,8 @@ void USBClient::release_trq(TransferRequest *trq) {
// Atomically clear bit i to mark slot as available
// fetch_and with inverted bitmask clears the bit atomically
uint16_t bit = 1U << index;
this->trq_in_use_.fetch_and(static_cast<uint16_t>(~bit), std::memory_order_release);
trq_bitmask_t bit = static_cast<trq_bitmask_t>(1) << index;
this->trq_in_use_.fetch_and(static_cast<trq_bitmask_t>(~bit), std::memory_order_release);
}
} // namespace usb_host

View File

@@ -19,72 +19,54 @@ ListEntitiesIterator::~ListEntitiesIterator() {}
#ifdef USE_BINARY_SENSOR
bool ListEntitiesIterator::on_binary_sensor(binary_sensor::BinarySensor *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::binary_sensor_all_json_generator);
return true;
}
#endif
#ifdef USE_COVER
bool ListEntitiesIterator::on_cover(cover::Cover *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::cover_all_json_generator);
return true;
}
#endif
#ifdef USE_FAN
bool ListEntitiesIterator::on_fan(fan::Fan *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::fan_all_json_generator);
return true;
}
#endif
#ifdef USE_LIGHT
bool ListEntitiesIterator::on_light(light::LightState *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::light_all_json_generator);
return true;
}
#endif
#ifdef USE_SENSOR
bool ListEntitiesIterator::on_sensor(sensor::Sensor *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::sensor_all_json_generator);
return true;
}
#endif
#ifdef USE_SWITCH
bool ListEntitiesIterator::on_switch(switch_::Switch *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::switch_all_json_generator);
return true;
}
#endif
#ifdef USE_BUTTON
bool ListEntitiesIterator::on_button(button::Button *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::button_all_json_generator);
return true;
}
#endif
#ifdef USE_TEXT_SENSOR
bool ListEntitiesIterator::on_text_sensor(text_sensor::TextSensor *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::text_sensor_all_json_generator);
return true;
}
#endif
#ifdef USE_LOCK
bool ListEntitiesIterator::on_lock(lock::Lock *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::lock_all_json_generator);
return true;
}
@@ -92,8 +74,6 @@ bool ListEntitiesIterator::on_lock(lock::Lock *obj) {
#ifdef USE_VALVE
bool ListEntitiesIterator::on_valve(valve::Valve *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::valve_all_json_generator);
return true;
}
@@ -101,8 +81,6 @@ bool ListEntitiesIterator::on_valve(valve::Valve *obj) {
#ifdef USE_CLIMATE
bool ListEntitiesIterator::on_climate(climate::Climate *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::climate_all_json_generator);
return true;
}
@@ -110,8 +88,6 @@ bool ListEntitiesIterator::on_climate(climate::Climate *obj) {
#ifdef USE_NUMBER
bool ListEntitiesIterator::on_number(number::Number *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::number_all_json_generator);
return true;
}
@@ -119,8 +95,6 @@ bool ListEntitiesIterator::on_number(number::Number *obj) {
#ifdef USE_DATETIME_DATE
bool ListEntitiesIterator::on_date(datetime::DateEntity *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::date_all_json_generator);
return true;
}
@@ -128,8 +102,6 @@ bool ListEntitiesIterator::on_date(datetime::DateEntity *obj) {
#ifdef USE_DATETIME_TIME
bool ListEntitiesIterator::on_time(datetime::TimeEntity *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::time_all_json_generator);
return true;
}
@@ -137,8 +109,6 @@ bool ListEntitiesIterator::on_time(datetime::TimeEntity *obj) {
#ifdef USE_DATETIME_DATETIME
bool ListEntitiesIterator::on_datetime(datetime::DateTimeEntity *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::datetime_all_json_generator);
return true;
}
@@ -146,8 +116,6 @@ bool ListEntitiesIterator::on_datetime(datetime::DateTimeEntity *obj) {
#ifdef USE_TEXT
bool ListEntitiesIterator::on_text(text::Text *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::text_all_json_generator);
return true;
}
@@ -155,8 +123,6 @@ bool ListEntitiesIterator::on_text(text::Text *obj) {
#ifdef USE_SELECT
bool ListEntitiesIterator::on_select(select::Select *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::select_all_json_generator);
return true;
}
@@ -164,8 +130,6 @@ bool ListEntitiesIterator::on_select(select::Select *obj) {
#ifdef USE_ALARM_CONTROL_PANEL
bool ListEntitiesIterator::on_alarm_control_panel(alarm_control_panel::AlarmControlPanel *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::alarm_control_panel_all_json_generator);
return true;
}
@@ -173,8 +137,6 @@ bool ListEntitiesIterator::on_alarm_control_panel(alarm_control_panel::AlarmCont
#ifdef USE_EVENT
bool ListEntitiesIterator::on_event(event::Event *obj) {
if (this->events_->count() == 0)
return true;
// Null event type, since we are just iterating over entities
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::event_all_json_generator);
return true;
@@ -183,8 +145,6 @@ bool ListEntitiesIterator::on_event(event::Event *obj) {
#ifdef USE_UPDATE
bool ListEntitiesIterator::on_update(update::UpdateEntity *obj) {
if (this->events_->count() == 0)
return true;
this->events_->deferrable_send_state(obj, "state_detail_all", WebServer::update_all_json_generator);
return true;
}

View File

@@ -152,6 +152,10 @@ void DeferredUpdateEventSource::loop() {
void DeferredUpdateEventSource::deferrable_send_state(void *source, const char *event_type,
message_generator_t *message_generator) {
// Skip if no connected clients to avoid unnecessary deferred queue processing
if (this->count() == 0)
return;
// allow all json "details_all" to go through before publishing bare state events, this avoids unnamed entries showing
// up in the web GUI and reduces event load during initial connect
if (!entities_iterator_.completed() && 0 != strcmp(event_type, "state_detail_all"))
@@ -197,6 +201,9 @@ void DeferredUpdateEventSourceList::loop() {
void DeferredUpdateEventSourceList::deferrable_send_state(void *source, const char *event_type,
message_generator_t *message_generator) {
// Skip if no event sources (no connected clients) to avoid unnecessary iteration
if (this->empty())
return;
for (DeferredUpdateEventSource *dues : *this) {
dues->deferrable_send_state(source, event_type, message_generator);
}
@@ -424,8 +431,6 @@ static JsonDetail get_request_detail(AsyncWebServerRequest *request) {
#ifdef USE_SENSOR
void WebServer::on_sensor_update(sensor::Sensor *obj, float state) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", sensor_state_json_generator);
}
void WebServer::handle_sensor_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -453,13 +458,8 @@ std::string WebServer::sensor_json(sensor::Sensor *obj, float value, JsonDetail
const auto uom_ref = obj->get_unit_of_measurement_ref();
// Build JSON directly inline
std::string state;
if (std::isnan(value)) {
state = "NA";
} else {
state = value_accuracy_with_uom_to_string(value, obj->get_accuracy_decimals(), uom_ref);
}
std::string state =
std::isnan(value) ? "NA" : value_accuracy_with_uom_to_string(value, obj->get_accuracy_decimals(), uom_ref);
set_json_icon_state_value(root, obj, "sensor", state, value, start_config);
if (start_config == DETAIL_ALL) {
this->add_sorting_info_(root, obj);
@@ -473,8 +473,6 @@ std::string WebServer::sensor_json(sensor::Sensor *obj, float value, JsonDetail
#ifdef USE_TEXT_SENSOR
void WebServer::on_text_sensor_update(text_sensor::TextSensor *obj, const std::string &state) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", text_sensor_state_json_generator);
}
void WebServer::handle_text_sensor_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -514,8 +512,6 @@ std::string WebServer::text_sensor_json(text_sensor::TextSensor *obj, const std:
#ifdef USE_SWITCH
void WebServer::on_switch_update(switch_::Switch *obj, bool state) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", switch_state_json_generator);
}
void WebServer::handle_switch_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -627,8 +623,6 @@ std::string WebServer::button_json(button::Button *obj, JsonDetail start_config)
#ifdef USE_BINARY_SENSOR
void WebServer::on_binary_sensor_update(binary_sensor::BinarySensor *obj) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", binary_sensor_state_json_generator);
}
void WebServer::handle_binary_sensor_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -667,8 +661,6 @@ std::string WebServer::binary_sensor_json(binary_sensor::BinarySensor *obj, bool
#ifdef USE_FAN
void WebServer::on_fan_update(fan::Fan *obj) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", fan_state_json_generator);
}
void WebServer::handle_fan_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -743,8 +735,6 @@ std::string WebServer::fan_json(fan::Fan *obj, JsonDetail start_config) {
#ifdef USE_LIGHT
void WebServer::on_light_update(light::LightState *obj) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", light_state_json_generator);
}
void WebServer::handle_light_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -800,8 +790,7 @@ std::string WebServer::light_json(light::LightState *obj, JsonDetail start_confi
json::JsonBuilder builder;
JsonObject root = builder.root();
set_json_id(root, obj, "light", start_config);
root["state"] = obj->remote_values.is_on() ? "ON" : "OFF";
set_json_value(root, obj, "light", obj->remote_values.is_on() ? "ON" : "OFF", start_config);
light::LightJSONSchema::dump_json(*obj, root);
if (start_config == DETAIL_ALL) {
@@ -819,8 +808,6 @@ std::string WebServer::light_json(light::LightState *obj, JsonDetail start_confi
#ifdef USE_COVER
void WebServer::on_cover_update(cover::Cover *obj) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", cover_state_json_generator);
}
void WebServer::handle_cover_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -906,8 +893,6 @@ std::string WebServer::cover_json(cover::Cover *obj, JsonDetail start_config) {
#ifdef USE_NUMBER
void WebServer::on_number_update(number::Number *obj, float state) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", number_state_json_generator);
}
void WebServer::handle_number_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -948,7 +933,13 @@ std::string WebServer::number_json(number::Number *obj, float value, JsonDetail
const auto uom_ref = obj->traits.get_unit_of_measurement_ref();
set_json_id(root, obj, "number", start_config);
std::string val_str = std::isnan(value)
? "\"NaN\""
: value_accuracy_to_string(value, step_to_accuracy_decimals(obj->traits.get_step()));
std::string state_str = std::isnan(value) ? "NA"
: value_accuracy_with_uom_to_string(
value, step_to_accuracy_decimals(obj->traits.get_step()), uom_ref);
set_json_icon_state_value(root, obj, "number", state_str, val_str, start_config);
if (start_config == DETAIL_ALL) {
root["min_value"] =
value_accuracy_to_string(obj->traits.get_min_value(), step_to_accuracy_decimals(obj->traits.get_step()));
@@ -960,14 +951,6 @@ std::string WebServer::number_json(number::Number *obj, float value, JsonDetail
root["uom"] = uom_ref;
this->add_sorting_info_(root, obj);
}
if (std::isnan(value)) {
root["value"] = "\"NaN\"";
root["state"] = "NA";
} else {
root["value"] = value_accuracy_to_string(value, step_to_accuracy_decimals(obj->traits.get_step()));
root["state"] =
value_accuracy_with_uom_to_string(value, step_to_accuracy_decimals(obj->traits.get_step()), uom_ref);
}
return builder.serialize();
}
@@ -975,8 +958,6 @@ std::string WebServer::number_json(number::Number *obj, float value, JsonDetail
#ifdef USE_DATETIME_DATE
void WebServer::on_date_update(datetime::DateEntity *obj) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", date_state_json_generator);
}
void WebServer::handle_date_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -1020,10 +1001,8 @@ std::string WebServer::date_json(datetime::DateEntity *obj, JsonDetail start_con
json::JsonBuilder builder;
JsonObject root = builder.root();
set_json_id(root, obj, "date", start_config);
std::string value = str_sprintf("%d-%02d-%02d", obj->year, obj->month, obj->day);
root["value"] = value;
root["state"] = value;
set_json_icon_state_value(root, obj, "date", value, value, start_config);
if (start_config == DETAIL_ALL) {
this->add_sorting_info_(root, obj);
}
@@ -1034,8 +1013,6 @@ std::string WebServer::date_json(datetime::DateEntity *obj, JsonDetail start_con
#ifdef USE_DATETIME_TIME
void WebServer::on_time_update(datetime::TimeEntity *obj) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", time_state_json_generator);
}
void WebServer::handle_time_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -1078,10 +1055,8 @@ std::string WebServer::time_json(datetime::TimeEntity *obj, JsonDetail start_con
json::JsonBuilder builder;
JsonObject root = builder.root();
set_json_id(root, obj, "time", start_config);
std::string value = str_sprintf("%02d:%02d:%02d", obj->hour, obj->minute, obj->second);
root["value"] = value;
root["state"] = value;
set_json_icon_state_value(root, obj, "time", value, value, start_config);
if (start_config == DETAIL_ALL) {
this->add_sorting_info_(root, obj);
}
@@ -1092,8 +1067,6 @@ std::string WebServer::time_json(datetime::TimeEntity *obj, JsonDetail start_con
#ifdef USE_DATETIME_DATETIME
void WebServer::on_datetime_update(datetime::DateTimeEntity *obj) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", datetime_state_json_generator);
}
void WebServer::handle_datetime_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -1136,11 +1109,9 @@ std::string WebServer::datetime_json(datetime::DateTimeEntity *obj, JsonDetail s
json::JsonBuilder builder;
JsonObject root = builder.root();
set_json_id(root, obj, "datetime", start_config);
std::string value =
str_sprintf("%d-%02d-%02d %02d:%02d:%02d", obj->year, obj->month, obj->day, obj->hour, obj->minute, obj->second);
root["value"] = value;
root["state"] = value;
set_json_icon_state_value(root, obj, "datetime", value, value, start_config);
if (start_config == DETAIL_ALL) {
this->add_sorting_info_(root, obj);
}
@@ -1151,8 +1122,6 @@ std::string WebServer::datetime_json(datetime::DateTimeEntity *obj, JsonDetail s
#ifdef USE_TEXT
void WebServer::on_text_update(text::Text *obj, const std::string &state) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", text_state_json_generator);
}
void WebServer::handle_text_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -1191,16 +1160,11 @@ std::string WebServer::text_json(text::Text *obj, const std::string &value, Json
json::JsonBuilder builder;
JsonObject root = builder.root();
set_json_id(root, obj, "text", start_config);
std::string state = obj->traits.get_mode() == text::TextMode::TEXT_MODE_PASSWORD ? "********" : value;
set_json_icon_state_value(root, obj, "text", state, value, start_config);
root["min_length"] = obj->traits.get_min_length();
root["max_length"] = obj->traits.get_max_length();
root["pattern"] = obj->traits.get_pattern();
if (obj->traits.get_mode() == text::TextMode::TEXT_MODE_PASSWORD) {
root["state"] = "********";
} else {
root["state"] = value;
}
root["value"] = value;
if (start_config == DETAIL_ALL) {
root["mode"] = (int) obj->traits.get_mode();
this->add_sorting_info_(root, obj);
@@ -1212,8 +1176,6 @@ std::string WebServer::text_json(text::Text *obj, const std::string &value, Json
#ifdef USE_SELECT
void WebServer::on_select_update(select::Select *obj, const std::string &state, size_t index) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", select_state_json_generator);
}
void WebServer::handle_select_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -1270,8 +1232,6 @@ std::string WebServer::select_json(select::Select *obj, const std::string &value
#ifdef USE_CLIMATE
void WebServer::on_climate_update(climate::Climate *obj) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", climate_state_json_generator);
}
void WebServer::handle_climate_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -1412,8 +1372,6 @@ std::string WebServer::climate_json(climate::Climate *obj, JsonDetail start_conf
#ifdef USE_LOCK
void WebServer::on_lock_update(lock::Lock *obj) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", lock_state_json_generator);
}
void WebServer::handle_lock_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -1485,8 +1443,6 @@ std::string WebServer::lock_json(lock::Lock *obj, lock::LockState value, JsonDet
#ifdef USE_VALVE
void WebServer::on_valve_update(valve::Valve *obj) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", valve_state_json_generator);
}
void WebServer::handle_valve_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -1568,8 +1524,6 @@ std::string WebServer::valve_json(valve::Valve *obj, JsonDetail start_config) {
#ifdef USE_ALARM_CONTROL_PANEL
void WebServer::on_alarm_control_panel_update(alarm_control_panel::AlarmControlPanel *obj) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", alarm_control_panel_state_json_generator);
}
void WebServer::handle_alarm_control_panel_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -1714,8 +1668,6 @@ static const char *update_state_to_string(update::UpdateState state) {
}
void WebServer::on_update(update::UpdateEntity *obj) {
if (this->events_.empty())
return;
this->events_.deferrable_send_state(obj, "state", update_state_json_generator);
}
void WebServer::handle_update_request(AsyncWebServerRequest *request, const UrlMatch &match) {
@@ -1754,9 +1706,8 @@ std::string WebServer::update_json(update::UpdateEntity *obj, JsonDetail start_c
json::JsonBuilder builder;
JsonObject root = builder.root();
set_json_id(root, obj, "update", start_config);
root["value"] = obj->update_info.latest_version;
root["state"] = update_state_to_string(obj->state);
set_json_icon_state_value(root, obj, "update", update_state_to_string(obj->state), obj->update_info.latest_version,
start_config);
if (start_config == DETAIL_ALL) {
root["current_version"] = obj->update_info.current_version;
root["title"] = obj->update_info.title;

View File

@@ -412,6 +412,9 @@ void AsyncEventSource::try_send_nodefer(const char *message, const char *event,
void AsyncEventSource::deferrable_send_state(void *source, const char *event_type,
message_generator_t *message_generator) {
// Skip if no connected clients to avoid unnecessary processing
if (this->empty())
return;
for (auto *ses : this->sessions_) {
if (ses->fd_.load() != 0) { // Skip dead sessions
ses->deferrable_send_state(source, event_type, message_generator);
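The hunk above hoists the "no connected clients" guard into deferrable_send_state() itself, which is why every on_*_update() callback in the web_server hunks drops its own `if (this->events_.empty()) return;` line. A minimal sketch of the same idea, not the real AsyncEventSource API:

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Session {
  int fd{0};  // 0 marks a dead session
};

class EventSource {
 public:
  bool empty() const { return this->sessions_.empty(); }
  void deferrable_send_state(const char *event_type, const std::function<std::string()> &generate) {
    if (this->empty())
      return;  // single, centralized guard replaces the per-caller checks
    for (const auto &ses : this->sessions_) {
      if (ses.fd != 0)  // skip dead sessions
        std::cout << event_type << ": " << generate() << "\n";
    }
  }
  std::vector<Session> sessions_;
};

int main() {
  EventSource events;
  auto gen = [] { return std::string("{\"state\":\"ON\"}"); };
  events.deferrable_send_state("state", gen);  // no clients: returns immediately
  events.sessions_.push_back({3});
  events.deferrable_send_state("state", gen);  // now actually sends
}
```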

View File

@@ -267,7 +267,9 @@ network::IPAddress WiFiComponent::get_dns_address(int num) {
}
std::string WiFiComponent::get_use_address() const {
if (this->use_address_.empty()) {
return App.get_name() + ".local";
// ".local" suffix length for mDNS hostnames
constexpr size_t mdns_local_suffix_len = 5;
return make_name_with_suffix(App.get_name(), '.', "local", mdns_local_suffix_len);
}
return this->use_address_;
}
@@ -576,8 +578,9 @@ __attribute__((noinline)) static void log_scan_result(const WiFiScanResult &res)
format_mac_addr_upper(bssid.data(), bssid_s);
if (res.get_matches()) {
ESP_LOGI(TAG, "- '%s' %s" LOG_SECRET("(%s) ") "%s", res.get_ssid().c_str(), res.get_is_hidden() ? "(HIDDEN) " : "",
bssid_s, LOG_STR_ARG(get_signal_bars(res.get_rssi())));
ESP_LOGI(TAG, "- '%s' %s" LOG_SECRET("(%s) ") "%s", res.get_ssid().c_str(),
res.get_is_hidden() ? LOG_STR_LITERAL("(HIDDEN) ") : LOG_STR_LITERAL(""), bssid_s,
LOG_STR_ARG(get_signal_bars(res.get_rssi())));
ESP_LOGD(TAG,
" Channel: %u\n"
" RSSI: %d dB",

View File

@@ -1195,6 +1195,13 @@ def validate_bytes(value):
def hostname(value):
"""Validate that the value is a valid hostname.
Maximum length is 63 characters per RFC 1035.
Note: If this limit is changed, update MAX_NAME_WITH_SUFFIX_SIZE in
esphome/core/helpers.cpp to accommodate the new maximum length.
"""
value = string(value)
if re.match(r"^[a-z0-9-]{1,63}$", value, re.IGNORECASE) is not None:
return value

View File

@@ -340,8 +340,8 @@ void Application::calculate_looping_components_() {
}
}
// Pre-reserve vector to avoid reallocations
this->looping_components_.reserve(total_looping);
// Initialize FixedVector with exact size - no reallocation possible
this->looping_components_.init(total_looping);
// Add all components with loop override that aren't already LOOP_DONE
// Some components (like logger) may call disable_loop() during initialization

View File

@@ -102,9 +102,15 @@ class Application {
arch_init();
this->name_add_mac_suffix_ = name_add_mac_suffix;
if (name_add_mac_suffix) {
const std::string mac_suffix = get_mac_address().substr(6);
this->name_ = name + "-" + mac_suffix;
this->friendly_name_ = friendly_name.empty() ? "" : friendly_name + " " + mac_suffix;
// MAC address suffix length (last 6 characters of 12-char MAC address string)
constexpr size_t mac_address_suffix_len = 6;
const std::string mac_addr = get_mac_address();
// Use pointer + offset to avoid substr() allocation
const char *mac_suffix_ptr = mac_addr.c_str() + mac_address_suffix_len;
this->name_ = make_name_with_suffix(name, '-', mac_suffix_ptr, mac_address_suffix_len);
if (!friendly_name.empty()) {
this->friendly_name_ = make_name_with_suffix(friendly_name, ' ', mac_suffix_ptr, mac_address_suffix_len);
}
} else {
this->name_ = name;
this->friendly_name_ = friendly_name;
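The constructor change above slices the last six characters of the MAC string through a pointer offset instead of substr(), so no temporary std::string is created before make_name_with_suffix() copies the bytes. A small sketch of that slicing, with get_mac_address() stubbed out here:

```cpp
#include <cstddef>
#include <iostream>
#include <string>

static std::string get_mac_address() { return "a1b2c3d4e5f6"; }  // stand-in, 12 hex chars

int main() {
  constexpr std::size_t mac_address_suffix_len = 6;
  const std::string mac = get_mac_address();
  // Points into mac's own buffer; valid while mac is alive, and no copy is made.
  const char *suffix = mac.c_str() + mac_address_suffix_len;
  std::string name = "livingroom";
  name.reserve(name.size() + 1 + mac_address_suffix_len);  // at most one allocation
  name += '-';
  name.append(suffix, mac_address_suffix_len);
  std::cout << name << "\n";  // livingroom-d4e5f6
}
```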
@@ -472,7 +478,7 @@ class Application {
// - When a component is enabled, it's swapped with the first inactive component
// and active_end_ is incremented
// - This eliminates branch mispredictions from flag checking in the hot loop
std::vector<Component *> looping_components_{};
FixedVector<Component *> looping_components_{};
#ifdef USE_SOCKET_SELECT_SUPPORT
std::vector<int> socket_fds_; // Vector of all monitored socket file descriptors
#endif

View File

@@ -200,7 +200,7 @@ CONFIG_SCHEMA = cv.All(
cv.Schema(
{
cv.Required(CONF_NAME): cv.valid_name,
cv.Optional(CONF_FRIENDLY_NAME, ""): cv.string,
cv.Optional(CONF_FRIENDLY_NAME, ""): cv.All(cv.string, cv.Length(max=120)),
cv.Optional(CONF_AREA): validate_area_config,
cv.Optional(CONF_COMMENT): cv.string,
cv.Required(CONF_BUILD_PATH): cv.string,

View File

@@ -83,6 +83,7 @@
#define USE_LVGL_TILEVIEW
#define USE_LVGL_TOUCHSCREEN
#define USE_MDNS
#define USE_MDNS_STORE_SERVICES
#define MDNS_SERVICE_COUNT 3
#define MDNS_DYNAMIC_TXT_COUNT 3
#define USE_MEDIA_PLAYER
@@ -193,6 +194,7 @@
#define USE_WEBSERVER_PORT 80 // NOLINT
#define USE_WEBSERVER_SORTING
#define USE_WIFI_11KV_SUPPORT
#define USB_HOST_MAX_REQUESTS 16
#ifdef USE_ARDUINO
#define USE_ARDUINO_VERSION_CODE VERSION_CODE(3, 2, 1)
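USE_MDNS_STORE_SERVICES suggests the mdns component only keeps its service list in RAM when the define is emitted. A purely illustrative sketch of that kind of #ifdef-gated member (the names below are made up, not the mdns component's actual declarations):

```cpp
#include <cstdint>
#include <string>
#include <vector>

#define USE_MDNS_STORE_SERVICES  // emitted by codegen only when another component needs the stored list

struct MDNSService {
  std::string name;
  uint16_t port;
};

class MDNSComponent {
 public:
  void add_service(const MDNSService &svc) {
#ifdef USE_MDNS_STORE_SERVICES
    this->services_.push_back(svc);  // kept around for later lookups
#else
    (void) svc;  // announced immediately, never stored, saving a few hundred bytes of RAM
#endif
  }

 private:
#ifdef USE_MDNS_STORE_SERVICES
  std::vector<MDNSService> services_;
#endif
};

int main() {
  MDNSComponent m;
  m.add_service({"_esphomelib", 6053});
}
```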

View File

@@ -235,6 +235,30 @@ std::string str_sprintf(const char *fmt, ...) {
return str;
}
// Maximum size for name with suffix: 120 (max friendly name) + 1 (separator) + 6 (MAC suffix) + 1 (null term)
static constexpr size_t MAX_NAME_WITH_SUFFIX_SIZE = 128;
std::string make_name_with_suffix(const std::string &name, char sep, const char *suffix_ptr, size_t suffix_len) {
char buffer[MAX_NAME_WITH_SUFFIX_SIZE];
size_t name_len = name.size();
size_t total_len = name_len + 1 + suffix_len;
// Silently truncate if needed: prioritize keeping the full suffix
if (total_len >= MAX_NAME_WITH_SUFFIX_SIZE) {
// NOTE: This calculation could underflow if suffix_len > MAX_NAME_WITH_SUFFIX_SIZE - 2,
// but this is safe because this helper is only called with small suffixes:
// MAC suffixes (6-12 bytes), ".local" (5 bytes), etc.
name_len = MAX_NAME_WITH_SUFFIX_SIZE - suffix_len - 2; // -2 for separator and null terminator
total_len = name_len + 1 + suffix_len;
}
memcpy(buffer, name.c_str(), name_len);
buffer[name_len] = sep;
memcpy(buffer + name_len + 1, suffix_ptr, suffix_len);
buffer[total_len] = '\0';
return std::string(buffer, total_len);
}
// Parsing & formatting
size_t parse_hex(const char *str, size_t length, uint8_t *data, size_t count) {
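A usage sketch for make_name_with_suffix(); the helper body is repeated from the hunk above so the calls compile on their own, and the two calls mirror the MAC-suffix and ".local" callers elsewhere in this changeset:

```cpp
#include <cstddef>
#include <cstring>
#include <iostream>
#include <string>

static constexpr size_t MAX_NAME_WITH_SUFFIX_SIZE = 128;

std::string make_name_with_suffix(const std::string &name, char sep, const char *suffix_ptr, size_t suffix_len) {
  char buffer[MAX_NAME_WITH_SUFFIX_SIZE];
  size_t name_len = name.size();
  size_t total_len = name_len + 1 + suffix_len;
  if (total_len >= MAX_NAME_WITH_SUFFIX_SIZE) {
    // Truncate the name so the separator, full suffix, and null terminator always fit.
    name_len = MAX_NAME_WITH_SUFFIX_SIZE - suffix_len - 2;
    total_len = name_len + 1 + suffix_len;
  }
  memcpy(buffer, name.c_str(), name_len);
  buffer[name_len] = sep;
  memcpy(buffer + name_len + 1, suffix_ptr, suffix_len);
  buffer[total_len] = '\0';
  return std::string(buffer, total_len);
}

int main() {
  std::cout << make_name_with_suffix("kitchen", '-', "d4e5f6", 6) << "\n";  // kitchen-d4e5f6
  std::cout << make_name_with_suffix("kitchen", '.', "local", 5) << "\n";   // kitchen.local
  // A 200-character name is truncated; the result stays one byte under the buffer size:
  std::string long_name(200, 'x');
  std::cout << make_name_with_suffix(long_name, '-', "d4e5f6", 6).size() << "\n";  // 127
}
```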

View File

@@ -159,6 +159,54 @@ template<typename T, size_t N> class StaticVector {
const_reverse_iterator rend() const { return const_reverse_iterator(begin()); }
};
/// Fixed-capacity vector - allocates once at runtime, never reallocates
/// This avoids std::vector template overhead (_M_realloc_insert, _M_default_append)
/// when size is known at initialization but not at compile time
template<typename T> class FixedVector {
private:
T *data_{nullptr};
size_t size_{0};
size_t capacity_{0};
public:
FixedVector() = default;
~FixedVector() {
if (data_ != nullptr) {
delete[] data_;
}
}
// Disable copy to avoid accidental copies
FixedVector(const FixedVector &) = delete;
FixedVector &operator=(const FixedVector &) = delete;
// Allocate capacity - can only be called once on empty vector
void init(size_t n) {
if (data_ == nullptr && n > 0) {
data_ = new T[n];
capacity_ = n;
size_ = 0;
}
}
/// Add element if capacity remains
/// Caller must ensure sufficient capacity was allocated via init()
/// Pushes beyond capacity are silently ignored (no exception, assertion, or reallocation)
void push_back(const T &value) {
if (size_ < capacity_) {
data_[size_++] = value;
}
}
size_t size() const { return size_; }
/// Access element without bounds checking (matches std::vector behavior)
/// Caller must ensure index is valid (i < size())
T &operator[](size_t i) { return data_[i]; }
const T &operator[](size_t i) const { return data_[i]; }
};
///@}
/// @name Mathematics
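A usage sketch for FixedVector, assuming the class definition above (esphome/core/helpers.h) is in scope; it mirrors the calculate_looping_components_() pattern where the count is determined first and allocated exactly once:

```cpp
#include <cstddef>
#include <iostream>

int main() {
  size_t total_looping = 3;  // normally counted from the registered components
  FixedVector<int> looping;
  looping.init(total_looping);  // single allocation; capacity is fixed from here on
  looping.push_back(10);
  looping.push_back(20);
  looping.push_back(30);
  looping.push_back(40);  // beyond capacity: silently dropped, never reallocates
  for (size_t i = 0; i < looping.size(); i++)
    std::cout << looping[i] << "\n";  // prints 10, 20, 30
}
```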
@@ -306,6 +354,16 @@ std::string __attribute__((format(printf, 1, 3))) str_snprintf(const char *fmt,
/// sprintf-like function returning std::string.
std::string __attribute__((format(printf, 1, 2))) str_sprintf(const char *fmt, ...);
/// Concatenate a name with a separator and suffix using an efficient stack-based approach.
/// This avoids multiple heap allocations during string construction.
/// Maximum name length supported is 120 characters for friendly names.
/// @param name The base name string
/// @param sep The separator character (e.g., '-', ' ', or '.')
/// @param suffix_ptr Pointer to the suffix characters
/// @param suffix_len Length of the suffix
/// @return The concatenated string: name + sep + suffix
std::string make_name_with_suffix(const std::string &name, char sep, const char *suffix_ptr, size_t suffix_len);
///@}
/// @name Parsing & formatting

View File

@@ -410,7 +410,7 @@ def run_ota_impl_(
af, socktype, _, _, sa = r
_LOGGER.info("Connecting to %s port %s...", sa[0], sa[1])
sock = socket.socket(af, socktype)
sock.settimeout(10.0)
sock.settimeout(20.0)
try:
sock.connect(sa)
except OSError as err:

View File

@@ -15,6 +15,8 @@ from esphome.const import (
from esphome.core import CORE, EsphomeError
from esphome.helpers import (
copy_file_if_changed,
get_str_env,
is_ha_addon,
read_file,
walk_files,
write_file_if_changed,
@@ -338,16 +340,21 @@ def clean_build():
def clean_all(configuration: list[str]):
import shutil
# Clean entire build dir
for dir in configuration:
build_dir = Path(dir) / ".esphome"
if build_dir.is_dir():
_LOGGER.info("Cleaning %s", build_dir)
# Don't remove storage as it will cause the dashboard to regenerate all configs
for item in build_dir.iterdir():
if item.is_file():
data_dirs = [Path(dir) / ".esphome" for dir in configuration]
if is_ha_addon():
data_dirs.append(Path("/data"))
if "ESPHOME_DATA_DIR" in os.environ:
data_dirs.append(Path(get_str_env("ESPHOME_DATA_DIR", None)))
# Clean build dir
for dir in data_dirs:
if dir.is_dir():
_LOGGER.info("Cleaning %s", dir)
# Don't remove storage or .json files which are needed by the dashboard
for item in dir.iterdir():
if item.is_file() and not item.name.endswith(".json"):
item.unlink()
elif item.name != "storage" and item.is_dir():
elif item.is_dir() and item.name != "storage":
shutil.rmtree(item)
# Clean PlatformIO project files

View File

@@ -11,8 +11,8 @@ pyserial==3.5
platformio==6.1.18 # When updating platformio, also update /docker/Dockerfile
esptool==5.1.0
click==8.1.7
esphome-dashboard==20251009.0
aioesphomeapi==41.14.0
esphome-dashboard==20251013.0
aioesphomeapi==41.16.0
zeroconf==0.148.0
puremagic==1.30
ruamel.yaml==0.18.15 # dashboard_import

View File

@@ -1,4 +1,4 @@
pylint==3.3.9
pylint==4.0.0
flake8==7.3.0 # also change in .pre-commit-config.yaml when updating
ruff==0.14.0 # also change in .pre-commit-config.yaml when updating
pyupgrade==3.21.0 # also change in .pre-commit-config.yaml when updating

View File

@@ -31,7 +31,6 @@ Options:
from __future__ import annotations
import argparse
from functools import cache
import json
import os
from pathlib import Path
@@ -46,6 +45,7 @@ from helpers import (
changed_files,
get_all_dependencies,
get_components_from_integration_fixtures,
parse_list_components_output,
root_path,
)
@@ -212,24 +212,6 @@ def _any_changed_file_endswith(branch: str | None, extensions: tuple[str, ...])
return any(file.endswith(extensions) for file in changed_files(branch))
@cache
def _component_has_tests(component: str) -> bool:
"""Check if a component has test files.
Cached to avoid repeated filesystem operations for the same component.
Args:
component: Component name to check
Returns:
True if the component has test YAML files
"""
tests_dir = Path(root_path) / "tests" / "components" / component
if not tests_dir.exists():
return False
return any(tests_dir.glob("test.*.yaml"))
def main() -> None:
"""Main function that determines which CI jobs to run."""
parser = argparse.ArgumentParser(
@@ -246,37 +228,23 @@ def main() -> None:
run_clang_format = should_run_clang_format(args.branch)
run_python_linters = should_run_python_linters(args.branch)
# Get both directly changed and all changed components (with dependencies) in one call
# Get changed components using list-components.py for exact compatibility
script_path = Path(__file__).parent / "list-components.py"
cmd = [sys.executable, str(script_path), "--changed-with-deps"]
cmd = [sys.executable, str(script_path), "--changed"]
if args.branch:
cmd.extend(["-b", args.branch])
result = subprocess.run(cmd, capture_output=True, text=True, check=True)
component_data = json.loads(result.stdout)
directly_changed_components = component_data["directly_changed"]
changed_components = component_data["all_changed"]
changed_components = parse_list_components_output(result.stdout)
# Filter to only components that have test files
# Components without tests shouldn't generate CI test jobs
tests_dir = Path(root_path) / "tests" / "components"
changed_components_with_tests = [
component for component in changed_components if _component_has_tests(component)
]
# Get directly changed components with tests (for isolated testing)
# These will be tested WITHOUT --testing-mode in CI to enable full validation
# (pin conflicts, etc.) since they contain the actual changes being reviewed
directly_changed_with_tests = [
component
for component in directly_changed_components
if _component_has_tests(component)
]
# Get dependency-only components (for grouped testing)
dependency_only_components = [
component
for component in changed_components_with_tests
if component not in directly_changed_components
for component in changed_components
if (component_test_dir := tests_dir / component).exists()
and any(component_test_dir.glob("test.*.yaml"))
]
# Build output
@@ -287,11 +255,7 @@ def main() -> None:
"python_linters": run_python_linters,
"changed_components": changed_components,
"changed_components_with_tests": changed_components_with_tests,
"directly_changed_components_with_tests": directly_changed_with_tests,
"dependency_only_components_with_tests": dependency_only_components,
"component_test_count": len(changed_components_with_tests),
"directly_changed_count": len(directly_changed_with_tests),
"dependency_only_count": len(dependency_only_components),
}
# Output as JSON

View File

@@ -185,32 +185,18 @@ def main():
"-c",
"--changed",
action="store_true",
help="List all components required for testing based on changes (includes dependencies)",
)
parser.add_argument(
"--changed-direct",
action="store_true",
help="List only directly changed components (without dependencies)",
)
parser.add_argument(
"--changed-with-deps",
action="store_true",
help="Output JSON with both directly changed and all changed components",
help="List all components required for testing based on changes",
)
parser.add_argument(
"-b", "--branch", help="Branch to compare changed files against"
)
args = parser.parse_args()
if args.branch and not (
args.changed or args.changed_direct or args.changed_with_deps
):
parser.error(
"--branch requires --changed, --changed-direct, or --changed-with-deps"
)
if args.branch and not args.changed:
parser.error("--branch requires --changed")
if args.changed or args.changed_direct or args.changed_with_deps:
# When --changed* is passed, only get the changed files
if args.changed:
# When --changed is passed, only get the changed files
changed = changed_files(args.branch)
# If any base test file(s) changed, there's no need to filter out components
@@ -224,25 +210,8 @@ def main():
# Get all component files
files = get_all_component_files()
if args.changed_with_deps:
# Return JSON with both directly changed and all changed components
import json
directly_changed = get_components(files, False)
all_changed = get_components(files, True)
output = {
"directly_changed": directly_changed,
"all_changed": all_changed,
}
print(json.dumps(output))
elif args.changed_direct:
# Return only directly changed components (without dependencies)
for c in get_components(files, False):
print(c)
else:
# Return all changed components (with dependencies) - default behavior
for c in get_components(files, args.changed):
print(c)
for c in get_components(files, args.changed):
print(c)
if __name__ == "__main__":

View File

@@ -56,7 +56,6 @@ def create_intelligent_batches(
components: list[str],
tests_dir: Path,
batch_size: int = 40,
directly_changed: set[str] | None = None,
) -> list[list[str]]:
"""Create batches optimized for component grouping.
@@ -64,7 +63,6 @@ def create_intelligent_batches(
components: List of component names to batch
tests_dir: Path to tests/components directory
batch_size: Target size for each batch
directly_changed: Set of directly changed components (for logging only)
Returns:
List of component batches (lists of component names)
@@ -189,10 +187,6 @@ def main() -> int:
default=Path("tests/components"),
help="Path to tests/components directory",
)
parser.add_argument(
"--directly-changed",
help="JSON array of directly changed component names (for logging only)",
)
parser.add_argument(
"--output",
"-o",
@@ -214,21 +208,11 @@ def main() -> int:
print("Components must be a JSON array", file=sys.stderr)
return 1
# Parse directly changed components list from JSON (if provided)
directly_changed = None
if args.directly_changed:
try:
directly_changed = set(json.loads(args.directly_changed))
except json.JSONDecodeError as e:
print(f"Error parsing directly-changed JSON: {e}", file=sys.stderr)
return 1
# Create intelligent batches
batches = create_intelligent_batches(
components=components,
tests_dir=args.tests_dir,
batch_size=args.batch_size,
directly_changed=directly_changed,
)
# Convert batches to space-separated strings for CI
@@ -261,28 +245,6 @@ def main() -> int:
print("\n=== Intelligent Batch Summary ===", file=sys.stderr)
print(f"Total components requested: {len(components)}", file=sys.stderr)
print(f"Components with test files: {actual_components}", file=sys.stderr)
# Show breakdown of directly changed vs dependencies
if directly_changed:
direct_count = sum(
1 for comp in all_batched_components if comp in directly_changed
)
dep_count = actual_components - direct_count
direct_comps = [
comp for comp in all_batched_components if comp in directly_changed
]
dep_comps = [
comp for comp in all_batched_components if comp not in directly_changed
]
print(
f" - Direct changes: {direct_count} ({', '.join(sorted(direct_comps))})",
file=sys.stderr,
)
print(
f" - Dependencies: {dep_count} ({', '.join(sorted(dep_comps))})",
file=sys.stderr,
)
print(f" - Groupable (weight=1): {groupable_count}", file=sys.stderr)
print(f" - Isolated (weight=10): {isolated_count}", file=sys.stderr)
if actual_components < len(components):

View File

@@ -365,7 +365,6 @@ def run_grouped_component_tests(
build_dir: Path,
esphome_command: str,
continue_on_fail: bool,
additional_isolated: set[str] | None = None,
) -> tuple[set[tuple[str, str]], list[str], list[str], dict[str, str]]:
"""Run grouped component tests.
@@ -377,7 +376,6 @@ def run_grouped_component_tests(
build_dir: Path to build directory
esphome_command: ESPHome command (config/compile)
continue_on_fail: Whether to continue on failure
additional_isolated: Additional components to treat as isolated (not grouped)
Returns:
Tuple of (tested_components, passed_tests, failed_tests, failed_commands)
@@ -399,17 +397,6 @@ def run_grouped_component_tests(
# Track why components can't be grouped (for detailed output)
non_groupable_reasons = {}
# Merge additional isolated components with predefined ones
# ISOLATED COMPONENTS are tested individually WITHOUT --testing-mode
# This is critical because:
# - Grouped tests use --testing-mode which disables pin conflict checks and other validation
# - These checks are disabled to allow config merging (multiple components in one build)
# - For directly changed components (via --isolate), we need full validation to catch issues
# - Dependencies are safe to group since they weren't modified in the PR
all_isolated = set(ISOLATED_COMPONENTS.keys())
if additional_isolated:
all_isolated.update(additional_isolated)
# Group by (platform, bus_signature)
for component, platforms in component_buses.items():
if component not in all_tests:
@@ -417,7 +404,7 @@ def run_grouped_component_tests(
# Skip components that must be tested in isolation
# These are shown separately and should not be in non_groupable_reasons
if component in all_isolated:
if component in ISOLATED_COMPONENTS:
continue
# Skip base bus components (these test the bus platforms themselves)
@@ -466,28 +453,15 @@ def run_grouped_component_tests(
print("\nGrouping Plan:")
print("-" * 80)
# Show isolated components (must test individually due to known issues or direct changes)
isolated_in_tests = [c for c in all_isolated if c in all_tests]
# Show isolated components (must test individually due to known issues)
isolated_in_tests = [c for c in ISOLATED_COMPONENTS if c in all_tests]
if isolated_in_tests:
predefined_isolated = [c for c in isolated_in_tests if c in ISOLATED_COMPONENTS]
additional_in_tests = [
c for c in isolated_in_tests if c in (additional_isolated or set())
]
if predefined_isolated:
print(
f"\n{len(predefined_isolated)} components must be tested in isolation (known build issues):"
)
for comp in sorted(predefined_isolated):
reason = ISOLATED_COMPONENTS[comp]
print(f" - {comp}: {reason}")
if additional_in_tests:
print(
f"\n{len(additional_in_tests)} components tested in isolation (directly changed in PR):"
)
for comp in sorted(additional_in_tests):
print(f" - {comp}")
print(
f"\n{len(isolated_in_tests)} components must be tested in isolation (known build issues):"
)
for comp in sorted(isolated_in_tests):
reason = ISOLATED_COMPONENTS[comp]
print(f" - {comp}: {reason}")
# Show base bus components (test the bus platform implementations)
base_bus_in_tests = [c for c in BASE_BUS_COMPONENTS if c in all_tests]
@@ -759,7 +733,6 @@ def test_components(
esphome_command: str,
continue_on_fail: bool,
enable_grouping: bool = True,
isolated_components: set[str] | None = None,
) -> int:
"""Test components with optional intelligent grouping.
@@ -769,10 +742,6 @@ def test_components(
esphome_command: ESPHome command (config/compile)
continue_on_fail: Whether to continue on failure
enable_grouping: Whether to enable component grouping
isolated_components: Set of component names to test in isolation (not grouped).
These are tested WITHOUT --testing-mode to enable full validation
(pin conflicts, etc). This is used in CI for directly changed components
to catch issues that would be missed with --testing-mode.
Returns:
Exit code (0 for success, 1 for failure)
@@ -819,7 +788,6 @@ def test_components(
build_dir=build_dir,
esphome_command=esphome_command,
continue_on_fail=continue_on_fail,
additional_isolated=isolated_components,
)
# Then run individual tests for components not in groups
@@ -944,30 +912,18 @@ def main() -> int:
action="store_true",
help="Disable component grouping (test each component individually)",
)
parser.add_argument(
"--isolate",
help="Comma-separated list of components to test in isolation (not grouped with others). "
"These are tested WITHOUT --testing-mode to enable full validation. "
"Used in CI for directly changed components to catch pin conflicts and other issues.",
)
args = parser.parse_args()
# Parse component patterns
component_patterns = [p.strip() for p in args.components.split(",")]
# Parse isolated components
isolated_components = None
if args.isolate:
isolated_components = {c.strip() for c in args.isolate.split(",") if c.strip()}
return test_components(
component_patterns=component_patterns,
platform_filter=args.target,
esphome_command=args.esphome_command,
continue_on_fail=args.continue_on_fail,
enable_grouping=not args.no_grouping,
isolated_components=isolated_components,
)

View File

@@ -1,4 +1,5 @@
usb_host:
max_transfer_requests: 32 # Test uint32_t bitmask path (17-32 requests)
devices:
- id: device_1
vid: 0x1234
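The comment above implies transfer slots are tracked in a uint32_t bitmask, so configuring more than 16 requests exercises the upper half of the word. The following is purely illustrative of such a slot pool and is not the usb_host component's actual implementation:

```cpp
#include <cstdint>
#include <iostream>

class SlotPool {
 public:
  explicit SlotPool(uint8_t count) : count_(count) {}
  // Returns a free slot index, or -1 if every slot is in use (exhaustion).
  int claim() {
    for (uint8_t i = 0; i < count_; i++) {
      if (!(used_ & (1u << i))) {
        used_ |= (1u << i);
        return i;
      }
    }
    return -1;
  }
  void release(int slot) { used_ &= ~(1u << slot); }

 private:
  uint32_t used_{0};  // one bit per in-flight transfer; 32 bits covers max_transfer_requests: 32
  uint8_t count_;
};

int main() {
  SlotPool pool(32);
  int a = pool.claim();
  int b = pool.claim();
  std::cout << a << " " << b << "\n";  // 0 1
  pool.release(a);
  std::cout << pool.claim() << "\n";   // 0 again
}
```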

View File

@@ -73,11 +73,9 @@ def test_main_all_tests_should_run(
mock_should_run_clang_format.return_value = True
mock_should_run_python_linters.return_value = True
# Mock list-components.py output (now returns JSON with --changed-with-deps)
# Mock list-components.py output
mock_result = Mock()
mock_result.stdout = json.dumps(
{"directly_changed": ["wifi", "api"], "all_changed": ["wifi", "api", "sensor"]}
)
mock_result.stdout = "wifi\napi\nsensor\n"
mock_subprocess_run.return_value = mock_result
# Run main function with mocked argv
@@ -118,7 +116,7 @@ def test_main_no_tests_should_run(
# Mock empty list-components.py output
mock_result = Mock()
mock_result.stdout = json.dumps({"directly_changed": [], "all_changed": []})
mock_result.stdout = ""
mock_subprocess_run.return_value = mock_result
# Run main function with mocked argv
@@ -179,9 +177,7 @@ def test_main_with_branch_argument(
# Mock list-components.py output
mock_result = Mock()
mock_result.stdout = json.dumps(
{"directly_changed": ["mqtt"], "all_changed": ["mqtt"]}
)
mock_result.stdout = "mqtt\n"
mock_subprocess_run.return_value = mock_result
with patch("sys.argv", ["script.py", "-b", "main"]):
@@ -196,7 +192,7 @@ def test_main_with_branch_argument(
# Check that list-components.py was called with branch
mock_subprocess_run.assert_called_once()
call_args = mock_subprocess_run.call_args[0][0]
assert "--changed-with-deps" in call_args
assert "--changed" in call_args
assert "-b" in call_args
assert "main" in call_args
@@ -415,12 +411,7 @@ def test_main_filters_components_without_tests(
# Mock list-components.py output with 3 components
# wifi: has tests, sensor: has tests, airthings_ble: no tests
mock_result = Mock()
mock_result.stdout = json.dumps(
{
"directly_changed": ["wifi", "sensor"],
"all_changed": ["wifi", "sensor", "airthings_ble"],
}
)
mock_result.stdout = "wifi\nsensor\nairthings_ble\n"
mock_subprocess_run.return_value = mock_result
# Create test directory structure
@@ -445,8 +436,6 @@ def test_main_filters_components_without_tests(
patch.object(determine_jobs, "root_path", str(tmp_path)),
patch("sys.argv", ["determine-jobs.py"]),
):
# Clear the cache since we're mocking root_path
determine_jobs._component_has_tests.cache_clear()
determine_jobs.main()
# Check output

View File

@@ -493,7 +493,7 @@ def test_run_ota_impl_successful(
assert result_host == "192.168.1.100"
# Verify socket was configured correctly
mock_socket.settimeout.assert_called_with(10.0)
mock_socket.settimeout.assert_called_with(20.0)
mock_socket.connect.assert_called_once_with(("192.168.1.100", 3232))
mock_socket.close.assert_called_once()

View File

@@ -1203,6 +1203,31 @@ def test_show_logs_api(
)
@patch("esphome.components.api.client.run_logs")
def test_show_logs_api_with_fqdn_mdns_disabled(
mock_run_logs: Mock,
) -> None:
"""Test show_logs with API using FQDN when mDNS is disabled."""
setup_core(
config={
"logger": {},
CONF_API: {},
CONF_MDNS: {CONF_DISABLED: True},
},
platform=PLATFORM_ESP32,
)
mock_run_logs.return_value = 0
args = MockArgs()
devices = ["device.example.com"]
result = show_logs(CORE.config, args, devices)
assert result == 0
# Should use the FQDN directly, not try MQTT lookup
mock_run_logs.assert_called_once_with(CORE.config, ["device.example.com"])
@patch("esphome.components.api.client.run_logs")
def test_show_logs_api_with_mqtt_fallback(
mock_run_logs: Mock,
@@ -1222,7 +1247,7 @@ def test_show_logs_api_with_mqtt_fallback(
mock_mqtt_get_ip.return_value = ["192.168.1.200"]
args = MockArgs(username="user", password="pass", client_id="client")
devices = ["device.local"]
devices = ["MQTTIP"]
result = show_logs(CORE.config, args, devices)
@@ -1487,27 +1512,31 @@ def test_mqtt_get_ip() -> None:
def test_has_resolvable_address() -> None:
"""Test has_resolvable_address function."""
# Test with mDNS enabled and hostname address
# Test with mDNS enabled and .local hostname address
setup_core(config={}, address="esphome-device.local")
assert has_resolvable_address() is True
# Test with mDNS disabled and hostname address
# Test with mDNS disabled and .local hostname address (still resolvable via DNS)
setup_core(
config={CONF_MDNS: {CONF_DISABLED: True}}, address="esphome-device.local"
)
assert has_resolvable_address() is False
assert has_resolvable_address() is True
# Test with IP address (mDNS doesn't matter)
# Test with mDNS disabled and regular DNS hostname (resolvable)
setup_core(config={CONF_MDNS: {CONF_DISABLED: True}}, address="device.example.com")
assert has_resolvable_address() is True
# Test with IP address (always resolvable, mDNS doesn't matter)
setup_core(config={}, address="192.168.1.100")
assert has_resolvable_address() is True
# Test with IP address and mDNS disabled
# Test with IP address and mDNS disabled (still resolvable)
setup_core(config={CONF_MDNS: {CONF_DISABLED: True}}, address="192.168.1.100")
assert has_resolvable_address() is True
# Test with no address but mDNS enabled (can still resolve mDNS names)
# Test with no address
setup_core(config={}, address=None)
assert has_resolvable_address() is True
assert has_resolvable_address() is False
# Test with no address and mDNS disabled
setup_core(config={CONF_MDNS: {CONF_DISABLED: True}}, address=None)

View File

@@ -985,3 +985,49 @@ def test_clean_all_removes_non_storage_directories(
# Verify logging mentions cleaning
assert "Cleaning" in caplog.text
assert str(build_dir) in caplog.text
@patch("esphome.writer.CORE")
def test_clean_all_preserves_json_files(
mock_core: MagicMock,
tmp_path: Path,
caplog: pytest.LogCaptureFixture,
) -> None:
"""Test clean_all preserves .json files."""
# Create build directory with various files
config_dir = tmp_path / "config"
config_dir.mkdir()
build_dir = config_dir / ".esphome"
build_dir.mkdir()
# Create .json files (should be preserved)
(build_dir / "config.json").write_text('{"config": "data"}')
(build_dir / "metadata.json").write_text('{"metadata": "info"}')
# Create non-.json files (should be removed)
(build_dir / "dummy.txt").write_text("x")
(build_dir / "other.log").write_text("log content")
# Call clean_all
from esphome.writer import clean_all
with caplog.at_level("INFO"):
clean_all([str(config_dir)])
# Verify .esphome directory still exists
assert build_dir.exists()
# Verify .json files are preserved
assert (build_dir / "config.json").exists()
assert (build_dir / "config.json").read_text() == '{"config": "data"}'
assert (build_dir / "metadata.json").exists()
assert (build_dir / "metadata.json").read_text() == '{"metadata": "info"}'
# Verify non-.json files were removed
assert not (build_dir / "dummy.txt").exists()
assert not (build_dir / "other.log").exists()
# Verify logging mentions cleaning
assert "Cleaning" in caplog.text
assert str(build_dir) in caplog.text