Compare commits

...

5 Commits

Author SHA1 Message Date
Stefan Agner
844308d755 Merge branch 'main' into handle-config-envs-none 2026-02-13 09:00:31 +01:00
dependabot[bot]
0cce2dad3c Bump ruff from 0.15.0 to 0.15.1 (#6565)
Bumps [ruff](https://github.com/astral-sh/ruff) from 0.15.0 to 0.15.1.
- [Release notes](https://github.com/astral-sh/ruff/releases)
- [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md)
- [Commits](https://github.com/astral-sh/ruff/compare/0.15.0...0.15.1)

---
updated-dependencies:
- dependency-name: ruff
  dependency-version: 0.15.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-02-13 08:59:59 +01:00
Stefan Agner
8dd42cb7a0 Fix getting Supervisor IP address in testing (#6564)
* Fix getting Supervisor IP address in testing

Newer Docker versions (probably newer than 29.x) do not have a global
IPAddress attribute under .NetworkSettings anymore. There is a
network-specific map under Networks. For our case, the hassio network has
the relevant IP address. This network-specific map already existed before,
hence the new inspect format works for old as well as new Docker versions.

While at it, also adjust the test fixture.

* Actively wait for hassio IPAddress to become valid
2026-02-13 08:12:19 +01:00
Mike Degatano
844ce5b318 Handle NoneType error for envs on homeassistant stop 2026-02-12 22:00:21 +00:00
Mike Degatano
590674ba7c Remove blocking I/O added to import_image (#6557)
* Remove blocking I/O added to import_image

* Add scanned modules to extra blockbuster functions

* Use same cast avoidance approach in export_image

* Remove unnecessary local image_writer variable

* Remove unnecessary local image_tar_stream variable

---------

Co-authored-by: Stefan Agner <stefan@agner.ch>
2026-02-12 17:37:15 +01:00
7 changed files with 62 additions and 33 deletions

View File

@@ -296,7 +296,11 @@ jobs:
- &wait_for_supervisor
name: Wait for Supervisor to come up
run: |
SUPERVISOR=$(docker inspect --format='{{.NetworkSettings.IPAddress}}' hassio_supervisor)
until SUPERVISOR=$(docker inspect --format='{{.NetworkSettings.Networks.hassio.IPAddress}}' hassio_supervisor 2>/dev/null) && \
[ -n "$SUPERVISOR" ] && [ "$SUPERVISOR" != "<no value>" ]; do
echo "Waiting for network configuration..."
sleep 1
done
echo "Waiting for Supervisor API at http://${SUPERVISOR}/supervisor/ping"
timeout=300
elapsed=0

View File

@@ -8,7 +8,7 @@ pytest-asyncio==1.3.0
pytest-cov==7.0.0
pytest-timeout==2.4.0
pytest==9.0.2
ruff==0.15.0
ruff==0.15.1
time-machine==3.2.0
types-docker==7.1.0.20260109
types-pyyaml==6.0.12.20250915

View File

@@ -64,8 +64,8 @@ class DockerHomeAssistant(DockerInterface):
"""Return timeout for Docker actions."""
# Use S6_SERVICES_GRACETIME to avoid killing Home Assistant Core, see
# https://github.com/home-assistant/core/tree/dev/Dockerfile
if self.meta_config and "Env" in self.meta_config:
for env in self.meta_config["Env"]:
if self.meta_config and (envs := self.meta_config.get("Env")):
for env in envs:
if match := ENV_S6_GRACETIME.match(env):
return 20 + int(int(match.group(1)) / 1000)

View File

@@ -9,7 +9,7 @@ from dataclasses import dataclass
import errno
from functools import partial
from http import HTTPStatus
from io import BufferedWriter
from io import BufferedReader, BufferedWriter
from ipaddress import IPv4Address
import json
import logging
@@ -1025,13 +1025,30 @@ class DockerAPI(CoreSysAttributes):
async def import_image(self, tar_file: Path) -> dict[str, Any] | None:
"""Import a tar file as image."""
image_tar_stream: BufferedReader | None = None
try:
with tar_file.open("rb") as read_tar:
resp: list[dict[str, Any]] = await self.images.import_image(read_tar)
except (aiodocker.DockerError, OSError) as err:
# Lambda avoids need for a cast here. Since return type of open is based on mode
image_tar_stream = await self.sys_run_in_executor(
lambda: tar_file.open("rb")
)
resp: list[dict[str, Any]] = await self.images.import_image(
image_tar_stream
)
except aiodocker.DockerError as err:
raise DockerError(
f"Can't import image from tar: {err}", _LOGGER.error
) from err
except OSError as err:
if err.errno == errno.EBADMSG:
self.sys_resolution.add_unhealthy_reason(
UnhealthyReason.OSERROR_BAD_MESSAGE
)
raise DockerError(
f"Can't read tar file {tar_file}: {err}", _LOGGER.error
) from err
finally:
if image_tar_stream:
await self.sys_run_in_executor(image_tar_stream.close)
docker_image_list: list[str] = []
for chunk in resp:
@@ -1066,12 +1083,13 @@ class DockerAPI(CoreSysAttributes):
image_tar_stream: BufferedWriter | None = None
try:
image_tar_stream = image_writer = cast(
BufferedWriter, await self.sys_run_in_executor(tar_file.open, "wb")
# Lambda avoids need for a cast here. Since return type of open is based on mode
image_tar_stream = await self.sys_run_in_executor(
lambda: tar_file.open("wb")
)
async with self.images.export_image(f"{image}:{version}") as content:
async for chunk in content.iter_chunked(DEFAULT_CHUNK_SIZE):
await self.sys_run_in_executor(image_writer.write, chunk)
await self.sys_run_in_executor(image_tar_stream.write, chunk)
except aiodocker.DockerError as err:
raise DockerError(
f"Can't fetch image {image}:{version}: {err}", _LOGGER.error

View File

@@ -18,7 +18,7 @@ from aiodocker.system import DockerSystem
from aiohttp import ClientSession, web
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
from blockbuster import BlockBuster, blockbuster_ctx
from blockbuster import BlockBuster, BlockBusterFunction
from dbus_fast import BusType
from dbus_fast.aio.message_bus import MessageBus
import pytest
@@ -94,9 +94,17 @@ def blockbuster(request: pytest.FixtureRequest) -> BlockBuster | None:
# But it will ignore calls to libraries and such that do blocking I/O directly from tests
# Removing that would be nice but a todo for the future
# pylint: disable-next=contextmanager-generator-missing-cleanup
with blockbuster_ctx(scanned_modules=["supervisor"]) as bb:
yield bb
SCANNED_MODULES = ["supervisor"]
blockbuster = BlockBuster(scanned_modules=SCANNED_MODULES)
blockbuster.functions["pathlib.Path.open"] = BlockBusterFunction(
Path, "open", scanned_modules=SCANNED_MODULES
)
blockbuster.functions["pathlib.Path.close"] = BlockBusterFunction(
Path, "close", scanned_modules=SCANNED_MODULES
)
blockbuster.activate()
yield blockbuster
blockbuster.deactivate()
@pytest.fixture

View File

@@ -210,28 +210,14 @@
}
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "067cd11a63f96d227dcc0f01d3e4f5053c368021becd0b4b2da4f301cfda3d29",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SandboxKey": "/var/run/docker/netns/067cd11a63f9",
"Ports": {
"1883/tcp": [
{ "HostIp": "0.0.0.0", "HostPort": "1883" },
{ "HostIp": "::", "HostPort": "1883" }
]
},
"SandboxKey": "/var/run/docker/netns/067cd11a63f9",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"hassio": {
"IPAMConfig": null,

View File

@@ -1,5 +1,6 @@
"""Tests for apparmor utility."""
import asyncio
from pathlib import Path
import pytest
@@ -31,13 +32,20 @@ profile test flags=(attach_disconnected,mediate_deleted) {
async def test_valid_apparmor_file():
"""Test a valid apparmor file."""
assert validate_profile("example", get_fixture_path("apparmor_valid.txt"))
assert await asyncio.get_running_loop().run_in_executor(
None, validate_profile, "example", get_fixture_path("apparmor_valid.txt")
)
async def test_apparmor_missing_profile(caplog: pytest.LogCaptureFixture):
"""Test apparmor file missing profile."""
with pytest.raises(AppArmorInvalidError):
validate_profile("example", get_fixture_path("apparmor_no_profile.txt"))
await asyncio.get_running_loop().run_in_executor(
None,
validate_profile,
"example",
get_fixture_path("apparmor_no_profile.txt"),
)
assert (
"Missing AppArmor profile inside file: apparmor_no_profile.txt" in caplog.text
@@ -47,7 +55,12 @@ async def test_apparmor_missing_profile(caplog: pytest.LogCaptureFixture):
async def test_apparmor_multiple_profiles(caplog: pytest.LogCaptureFixture):
"""Test apparmor file with too many profiles."""
with pytest.raises(AppArmorInvalidError):
validate_profile("example", get_fixture_path("apparmor_multiple_profiles.txt"))
await asyncio.get_running_loop().run_in_executor(
None,
validate_profile,
"example",
get_fixture_path("apparmor_multiple_profiles.txt"),
)
assert (
"Too many AppArmor profiles inside file: apparmor_multiple_profiles.txt"