Compare commits


2 Commits

Author        SHA1        Message                                      Date
Stefan Agner  eb9988929b  Add and fix tests                            2025-08-25 19:10:55 +02:00
Stefan Agner  7d06dfcecc  Round down to avoid floating point issues    2025-08-25 19:09:40 +02:00

    Round down the progress percentage to avoid floating point issues
    when checking whether the percentage actually increased.
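
For context, the failure mode the second commit guards against is classic float drift: a recomputed percentage can land a hair below the stored value and trip the monotonic-progress check. A minimal sketch of the floor-to-one-decimal converter that appears in the jobs diff further down (the helper name here is illustrative):

import math

def floor_progress(value: float) -> float:
    # Mirrors the converter added to SupervisorJob.progress: keep one
    # decimal place, always rounding down, so accumulated float error
    # cannot make a stored progress value creep past a later, equal one.
    return math.floor(value * 10) / 10

accumulated = sum([0.1] * 3) * 100   # 30.000000000000004, not 30.0
assert accumulated > 30              # raw comparison misfires
assert floor_progress(accumulated) == 30.0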
61 changed files with 299 additions and 1674 deletions

View File

@@ -125,7 +125,7 @@ jobs:
- name: Set up Python ${{ env.DEFAULT_PYTHON }}
if: needs.init.outputs.publish == 'true'
uses: actions/setup-python@v6.0.0
uses: actions/setup-python@v5.6.0
with:
python-version: ${{ env.DEFAULT_PYTHON }}

View File

@@ -29,7 +29,7 @@ jobs:
uses: actions/checkout@v5.0.0
- name: Set up Python
id: python
uses: actions/setup-python@v6.0.0
uses: actions/setup-python@v5.6.0
with:
python-version: ${{ env.DEFAULT_PYTHON }}
- name: Restore Python virtual environment
@@ -70,7 +70,7 @@ jobs:
- name: Check out code from GitHub
uses: actions/checkout@v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@v6.0.0
uses: actions/setup-python@v5.6.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -113,7 +113,7 @@ jobs:
- name: Check out code from GitHub
uses: actions/checkout@v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@v6.0.0
uses: actions/setup-python@v5.6.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -171,7 +171,7 @@ jobs:
- name: Check out code from GitHub
uses: actions/checkout@v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@v6.0.0
uses: actions/setup-python@v5.6.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -215,7 +215,7 @@ jobs:
- name: Check out code from GitHub
uses: actions/checkout@v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@v6.0.0
uses: actions/setup-python@v5.6.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -259,7 +259,7 @@ jobs:
- name: Check out code from GitHub
uses: actions/checkout@v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@v6.0.0
uses: actions/setup-python@v5.6.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -295,7 +295,7 @@ jobs:
- name: Check out code from GitHub
uses: actions/checkout@v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@v6.0.0
uses: actions/setup-python@v5.6.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -341,7 +341,7 @@ jobs:
- name: Check out code from GitHub
uses: actions/checkout@v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@v6.0.0
uses: actions/setup-python@v5.6.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -400,7 +400,7 @@ jobs:
- name: Check out code from GitHub
uses: actions/checkout@v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@v6.0.0
uses: actions/setup-python@v5.6.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -428,4 +428,4 @@ jobs:
coverage report
coverage xml
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v5.5.1
uses: codecov/codecov-action@v5.5.0

View File

@@ -12,7 +12,7 @@ jobs:
if: github.event.issue.type.name == 'Task'
steps:
- name: Check if user is authorized
uses: actions/github-script@v8
uses: actions/github-script@v7
with:
script: |
const issueAuthor = context.payload.issue.user.login;

View File

@@ -9,7 +9,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v10.0.0
- uses: actions/stale@v9.1.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-stale: 30

View File

@@ -8,7 +8,7 @@ brotli==1.1.0
ciso8601==2.3.3
colorlog==6.9.0
cpe==1.3.1
cryptography==45.0.7
cryptography==45.0.6
debugpy==1.8.16
deepmerge==2.0
dirhash==0.5.0
@@ -17,13 +17,13 @@ faust-cchardet==2.1.19
gitpython==3.1.45
jinja2==3.1.6
log-rate-limit==1.4.2
orjson==3.11.3
orjson==3.11.2
pulsectl==24.12.0
pyudev==0.24.3
PyYAML==6.0.2
requests==2.32.5
securetar==2025.2.1
sentry-sdk==2.36.0
sentry-sdk==2.35.0
setuptools==80.9.0
voluptuous==0.15.2
dbus-fast==2.44.3

View File

@@ -1,5 +1,5 @@
astroid==3.3.11
coverage==7.10.6
coverage==7.10.5
mypy==1.17.1
pre-commit==4.3.0
pylint==3.3.8
@@ -7,8 +7,8 @@ pytest-aiohttp==1.1.0
pytest-asyncio==0.25.2
pytest-cov==6.2.1
pytest-timeout==2.4.0
pytest==8.4.2
ruff==0.12.12
pytest==8.4.1
ruff==0.12.10
time-machine==2.19.0
types-docker==7.1.0.20250822
types-pyyaml==6.0.12.20250822

View File

@@ -67,9 +67,9 @@ from ..docker.monitor import DockerContainerStateEvent
from ..docker.stats import DockerStats
from ..exceptions import (
AddonConfigurationError,
AddonNotSupportedError,
AddonsError,
AddonsJobError,
AddonsNotSupportedError,
ConfigurationFileError,
DockerError,
HomeAssistantAPIError,
@@ -1172,7 +1172,7 @@ class Addon(AddonModel):
async def write_stdin(self, data) -> None:
"""Write data to add-on stdin."""
if not self.with_stdin:
raise AddonNotSupportedError(
raise AddonsNotSupportedError(
f"Add-on {self.slug} does not support writing to stdin!", _LOGGER.error
)
@@ -1419,7 +1419,7 @@ class Addon(AddonModel):
# If available
if not self._available(data[ATTR_SYSTEM]):
raise AddonNotSupportedError(
raise AddonsNotSupportedError(
f"Add-on {self.slug} is not available for this platform",
_LOGGER.error,
)

View File

@@ -14,9 +14,9 @@ from supervisor.jobs.const import JobConcurrency
from ..const import AddonBoot, AddonStartup, AddonState
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import (
AddonNotSupportedError,
AddonsError,
AddonsJobError,
AddonsNotSupportedError,
CoreDNSError,
DockerError,
HassioError,
@@ -184,9 +184,7 @@ class AddonManager(CoreSysAttributes):
on_condition=AddonsJobError,
concurrency=JobConcurrency.QUEUE,
)
async def install(
self, slug: str, *, validation_complete: asyncio.Event | None = None
) -> None:
async def install(self, slug: str) -> None:
"""Install an add-on."""
self.sys_jobs.current.reference = slug
@@ -199,10 +197,6 @@ class AddonManager(CoreSysAttributes):
store.validate_availability()
# If being run in the background, notify caller that validation has completed
if validation_complete:
validation_complete.set()
await Addon(self.coresys, slug).install()
_LOGGER.info("Add-on '%s' successfully installed", slug)
@@ -232,11 +226,7 @@ class AddonManager(CoreSysAttributes):
on_condition=AddonsJobError,
)
async def update(
self,
slug: str,
backup: bool | None = False,
*,
validation_complete: asyncio.Event | None = None,
self, slug: str, backup: bool | None = False
) -> asyncio.Task | None:
"""Update add-on.
@@ -261,10 +251,6 @@ class AddonManager(CoreSysAttributes):
# Check if available, maybe something has changed
store.validate_availability()
# If being run in the background, notify caller that validation has completed
if validation_complete:
validation_complete.set()
if backup:
await self.sys_backups.do_backup_partial(
name=f"addon_{addon.slug}_{addon.version}",
@@ -307,7 +293,7 @@ class AddonManager(CoreSysAttributes):
"Version changed, use Update instead Rebuild", _LOGGER.error
)
if not force and not addon.need_build:
raise AddonNotSupportedError(
raise AddonsNotSupportedError(
"Can't rebuild a image based add-on", _LOGGER.error
)

View File

@@ -89,12 +89,7 @@ from ..const import (
)
from ..coresys import CoreSys
from ..docker.const import Capabilities
from ..exceptions import (
AddonNotSupportedArchitectureError,
AddonNotSupportedError,
AddonNotSupportedHomeAssistantVersionError,
AddonNotSupportedMachineTypeError,
)
from ..exceptions import AddonsNotSupportedError
from ..jobs.const import JOB_GROUP_ADDON
from ..jobs.job_group import JobGroup
from ..utils import version_is_new_enough
@@ -685,8 +680,9 @@ class AddonModel(JobGroup, ABC):
"""Validate if addon is available for current system."""
# Architecture
if not self.sys_arch.is_supported(config[ATTR_ARCH]):
raise AddonNotSupportedArchitectureError(
logger, slug=self.slug, architectures=config[ATTR_ARCH]
raise AddonsNotSupportedError(
f"Add-on {self.slug} not supported on this platform, supported architectures: {', '.join(config[ATTR_ARCH])}",
logger,
)
# Machine / Hardware
@@ -694,8 +690,9 @@ class AddonModel(JobGroup, ABC):
if machine and (
f"!{self.sys_machine}" in machine or self.sys_machine not in machine
):
raise AddonNotSupportedMachineTypeError(
logger, slug=self.slug, machine_types=machine
raise AddonsNotSupportedError(
f"Add-on {self.slug} not supported on this machine, supported machine types: {', '.join(machine)}",
logger,
)
# Home Assistant
@@ -704,15 +701,16 @@ class AddonModel(JobGroup, ABC):
if version and not version_is_new_enough(
self.sys_homeassistant.version, version
):
raise AddonNotSupportedHomeAssistantVersionError(
logger, slug=self.slug, version=str(version)
raise AddonsNotSupportedError(
f"Add-on {self.slug} not supported on this system, requires Home Assistant version {version} or greater",
logger,
)
def _available(self, config) -> bool:
"""Return True if this add-on is available on this platform."""
try:
self._validate_availability(config)
except AddonNotSupportedError:
except AddonsNotSupportedError:
return False
return True

View File

@@ -266,23 +266,10 @@ def _migrate_addon_config(protocol=False):
volumes = []
for entry in config.get(ATTR_MAP, []):
if isinstance(entry, dict):
# Validate that dict entries have required 'type' field
if ATTR_TYPE not in entry:
_LOGGER.warning(
"Add-on config has invalid map entry missing 'type' field: %s. Skipping invalid entry for %s",
entry,
name,
)
continue
volumes.append(entry)
if isinstance(entry, str):
result = RE_VOLUME.match(entry)
if not result:
_LOGGER.warning(
"Add-on config has invalid map entry: %s. Skipping invalid entry for %s",
entry,
name,
)
continue
volumes.append(
{
@@ -291,8 +278,8 @@ def _migrate_addon_config(protocol=False):
}
)
# Always update config to clear potentially malformed ones
config[ATTR_MAP] = volumes
if volumes:
config[ATTR_MAP] = volumes
# 2023-10 "config" became "homeassistant" so /config can be used for addon's public config
if any(volume[ATTR_TYPE] == MappingType.CONFIG for volume in volumes):

View File

@@ -146,14 +146,6 @@ class RestAPI(CoreSysAttributes):
follow=True,
),
),
web.get(
f"{path}/logs/latest",
partial(
self._api_host.advanced_logs,
identifier=syslog_identifier,
latest=True,
),
),
web.get(
f"{path}/logs/boots/{{bootid}}",
partial(self._api_host.advanced_logs, identifier=syslog_identifier),
@@ -448,7 +440,6 @@ class RestAPI(CoreSysAttributes):
# is known and reported to the user using the resolution center.
await async_capture_exception(err)
kwargs.pop("follow", None) # Follow is not supported for Docker logs
kwargs.pop("latest", None) # Latest is not supported for Docker logs
return await api_supervisor.logs(*args, **kwargs)
self.webapp.add_routes(
@@ -458,10 +449,6 @@ class RestAPI(CoreSysAttributes):
"/supervisor/logs/follow",
partial(get_supervisor_logs, follow=True),
),
web.get(
"/supervisor/logs/latest",
partial(get_supervisor_logs, latest=True),
),
web.get("/supervisor/logs/boots/{bootid}", get_supervisor_logs),
web.get(
"/supervisor/logs/boots/{bootid}/follow",
@@ -574,10 +561,6 @@ class RestAPI(CoreSysAttributes):
"/addons/{addon}/logs/follow",
partial(get_addon_logs, follow=True),
),
web.get(
"/addons/{addon}/logs/latest",
partial(get_addon_logs, latest=True),
),
web.get("/addons/{addon}/logs/boots/{bootid}", get_addon_logs),
web.get(
"/addons/{addon}/logs/boots/{bootid}/follow",
@@ -752,10 +735,6 @@ class RestAPI(CoreSysAttributes):
"/store/addons/{addon}/documentation",
api_store.addons_addon_documentation,
),
web.get(
"/store/addons/{addon}/availability",
api_store.addons_addon_availability,
),
web.post(
"/store/addons/{addon}/install", api_store.addons_addon_install
),

View File

@@ -3,6 +3,7 @@
from __future__ import annotations
import asyncio
from collections.abc import Callable
import errno
from io import IOBase
import logging
@@ -45,9 +46,12 @@ from ..const import (
ATTR_TYPE,
ATTR_VERSION,
REQUEST_FROM,
BusEvent,
CoreState,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIError, APIForbidden, APINotFound
from ..jobs import JobSchedulerOptions, SupervisorJob
from ..mounts.const import MountUsage
from ..resolution.const import UnhealthyReason
from .const import (
@@ -57,7 +61,7 @@ from .const import (
ATTR_LOCATIONS,
CONTENT_TYPE_TAR,
)
from .utils import api_process, api_validate, background_task
from .utils import api_process, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -285,6 +289,41 @@ class APIBackups(CoreSysAttributes):
f"Location {LOCATION_CLOUD_BACKUP} is only available for Home Assistant"
)
async def _background_backup_task(
self, backup_method: Callable, *args, **kwargs
) -> tuple[asyncio.Task, str]:
"""Start backup task in background and return task and job ID."""
event = asyncio.Event()
job, backup_task = cast(
tuple[SupervisorJob, asyncio.Task],
self.sys_jobs.schedule_job(
backup_method, JobSchedulerOptions(), *args, **kwargs
),
)
async def release_on_freeze(new_state: CoreState):
if new_state == CoreState.FREEZE:
event.set()
# Wait for system to get into freeze state before returning
# If the backup fails validation it will raise before getting there
listener = self.sys_bus.register_event(
BusEvent.SUPERVISOR_STATE_CHANGE, release_on_freeze
)
try:
event_task = self.sys_create_task(event.wait())
_, pending = await asyncio.wait(
(backup_task, event_task),
return_when=asyncio.FIRST_COMPLETED,
)
# It seems backup returned early (error or something), make sure to cancel
# the event task to avoid "Task was destroyed but it is pending!" errors.
if event_task in pending:
event_task.cancel()
return (backup_task, job.uuid)
finally:
self.sys_bus.remove_listener(listener)
@api_process
async def backup_full(self, request: web.Request):
"""Create full backup."""
@@ -303,8 +342,8 @@ class APIBackups(CoreSysAttributes):
body[ATTR_ADDITIONAL_LOCATIONS] = locations
background = body.pop(ATTR_BACKGROUND)
backup_task, job_id = await background_task(
self, self.sys_backups.do_backup_full, **body
backup_task, job_id = await self._background_backup_task(
self.sys_backups.do_backup_full, **body
)
if background and not backup_task.done():
@@ -339,8 +378,8 @@ class APIBackups(CoreSysAttributes):
body[ATTR_ADDONS] = list(self.sys_addons.local)
background = body.pop(ATTR_BACKGROUND)
backup_task, job_id = await background_task(
self, self.sys_backups.do_backup_partial, **body
backup_task, job_id = await self._background_backup_task(
self.sys_backups.do_backup_partial, **body
)
if background and not backup_task.done():
@@ -363,8 +402,8 @@ class APIBackups(CoreSysAttributes):
request, body.get(ATTR_LOCATION, backup.location)
)
background = body.pop(ATTR_BACKGROUND)
restore_task, job_id = await background_task(
self, self.sys_backups.do_restore_full, backup, **body
restore_task, job_id = await self._background_backup_task(
self.sys_backups.do_restore_full, backup, **body
)
if background and not restore_task.done() or await restore_task:
@@ -383,8 +422,8 @@ class APIBackups(CoreSysAttributes):
request, body.get(ATTR_LOCATION, backup.location)
)
background = body.pop(ATTR_BACKGROUND)
restore_task, job_id = await background_task(
self, self.sys_backups.do_restore_partial, backup, **body
restore_task, job_id = await self._background_backup_task(
self.sys_backups.do_restore_partial, backup, **body
)
if background and not restore_task.done() or await restore_task:

View File

@@ -20,7 +20,6 @@ from ..const import (
ATTR_CPU_PERCENT,
ATTR_IMAGE,
ATTR_IP_ADDRESS,
ATTR_JOB_ID,
ATTR_MACHINE,
ATTR_MEMORY_LIMIT,
ATTR_MEMORY_PERCENT,
@@ -38,8 +37,8 @@ from ..const import (
from ..coresys import CoreSysAttributes
from ..exceptions import APIDBMigrationInProgress, APIError
from ..validate import docker_image, network_port, version_tag
from .const import ATTR_BACKGROUND, ATTR_FORCE, ATTR_SAFE_MODE
from .utils import api_process, api_validate, background_task
from .const import ATTR_FORCE, ATTR_SAFE_MODE
from .utils import api_process, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -62,7 +61,6 @@ SCHEMA_UPDATE = vol.Schema(
{
vol.Optional(ATTR_VERSION): version_tag,
vol.Optional(ATTR_BACKUP): bool,
vol.Optional(ATTR_BACKGROUND, default=False): bool,
}
)
@@ -172,24 +170,18 @@ class APIHomeAssistant(CoreSysAttributes):
}
@api_process
async def update(self, request: web.Request) -> dict[str, str] | None:
async def update(self, request: web.Request) -> None:
"""Update Home Assistant."""
body = await api_validate(SCHEMA_UPDATE, request)
await self._check_offline_migration()
background = body[ATTR_BACKGROUND]
update_task, job_id = await background_task(
self,
self.sys_homeassistant.core.update,
version=body.get(ATTR_VERSION, self.sys_homeassistant.latest_version),
backup=body.get(ATTR_BACKUP),
await asyncio.shield(
self.sys_homeassistant.core.update(
version=body.get(ATTR_VERSION, self.sys_homeassistant.latest_version),
backup=body.get(ATTR_BACKUP),
)
)
if background and not update_task.done():
return {ATTR_JOB_ID: job_id}
return await update_task
@api_process
async def stop(self, request: web.Request) -> Awaitable[None]:
"""Stop Home Assistant."""

View File

@@ -2,13 +2,11 @@
import asyncio
from contextlib import suppress
import datetime
import logging
from typing import Any
from aiohttp import ClientConnectionResetError, ClientPayloadError, web
from aiohttp.hdrs import ACCEPT, RANGE
from docker.models.containers import Container
import voluptuous as vol
from voluptuous.error import CoerceInvalid
@@ -196,11 +194,7 @@ class APIHost(CoreSysAttributes):
return possible_offset
async def advanced_logs_handler(
self,
request: web.Request,
identifier: str | None = None,
follow: bool = False,
latest: bool = False,
self, request: web.Request, identifier: str | None = None, follow: bool = False
) -> web.StreamResponse:
"""Return systemd-journald logs."""
log_formatter = LogFormatter.PLAIN
@@ -219,17 +213,6 @@ class APIHost(CoreSysAttributes):
if follow:
params[PARAM_FOLLOW] = ""
since: datetime.datetime | None = None
if latest:
if not identifier:
raise APIError(
"Latest logs can only be fetched for a specific identifier."
)
if not (since := await self._get_container_start_time(identifier)):
raise APIError(
f"Cannot determine start time of {identifier}, is it a Docker container name?"
)
if ACCEPT in request.headers and request.headers[ACCEPT] not in [
CONTENT_TYPE_TEXT,
CONTENT_TYPE_X_LOG,
@@ -256,14 +239,8 @@ class APIHost(CoreSysAttributes):
# instead. Since this is really an edge case that doesn't matter much, we'll just
# return 2 lines at minimum.
lines = max(2, lines)
if since:
# realtime=[since]:[until][[:num_skip]:num_entries]
range_header = f"realtime={int(since.timestamp())}::0:{lines}"
else:
# entries=cursor[[:num_skip]:num_entries]
range_header = f"entries=:-{lines - 1}:{SYSTEMD_JOURNAL_GATEWAYD_LINES_MAX if follow else lines}"
elif since:
range_header = f"realtime={int(since.timestamp())}::0:{SYSTEMD_JOURNAL_GATEWAYD_LINES_MAX}"
# entries=cursor[[:num_skip]:num_entries]
range_header = f"entries=:-{lines - 1}:{SYSTEMD_JOURNAL_GATEWAYD_LINES_MAX if follow else lines}"
elif RANGE in request.headers:
range_header = request.headers[RANGE]
else:
@@ -309,14 +286,10 @@ class APIHost(CoreSysAttributes):
@api_process_raw(CONTENT_TYPE_TEXT, error_type=CONTENT_TYPE_TEXT)
async def advanced_logs(
self,
request: web.Request,
identifier: str | None = None,
follow: bool = False,
latest: bool = False,
self, request: web.Request, identifier: str | None = None, follow: bool = False
) -> web.StreamResponse:
"""Return systemd-journald logs. Wrapped as standard API handler."""
return await self.advanced_logs_handler(request, identifier, follow, latest)
return await self.advanced_logs_handler(request, identifier, follow)
@api_process
async def disk_usage(self, request: web.Request) -> dict:
@@ -363,24 +336,3 @@ class APIHost(CoreSysAttributes):
*known_paths,
],
}
async def _get_container_start_time(
self, identifier: str
) -> datetime.datetime | None:
"""Get container start time for the given syslog identifier."""
container: Container = self.sys_docker.containers.get(identifier)
if not container:
return None
if not (started_at := container.attrs.get("State", {}).get("StartedAt")):
return None
try:
return datetime.datetime.fromisoformat(started_at)
except ValueError:
_LOGGER.warning(
"Failed to parse StartedAt time of %s container, got: %s",
identifier,
started_at,
)
return None
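
To make the removed branching concrete, a standalone sketch of the Range header construction; SYSTEMD_JOURNAL_GATEWAYD_LINES_MAX is assumed to be 2**64 - 1, matching the 18446744073709551615 in the removed test near the end of this diff:

import datetime

SYSTEMD_JOURNAL_GATEWAYD_LINES_MAX = 2**64 - 1  # assumed value, per the removed test

def build_range_header(
    lines: int, follow: bool, since: datetime.datetime | None
) -> str:
    lines = max(2, lines)  # edge case noted above: always return at least 2 lines
    if since:
        # realtime=[since]:[until][[:num_skip]:num_entries]
        return f"realtime={int(since.timestamp())}::0:{lines}"
    # entries=cursor[[:num_skip]:num_entries]
    return f"entries=:-{lines - 1}:{SYSTEMD_JOURNAL_GATEWAYD_LINES_MAX if follow else lines}"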

View File

@@ -199,25 +199,21 @@ class APIIngress(CoreSysAttributes):
url = f"{url}?{request.query_string}"
# Start proxy
try:
_LOGGER.debug("Proxing WebSocket to %s, upstream url: %s", addon.slug, url)
async with self.sys_websession.ws_connect(
url,
headers=source_header,
protocols=req_protocols,
autoclose=False,
autoping=False,
) as ws_client:
# Proxy requests
await asyncio.wait(
[
self.sys_create_task(_websocket_forward(ws_server, ws_client)),
self.sys_create_task(_websocket_forward(ws_client, ws_server)),
],
return_when=asyncio.FIRST_COMPLETED,
)
except TimeoutError:
_LOGGER.warning("WebSocket proxy to %s timed out", addon.slug)
async with self.sys_websession.ws_connect(
url,
headers=source_header,
protocols=req_protocols,
autoclose=False,
autoping=False,
) as ws_client:
# Proxy requests
await asyncio.wait(
[
self.sys_create_task(_websocket_forward(ws_server, ws_client)),
self.sys_create_task(_websocket_forward(ws_client, ws_server)),
],
return_when=asyncio.FIRST_COMPLETED,
)
return ws_server
@@ -290,7 +286,6 @@ class APIIngress(CoreSysAttributes):
aiohttp.ClientError,
aiohttp.ClientPayloadError,
ConnectionResetError,
ConnectionError,
) as err:
_LOGGER.error("Stream error with %s: %s", url, err)
@@ -391,9 +386,9 @@ async def _websocket_forward(ws_from, ws_to):
elif msg.type == aiohttp.WSMsgType.BINARY:
await ws_to.send_bytes(msg.data)
elif msg.type == aiohttp.WSMsgType.PING:
await ws_to.ping(msg.data)
await ws_to.ping()
elif msg.type == aiohttp.WSMsgType.PONG:
await ws_to.pong(msg.data)
await ws_to.pong()
elif ws_to.closed:
await ws_to.close(code=ws_to.close_code, message=msg.extra)
except RuntimeError:

View File

@@ -26,9 +26,7 @@ from ..const import (
ATTR_IP6_PRIVACY,
ATTR_IPV4,
ATTR_IPV6,
ATTR_LLMNR,
ATTR_MAC,
ATTR_MDNS,
ATTR_METHOD,
ATTR_MODE,
ATTR_NAMESERVERS,
@@ -56,7 +54,6 @@ from ..host.configuration import (
Ip6Setting,
IpConfig,
IpSetting,
MulticastDnsMode,
VlanConfig,
WifiConfig,
)
@@ -100,8 +97,6 @@ SCHEMA_UPDATE = vol.Schema(
vol.Optional(ATTR_IPV6): _SCHEMA_IPV6_CONFIG,
vol.Optional(ATTR_WIFI): _SCHEMA_WIFI_CONFIG,
vol.Optional(ATTR_ENABLED): vol.Boolean(),
vol.Optional(ATTR_MDNS): vol.Coerce(MulticastDnsMode),
vol.Optional(ATTR_LLMNR): vol.Coerce(MulticastDnsMode),
}
)
@@ -165,8 +160,6 @@ def interface_struct(interface: Interface) -> dict[str, Any]:
else None,
ATTR_WIFI: wifi_struct(interface.wifi) if interface.wifi else None,
ATTR_VLAN: vlan_struct(interface.vlan) if interface.vlan else None,
ATTR_MDNS: interface.mdns,
ATTR_LLMNR: interface.llmnr,
}
@@ -267,10 +260,6 @@ class APINetwork(CoreSysAttributes):
)
elif key == ATTR_ENABLED:
interface.enabled = config
elif key == ATTR_MDNS:
interface.mdns = config
elif key == ATTR_LLMNR:
interface.llmnr = config
await asyncio.shield(self.sys_host.network.apply_changes(interface))
@@ -311,15 +300,6 @@ class APINetwork(CoreSysAttributes):
vlan_config = VlanConfig(vlan, interface.name)
mdns_mode = MulticastDnsMode.DEFAULT
llmnr_mode = MulticastDnsMode.DEFAULT
if ATTR_MDNS in body:
mdns_mode = body[ATTR_MDNS]
if ATTR_LLMNR in body:
llmnr_mode = body[ATTR_LLMNR]
ipv4_setting = None
if ATTR_IPV4 in body:
ipv4_setting = IpSetting(
@@ -358,7 +338,5 @@ class APINetwork(CoreSysAttributes):
ipv6_setting,
None,
vlan_config,
mdns=mdns_mode,
llmnr=llmnr_mode,
)
await asyncio.shield(self.sys_host.network.create_vlan(vlan_interface))

View File

@@ -1,6 +1,7 @@
"""Init file for Supervisor Home Assistant RESTful API."""
import asyncio
from collections.abc import Awaitable
from pathlib import Path
from typing import Any, cast
@@ -35,7 +36,6 @@ from ..const import (
ATTR_ICON,
ATTR_INGRESS,
ATTR_INSTALLED,
ATTR_JOB_ID,
ATTR_LOGO,
ATTR_LONG_DESCRIPTION,
ATTR_MAINTAINER,
@@ -57,13 +57,11 @@ from ..exceptions import APIError, APIForbidden, APINotFound
from ..store.addon import AddonStore
from ..store.repository import Repository
from ..store.validate import validate_repository
from .const import ATTR_BACKGROUND, CONTENT_TYPE_PNG, CONTENT_TYPE_TEXT
from .utils import background_task
from .const import CONTENT_TYPE_PNG, CONTENT_TYPE_TEXT
SCHEMA_UPDATE = vol.Schema(
{
vol.Optional(ATTR_BACKUP): bool,
vol.Optional(ATTR_BACKGROUND, default=False): bool,
}
)
@@ -71,12 +69,6 @@ SCHEMA_ADD_REPOSITORY = vol.Schema(
{vol.Required(ATTR_REPOSITORY): vol.All(str, validate_repository)}
)
SCHEMA_INSTALL = vol.Schema(
{
vol.Optional(ATTR_BACKGROUND, default=False): bool,
}
)
def _read_static_text_file(path: Path) -> Any:
"""Read in a static text file asset for API output.
@@ -225,45 +217,24 @@ class APIStore(CoreSysAttributes):
}
@api_process
async def addons_addon_install(self, request: web.Request) -> dict[str, str] | None:
def addons_addon_install(self, request: web.Request) -> Awaitable[None]:
"""Install add-on."""
addon = self._extract_addon(request)
body = await api_validate(SCHEMA_INSTALL, request)
background = body[ATTR_BACKGROUND]
install_task, job_id = await background_task(
self, self.sys_addons.install, addon.slug
)
if background and not install_task.done():
return {ATTR_JOB_ID: job_id}
return await install_task
return asyncio.shield(self.sys_addons.install(addon.slug))
@api_process
async def addons_addon_update(self, request: web.Request) -> dict[str, str] | None:
async def addons_addon_update(self, request: web.Request) -> None:
"""Update add-on."""
addon = self._extract_addon(request, installed=True)
if addon == request.get(REQUEST_FROM):
raise APIForbidden(f"Add-on {addon.slug} can't update itself!")
body = await api_validate(SCHEMA_UPDATE, request)
background = body[ATTR_BACKGROUND]
update_task, job_id = await background_task(
self,
self.sys_addons.update,
addon.slug,
backup=body.get(ATTR_BACKUP),
)
if background and not update_task.done():
return {ATTR_JOB_ID: job_id}
if start_task := await update_task:
if start_task := await asyncio.shield(
self.sys_addons.update(addon.slug, backup=body.get(ATTR_BACKUP))
):
await start_task
return None
@api_process
async def addons_addon_info(self, request: web.Request) -> dict[str, Any]:
@@ -326,12 +297,6 @@ class APIStore(CoreSysAttributes):
_read_static_text_file, addon.path_documentation
)
@api_process
async def addons_addon_availability(self, request: web.Request) -> None:
"""Check add-on availability for current system."""
addon = cast(AddonStore, self._extract_addon(request))
addon.validate_availability()
@api_process
async def repositories_list(self, request: web.Request) -> list[dict[str, Any]]:
"""Return all repositories."""

View File

@@ -1,9 +1,7 @@
"""Init file for Supervisor util for RESTful API."""
import asyncio
from collections.abc import Callable
import json
from typing import Any, cast
from typing import Any
from aiohttp import web
from aiohttp.hdrs import AUTHORIZATION
@@ -16,11 +14,8 @@ from ..const import (
HEADER_TOKEN,
HEADER_TOKEN_OLD,
JSON_DATA,
JSON_ERROR_KEY,
JSON_EXTRA_FIELDS,
JSON_JOB_ID,
JSON_MESSAGE,
JSON_MESSAGE_TEMPLATE,
JSON_RESULT,
REQUEST_FROM,
RESULT_ERROR,
@@ -28,7 +23,6 @@ from ..const import (
)
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import APIError, BackupFileNotFoundError, DockerAPIError, HassioError
from ..jobs import JobSchedulerOptions, SupervisorJob
from ..utils import check_exception_chain, get_message_from_exception_chain
from ..utils.json import json_dumps, json_loads as json_loads_util
from ..utils.log_format import format_message
@@ -139,11 +133,10 @@ def api_process_raw(content, *, error_type=None):
def api_return_error(
error: HassioError | None = None,
error: Exception | None = None,
message: str | None = None,
error_type: str | None = None,
status: int = 400,
*,
job_id: str | None = None,
) -> web.Response:
"""Return an API error message."""
@@ -162,18 +155,12 @@ def api_return_error(
body=message.encode(), content_type=error_type, status=status
)
case _:
result: dict[str, Any] = {
result = {
JSON_RESULT: RESULT_ERROR,
JSON_MESSAGE: message,
}
if job_id:
result[JSON_JOB_ID] = job_id
if error and error.error_key:
result[JSON_ERROR_KEY] = error.error_key
if error and error.message_template:
result[JSON_MESSAGE_TEMPLATE] = error.message_template
if error and error.extra_fields:
result[JSON_EXTRA_FIELDS] = error.extra_fields
return web.json_response(
result,
@@ -211,47 +198,3 @@ async def api_validate(
data_validated[origin_value] = data[origin_value]
return data_validated
async def background_task(
coresys_obj: CoreSysAttributes,
task_method: Callable,
*args,
**kwargs,
) -> tuple[asyncio.Task, str]:
"""Start task in background and return task and job ID.
Args:
coresys_obj: Instance that accesses coresys data using CoreSysAttributes
task_method: The method to execute in the background. Must include a keyword arg 'validation_complete' of type asyncio.Event. Should set it after any initial validation has completed
*args: Arguments to pass to task_method
**kwargs: Keyword arguments to pass to task_method
Returns:
Tuple of (task, job_id)
"""
event = asyncio.Event()
job, task = cast(
tuple[SupervisorJob, asyncio.Task],
coresys_obj.sys_jobs.schedule_job(
task_method,
JobSchedulerOptions(),
*args,
validation_complete=event,
**kwargs,
),
)
# Wait for provided event before returning
# If the task fails validation it should raise before getting there
event_task = coresys_obj.sys_create_task(event.wait())
_, pending = await asyncio.wait(
(task, event_task),
return_when=asyncio.FIRST_COMPLETED,
)
# It seems task returned early (error or something), make sure to cancel
# the event task to avoid "Task was destroyed but it is pending!" errors.
if event_task in pending:
event_task.cancel()
return (task, job.uuid)
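
Reconstructed from the backup handler hunks earlier in this diff, a hypothetical caller of this removed helper (attribute and schema names as they appear above). Because the helper only returns once validation has signalled, a background client never receives a job ID for a request that would have failed validation:

async def backup_full(self, request):
    body = await api_validate(SCHEMA_BACKUP_FULL, request)
    background = body.pop(ATTR_BACKGROUND)
    backup_task, job_id = await background_task(
        self, self.sys_backups.do_backup_full, **body
    )
    # Background mode: validation passed, work continues elsewhere;
    # hand back the job ID so the client can poll the jobs API.
    if background and not backup_task.done():
        return {ATTR_JOB_ID: job_id}
    return await backup_task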

View File

@@ -598,7 +598,6 @@ class BackupManager(FileConfiguration, JobGroup):
homeassistant_exclude_database: bool | None = None,
extra: dict | None = None,
additional_locations: list[LOCATION_TYPE] | None = None,
validation_complete: asyncio.Event | None = None,
) -> Backup | None:
"""Create a full backup."""
await self._check_location(location)
@@ -615,10 +614,6 @@ class BackupManager(FileConfiguration, JobGroup):
name, filename, BackupType.FULL, password, compressed, location, extra
)
# If being run in the background, notify caller that validation has completed
if validation_complete:
validation_complete.set()
_LOGGER.info("Creating new full backup with slug %s", new_backup.slug)
backup = await self._do_backup(
new_backup,
@@ -653,7 +648,6 @@ class BackupManager(FileConfiguration, JobGroup):
homeassistant_exclude_database: bool | None = None,
extra: dict | None = None,
additional_locations: list[LOCATION_TYPE] | None = None,
validation_complete: asyncio.Event | None = None,
) -> Backup | None:
"""Create a partial backup."""
await self._check_location(location)
@@ -690,10 +684,6 @@ class BackupManager(FileConfiguration, JobGroup):
continue
_LOGGER.warning("Add-on %s not found/installed", addon_slug)
# If being run in the background, notify caller that validation has completed
if validation_complete:
validation_complete.set()
backup = await self._do_backup(
new_backup,
addon_list,
@@ -827,10 +817,8 @@ class BackupManager(FileConfiguration, JobGroup):
async def do_restore_full(
self,
backup: Backup,
*,
password: str | None = None,
location: str | None | type[DEFAULT] = DEFAULT,
validation_complete: asyncio.Event | None = None,
) -> bool:
"""Restore a backup."""
# Add backup ID to job
@@ -850,10 +838,6 @@ class BackupManager(FileConfiguration, JobGroup):
_LOGGER.error,
)
# If being run in the background, notify caller that validation has completed
if validation_complete:
validation_complete.set()
_LOGGER.info("Full-Restore %s start", backup.slug)
await self.sys_core.set_state(CoreState.FREEZE)
@@ -892,13 +876,11 @@ class BackupManager(FileConfiguration, JobGroup):
async def do_restore_partial(
self,
backup: Backup,
*,
homeassistant: bool = False,
addons: list[str] | None = None,
folders: list[str] | None = None,
password: str | None = None,
location: str | None | type[DEFAULT] = DEFAULT,
validation_complete: asyncio.Event | None = None,
) -> bool:
"""Restore a backup."""
# Add backup ID to job
@@ -926,10 +908,6 @@ class BackupManager(FileConfiguration, JobGroup):
_LOGGER.error,
)
# If being run in the background, notify caller that validation has completed
if validation_complete:
validation_complete.set()
_LOGGER.info("Partial-Restore %s start", backup.slug)
await self.sys_core.set_state(CoreState.FREEZE)

View File

@@ -76,9 +76,6 @@ JSON_DATA = "data"
JSON_MESSAGE = "message"
JSON_RESULT = "result"
JSON_JOB_ID = "job_id"
JSON_ERROR_KEY = "error_key"
JSON_MESSAGE_TEMPLATE = "message_template"
JSON_EXTRA_FIELDS = "extra_fields"
RESULT_ERROR = "error"
RESULT_OK = "ok"
@@ -202,7 +199,6 @@ ATTR_HASSIO_API = "hassio_api"
ATTR_HASSIO_ROLE = "hassio_role"
ATTR_HASSOS = "hassos"
ATTR_HASSOS_UNRESTRICTED = "hassos_unrestricted"
ATTR_HASSOS_UPGRADE = "hassos_upgrade"
ATTR_HEALTHY = "healthy"
ATTR_HEARTBEAT_LED = "heartbeat_led"
ATTR_HOMEASSISTANT = "homeassistant"
@@ -246,7 +242,6 @@ ATTR_KERNEL_MODULES = "kernel_modules"
ATTR_LABELS = "labels"
ATTR_LAST_BOOT = "last_boot"
ATTR_LEGACY = "legacy"
ATTR_LLMNR = "llmnr"
ATTR_LOCALS = "locals"
ATTR_LOCATION = "location"
ATTR_LOGGING = "logging"
@@ -257,7 +252,6 @@ ATTR_MACHINE = "machine"
ATTR_MACHINE_ID = "machine_id"
ATTR_MAINTAINER = "maintainer"
ATTR_MAP = "map"
ATTR_MDNS = "mdns"
ATTR_MEMORY_LIMIT = "memory_limit"
ATTR_MEMORY_PERCENT = "memory_percent"
ATTR_MEMORY_USAGE = "memory_usage"

View File

@@ -196,20 +196,30 @@ class Core(CoreSysAttributes):
self.sys_resolution.add_unhealthy_reason(UnhealthyReason.SETUP)
await async_capture_exception(err)
# Set OS Agent diagnostics if needed
if (
self.sys_config.diagnostics is not None
and self.sys_dbus.agent.diagnostics != self.sys_config.diagnostics
and not self.sys_dev
and self.supported
):
try:
await self.sys_dbus.agent.set_diagnostics(self.sys_config.diagnostics)
except Exception as err: # pylint: disable=broad-except
_LOGGER.warning(
"Could not set diagnostics to %s due to %s",
self.sys_config.diagnostics,
err,
)
await async_capture_exception(err)
# Evaluate the system
await self.sys_resolution.evaluate.evaluate_system()
async def start(self) -> None:
"""Start Supervisor orchestration."""
await self.set_state(CoreState.STARTUP)
# Set OS Agent diagnostics if needed
if (
self.sys_dbus.agent.is_connected
and self.sys_config.diagnostics is not None
and self.sys_dbus.agent.diagnostics != self.sys_config.diagnostics
and self.supported
):
_LOGGER.debug("Set OS Agent diagnostics to %s", self.sys_config.diagnostics)
await self.sys_dbus.agent.set_diagnostics(self.sys_config.diagnostics)
# Check if system is healthy
if not self.supported:
_LOGGER.warning("System running in a unsupported environment!")

View File

@@ -253,7 +253,7 @@ class ConnectionType(StrEnum):
class ConnectionStateType(IntEnum):
"""Connection states.
https://networkmanager.dev/docs/api/latest/nm-dbus-types.html#NMActiveConnectionState
https://developer.gnome.org/NetworkManager/stable/nm-dbus-types.html#NMActiveConnectionState
"""
UNKNOWN = 0
@@ -266,7 +266,7 @@ class ConnectionStateType(IntEnum):
class ConnectionStateFlags(IntEnum):
"""Connection state flags.
https://networkmanager.dev/docs/api/latest/nm-dbus-types.html#NMActivationStateFlags
https://developer-old.gnome.org/NetworkManager/stable/nm-dbus-types.html#NMActivationStateFlags
"""
NONE = 0
@@ -283,7 +283,7 @@ class ConnectionStateFlags(IntEnum):
class ConnectivityState(IntEnum):
"""Network connectvity.
https://networkmanager.dev/docs/api/latest/nm-dbus-types.html#NMConnectivityState
https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NMConnectivityState
"""
CONNECTIVITY_UNKNOWN = 0
@@ -296,7 +296,7 @@ class ConnectivityState(IntEnum):
class DeviceType(IntEnum):
"""Device types.
https://networkmanager.dev/docs/api/latest/nm-dbus-types.html#NMDeviceType
https://developer.gnome.org/NetworkManager/stable/nm-dbus-types.html#NMDeviceType
"""
UNKNOWN = 0
@@ -333,15 +333,6 @@ class MulticastProtocolEnabled(StrEnum):
RESOLVE = "resolve"
class MulticastDnsValue(IntEnum):
"""Connection MulticastDNS (mdns/llmnr) values."""
DEFAULT = -1
OFF = 0
RESOLVE = 1
ANNOUNCE = 2
class DNSOverTLSEnabled(StrEnum):
"""DNS over TLS enabled."""

View File

@@ -44,7 +44,7 @@ MINIMAL_VERSION = AwesomeVersion("1.14.6")
class NetworkManager(DBusInterfaceProxy):
"""Handle D-Bus interface for Network Manager.
https://networkmanager.dev/docs/api/latest/gdbus-org.freedesktop.NetworkManager.html
https://developer.gnome.org/NetworkManager/stable/gdbus-org.freedesktop.NetworkManager.html
"""
name: str = DBUS_NAME_NM

View File

@@ -15,7 +15,7 @@ from ..interface import DBusInterfaceProxy, dbus_property
class NetworkWirelessAP(DBusInterfaceProxy):
"""NetworkWireless AP object for Network Manager.
https://networkmanager.dev/docs/api/latest/gdbus-org.freedesktop.NetworkManager.AccessPoint.html
https://developer.gnome.org/NetworkManager/stable/gdbus-org.freedesktop.NetworkManager.AccessPoint.html
"""
bus_name: str = DBUS_NAME_NM

View File

@@ -24,8 +24,6 @@ class ConnectionProperties:
uuid: str | None
type: str | None
interface_name: str | None
mdns: int | None
llmnr: int | None
@dataclass(slots=True)

View File

@@ -27,7 +27,7 @@ from .ip_configuration import IpConfiguration
class NetworkConnection(DBusInterfaceProxy):
"""Active network connection object for Network Manager.
https://networkmanager.dev/docs/api/latest/gdbus-org.freedesktop.NetworkManager.Connection.Active.html
https://developer.gnome.org/NetworkManager/stable/gdbus-org.freedesktop.NetworkManager.Connection.Active.html
"""
bus_name: str = DBUS_NAME_NM

View File

@@ -32,7 +32,7 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
class NetworkManagerDNS(DBusInterfaceProxy):
"""Handle D-Bus interface for NM DnsManager.
https://networkmanager.dev/docs/api/latest/gdbus-org.freedesktop.NetworkManager.DnsManager.html
https://developer.gnome.org/NetworkManager/stable/gdbus-org.freedesktop.NetworkManager.DnsManager.html
"""
bus_name: str = DBUS_NAME_NM

View File

@@ -27,7 +27,7 @@ from .wireless import NetworkWireless
class NetworkInterface(DBusInterfaceProxy):
"""NetworkInterface object represents Network Manager Device objects.
https://networkmanager.dev/docs/api/latest/gdbus-org.freedesktop.NetworkManager.Device.html
https://developer.gnome.org/NetworkManager/stable/gdbus-org.freedesktop.NetworkManager.Device.html
"""
bus_name: str = DBUS_NAME_NM

View File

@@ -6,7 +6,7 @@ from typing import Any
from dbus_fast import Variant
from dbus_fast.aio.message_bus import MessageBus
from ...const import DBUS_NAME_NM, MulticastDnsValue
from ...const import DBUS_NAME_NM
from ...interface import DBusInterface
from ...utils import dbus_connected
from ..configuration import (
@@ -225,7 +225,7 @@ class NetworkSetting(DBusInterface):
data = await self.get_settings()
# Get configuration settings we care about
# See: https://networkmanager.dev/docs/api/latest/nm-settings-dbus.html
# See: https://developer-old.gnome.org/NetworkManager/stable/ch01.html
if CONF_ATTR_CONNECTION in data:
self._connection = ConnectionProperties(
id=data[CONF_ATTR_CONNECTION].get(CONF_ATTR_CONNECTION_ID),
@@ -234,12 +234,6 @@ class NetworkSetting(DBusInterface):
interface_name=data[CONF_ATTR_CONNECTION].get(
CONF_ATTR_CONNECTION_INTERFACE_NAME
),
mdns=data[CONF_ATTR_CONNECTION].get(
CONF_ATTR_CONNECTION_MDNS, MulticastDnsValue.DEFAULT.value
),
llmnr=data[CONF_ATTR_CONNECTION].get(
CONF_ATTR_CONNECTION_LLMNR, MulticastDnsValue.DEFAULT.value
),
)
if CONF_ATTR_802_ETHERNET in data:

View File

@@ -14,9 +14,7 @@ from ....host.const import (
InterfaceIp6Privacy,
InterfaceMethod,
InterfaceType,
MulticastDnsMode,
)
from ...const import MulticastDnsValue
from .. import NetworkManager
from . import (
CONF_ATTR_802_ETHERNET,
@@ -60,14 +58,6 @@ if TYPE_CHECKING:
from ....host.configuration import Interface
MULTICAST_DNS_MODE_VALUE_MAPPING = {
MulticastDnsMode.DEFAULT: MulticastDnsValue.DEFAULT,
MulticastDnsMode.OFF: MulticastDnsValue.OFF,
MulticastDnsMode.RESOLVE: MulticastDnsValue.RESOLVE,
MulticastDnsMode.ANNOUNCE: MulticastDnsValue.ANNOUNCE,
}
def _get_ipv4_connection_settings(ipv4setting: IpSetting | None) -> dict:
ipv4 = {}
if not ipv4setting or ipv4setting.method == InterfaceMethod.AUTO:
@@ -173,13 +163,6 @@ def _get_ipv6_connection_settings(
return ipv6
def _map_mdns_setting(mode: MulticastDnsMode | None) -> MulticastDnsValue:
if mode is None:
return MulticastDnsValue.DEFAULT
return MULTICAST_DNS_MODE_VALUE_MAPPING.get(mode, MulticastDnsValue.DEFAULT)
def get_connection_from_interface(
interface: Interface,
network_manager: NetworkManager,
@@ -206,16 +189,13 @@ def get_connection_from_interface(
if not uuid:
uuid = str(uuid4())
llmnr = _map_mdns_setting(interface.llmnr)
mdns = _map_mdns_setting(interface.mdns)
conn: dict[str, dict[str, Variant]] = {
CONF_ATTR_CONNECTION: {
CONF_ATTR_CONNECTION_ID: Variant("s", name),
CONF_ATTR_CONNECTION_UUID: Variant("s", uuid),
CONF_ATTR_CONNECTION_TYPE: Variant("s", iftype),
CONF_ATTR_CONNECTION_LLMNR: Variant("i", int(llmnr)),
CONF_ATTR_CONNECTION_MDNS: Variant("i", int(mdns)),
CONF_ATTR_CONNECTION_LLMNR: Variant("i", 2),
CONF_ATTR_CONNECTION_MDNS: Variant("i", 2),
CONF_ATTR_CONNECTION_AUTOCONNECT: Variant("b", True),
},
}

View File

@@ -17,7 +17,7 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
class NetworkManagerSettings(DBusInterface):
"""Handle D-Bus interface for Network Manager Connection Settings Profile Manager.
https://networkmanager.dev/docs/api/latest/gdbus-org.freedesktop.NetworkManager.Settings.html
https://developer.gnome.org/NetworkManager/stable/gdbus-org.freedesktop.NetworkManager.Settings.html
"""
bus_name: str = DBUS_NAME_NM

View File

@@ -21,7 +21,7 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
class NetworkWireless(DBusInterfaceProxy):
"""Wireless object for Network Manager.
https://networkmanager.dev/docs/api/latest/gdbus-org.freedesktop.NetworkManager.Device.Wireless.html
https://developer.gnome.org/NetworkManager/stable/gdbus-org.freedesktop.NetworkManager.Device.Wireless.html
"""
bus_name: str = DBUS_NAME_NM

View File

@@ -1,20 +1,15 @@
"""Docker constants."""
from __future__ import annotations
from contextlib import suppress
from enum import Enum, StrEnum
from functools import total_ordering
from pathlib import PurePath
import re
from typing import cast
from typing import Self, cast
from docker.types import Mount
from ..const import MACHINE_ID
RE_RETRYING_DOWNLOAD_STATUS = re.compile(r"Retrying in \d+ seconds?")
class Capabilities(StrEnum):
"""Linux Capabilities."""
@@ -84,7 +79,6 @@ class PullImageLayerStage(Enum):
"""
PULLING_FS_LAYER = 1, "Pulling fs layer"
RETRYING_DOWNLOAD = 2, "Retrying download"
DOWNLOADING = 2, "Downloading"
VERIFYING_CHECKSUM = 3, "Verifying Checksum"
DOWNLOAD_COMPLETE = 4, "Download complete"
@@ -113,16 +107,11 @@ class PullImageLayerStage(Enum):
return hash(self.status)
@classmethod
def from_status(cls, status: str) -> PullImageLayerStage | None:
def from_status(cls, status: str) -> Self | None:
"""Return stage instance from pull log status."""
for i in cls:
if i.status == status:
return i
# This one includes the number of seconds until the download, so it's not constant
if RE_RETRYING_DOWNLOAD_STATUS.match(status):
return cls.RETRYING_DOWNLOAD
return None
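
A quick self-contained check of the retry regex removed above; the embedded countdown makes each retry status unique, so the constant per-stage status lookup alone cannot classify it:

import re

RE_RETRYING_DOWNLOAD_STATUS = re.compile(r"Retrying in \d+ seconds?")

assert RE_RETRYING_DOWNLOAD_STATUS.match("Retrying in 1 second")
assert RE_RETRYING_DOWNLOAD_STATUS.match("Retrying in 27 seconds")
assert not RE_RETRYING_DOWNLOAD_STATUS.match("Download complete")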

View File

@@ -291,10 +291,8 @@ class DockerInterface(JobGroup, ABC):
progress = 50
case PullImageLayerStage.PULL_COMPLETE:
progress = 100
case PullImageLayerStage.RETRYING_DOWNLOAD:
progress = 0
if stage != PullImageLayerStage.RETRYING_DOWNLOAD and progress < job.progress:
if progress < job.progress:
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for job {job.uuid} that implied progress was {progress} but current progress is {job.progress}, skipping",
_LOGGER.debug,
@@ -302,7 +300,7 @@ class DockerInterface(JobGroup, ABC):
# Our filters have all passed. Time to update the job
# Only downloading and extracting have progress details. Use that to set extra
# We'll leave it around on later stages as the total bytes may be useful after that stage
# We'll leave it around on other stages as the total bytes may be useful after that stage
if (
stage in {PullImageLayerStage.DOWNLOADING, PullImageLayerStage.EXTRACTING}
and reference.progress_detail
@@ -320,9 +318,6 @@ class DockerInterface(JobGroup, ABC):
progress=progress,
stage=stage.status,
done=stage == PullImageLayerStage.PULL_COMPLETE,
extra=None
if stage == PullImageLayerStage.RETRYING_DOWNLOAD
else job.extra,
)
@Job(

View File

@@ -1,32 +1,17 @@
"""Core Exceptions."""
from collections.abc import Callable
from typing import Any
class HassioError(Exception):
"""Root exception."""
error_key: str | None = None
message_template: str | None = None
def __init__(
self,
message: str | None = None,
logger: Callable[..., None] | None = None,
*,
extra_fields: dict[str, Any] | None = None,
) -> None:
"""Raise & log."""
self.extra_fields = extra_fields or {}
if not message and self.message_template:
message = (
self.message_template.format(**self.extra_fields)
if self.extra_fields
else self.message_template
)
if logger is not None and message is not None:
logger(message)
@@ -250,71 +235,8 @@ class AddonConfigurationError(AddonsError):
"""Error with add-on configuration."""
class AddonNotSupportedError(HassioNotSupportedError):
"""Addon doesn't support a function."""
class AddonNotSupportedArchitectureError(AddonNotSupportedError):
"""Addon does not support system due to architecture."""
error_key = "addon_not_supported_architecture_error"
message_template = "Add-on {slug} not supported on this platform, supported architectures: {architectures}"
def __init__(
self,
logger: Callable[..., None] | None = None,
*,
slug: str,
architectures: list[str],
) -> None:
"""Initialize exception."""
super().__init__(
None,
logger,
extra_fields={"slug": slug, "architectures": ", ".join(architectures)},
)
class AddonNotSupportedMachineTypeError(AddonNotSupportedError):
"""Addon does not support system due to machine type."""
error_key = "addon_not_supported_machine_type_error"
message_template = "Add-on {slug} not supported on this machine, supported machine types: {machine_types}"
def __init__(
self,
logger: Callable[..., None] | None = None,
*,
slug: str,
machine_types: list[str],
) -> None:
"""Initialize exception."""
super().__init__(
None,
logger,
extra_fields={"slug": slug, "machine_types": ", ".join(machine_types)},
)
class AddonNotSupportedHomeAssistantVersionError(AddonNotSupportedError):
"""Addon does not support system due to Home Assistant version."""
error_key = "addon_not_supported_home_assistant_version_error"
message_template = "Add-on {slug} not supported on this system, requires Home Assistant version {version} or greater"
def __init__(
self,
logger: Callable[..., None] | None = None,
*,
slug: str,
version: str,
) -> None:
"""Initialize exception."""
super().__init__(
None,
logger,
extra_fields={"slug": slug, "version": version},
)
class AddonsNotSupportedError(HassioNotSupportedError):
"""Addons don't support a function."""
class AddonsJobError(AddonsError, JobException):
@@ -397,17 +319,10 @@ class APIError(HassioError, RuntimeError):
self,
message: str | None = None,
logger: Callable[..., None] | None = None,
*,
job_id: str | None = None,
error: HassioError | None = None,
) -> None:
"""Raise & log, optionally with job."""
# Allow these to be set from another error here since APIErrors essentially wrap others to add a status
self.error_key = error.error_key if error else None
self.message_template = error.message_template if error else None
super().__init__(
message, logger, extra_fields=error.extra_fields if error else None
)
super().__init__(message, logger)
self.job_id = job_id

View File

@@ -229,7 +229,6 @@ class HomeAssistantCore(JobGroup):
self,
version: AwesomeVersion | None = None,
backup: bool | None = False,
validation_complete: asyncio.Event | None = None,
) -> None:
"""Update HomeAssistant version."""
to_version = version or self.sys_homeassistant.latest_version
@@ -249,10 +248,6 @@ class HomeAssistantCore(JobGroup):
f"Version {to_version!s} is already installed", _LOGGER.warning
)
# If being run in the background, notify caller that validation has completed
if validation_complete:
validation_complete.set()
if backup:
await self.sys_backups.do_backup_partial(
name=f"core_{self.instance.version}",

View File

@@ -12,7 +12,6 @@ from ..dbus.const import (
InterfaceAddrGenMode as NMInterfaceAddrGenMode,
InterfaceIp6Privacy as NMInterfaceIp6Privacy,
InterfaceMethod as NMInterfaceMethod,
MulticastDnsValue,
)
from ..dbus.network.connection import NetworkConnection
from ..dbus.network.interface import NetworkInterface
@@ -22,19 +21,11 @@ from .const import (
InterfaceIp6Privacy,
InterfaceMethod,
InterfaceType,
MulticastDnsMode,
WifiMode,
)
_LOGGER: logging.Logger = logging.getLogger(__name__)
_MULTICAST_DNS_VALUE_MODE_MAPPING: dict[int, MulticastDnsMode] = {
MulticastDnsValue.DEFAULT.value: MulticastDnsMode.DEFAULT,
MulticastDnsValue.OFF.value: MulticastDnsMode.OFF,
MulticastDnsValue.RESOLVE.value: MulticastDnsMode.RESOLVE,
MulticastDnsValue.ANNOUNCE.value: MulticastDnsMode.ANNOUNCE,
}
@dataclass(slots=True)
class AccessPoint:
@@ -116,8 +107,6 @@ class Interface:
ipv6setting: Ip6Setting | None
wifi: WifiConfig | None
vlan: VlanConfig | None
mdns: MulticastDnsMode | None
llmnr: MulticastDnsMode | None
def equals_dbus_interface(self, inet: NetworkInterface) -> bool:
"""Return true if this represents the dbus interface."""
@@ -209,13 +198,6 @@ class Interface:
and ConnectionStateFlags.IP6_READY in inet.connection.state_flags
)
if inet.settings and inet.settings.connection:
mdns = inet.settings.connection.mdns
llmnr = inet.settings.connection.llmnr
else:
mdns = None
llmnr = None
return Interface(
name=inet.interface_name,
mac=inet.hw_address,
@@ -252,8 +234,6 @@ class Interface:
ipv6setting=ipv6_setting,
wifi=Interface._map_nm_wifi(inet),
vlan=Interface._map_nm_vlan(inet),
mdns=Interface._map_nm_multicast_dns(mdns),
llmnr=Interface._map_nm_multicast_dns(llmnr),
)
@staticmethod
@@ -360,10 +340,3 @@ class Interface:
return None
return VlanConfig(inet.settings.vlan.id, inet.settings.vlan.parent)
@staticmethod
def _map_nm_multicast_dns(mode: int | None) -> MulticastDnsMode | None:
if mode is None:
return None
return _MULTICAST_DNS_VALUE_MODE_MAPPING.get(mode)

View File

@@ -89,12 +89,3 @@ class LogFormatter(StrEnum):
PLAIN = "plain"
VERBOSE = "verbose"
class MulticastDnsMode(StrEnum):
"""Multicast DNS (MDNS/LLMNR) mode."""
DEFAULT = "default"
OFF = "off"
RESOLVE = "resolve"
ANNOUNCE = "announce"

View File

@@ -91,7 +91,7 @@ class SystemControl(CoreSysAttributes):
if (
self.coresys.os.available
and self.coresys.os.version is not None
and self.sys_os.version >= AwesomeVersion("16.2.dev20250814")
and self.sys_os.version >= AwesomeVersion("16.2.dev0")
):
_LOGGER.info("Setting host timezone: %s", timezone)
await self.sys_dbus.timedate.set_timezone(timezone)

View File

@@ -7,6 +7,7 @@ from contextvars import Context, ContextVar, Token
from dataclasses import dataclass
from datetime import datetime
import logging
import math
from typing import Any, Self
from uuid import uuid4
@@ -97,6 +98,7 @@ class SupervisorJob:
default=0,
validator=[ge(0), le(100), _invalid_if_done],
on_setattr=_on_change,
converter=lambda val: math.floor(val * 10) / 10,
)
stage: str | None = field(
default=None, validator=[_invalid_if_done], on_setattr=_on_change
@@ -117,7 +119,7 @@ class SupervisorJob:
"name": self.name,
"reference": self.reference,
"uuid": self.uuid,
"progress": round(self.progress, 1),
"progress": self.progress,
"stage": self.stage,
"done": self.done,
"parent_id": self.parent_id,

View File

@@ -215,7 +215,7 @@ class ResolutionManager(FileConfiguration, CoreSysAttributes):
async def load(self):
"""Load the resoulution manager."""
# Initial healthcheck check
# Initial healthcheck when the manager is loaded
await self.healthcheck()
# Schedule the healthcheck

View File

@@ -17,8 +17,8 @@ from .const import (
ATTR_CHANNEL,
ATTR_CLI,
ATTR_DNS,
ATTR_HASSOS,
ATTR_HASSOS_UNRESTRICTED,
ATTR_HASSOS_UPGRADE,
ATTR_HOMEASSISTANT,
ATTR_IMAGE,
ATTR_MULTICAST,
@@ -93,46 +93,13 @@ class Updater(FileConfiguration, CoreSysAttributes):
@property
def version_hassos(self) -> AwesomeVersion | None:
"""Return latest version of HassOS."""
upgrade_map = self.upgrade_map_hassos
unrestricted = self.version_hassos_unrestricted
# If no upgrade map exists, fall back to unrestricted version
if not upgrade_map:
return unrestricted
# If we have no unrestricted version or no current OS version, return unrestricted
if (
not unrestricted
or not self.sys_os.version
or self.sys_os.version.major is None
):
return unrestricted
current_major = str(self.sys_os.version.major)
# Check if there's an upgrade path for current major version
if current_major in upgrade_map:
last_in_major = AwesomeVersion(upgrade_map[current_major])
# If we're not at the last version in our major, upgrade to that first
if self.sys_os.version != last_in_major:
return last_in_major
# If we are at the last version in our major, check for next major
next_major = str(int(self.sys_os.version.major) + 1)
if next_major in upgrade_map:
return AwesomeVersion(upgrade_map[next_major])
# Fall back to unrestricted version
return unrestricted
return self._data.get(ATTR_HASSOS)
@property
def version_hassos_unrestricted(self) -> AwesomeVersion | None:
"""Return latest version of HassOS ignoring upgrade restrictions."""
return self._data.get(ATTR_HASSOS_UNRESTRICTED)
@property
def upgrade_map_hassos(self) -> dict[str, str] | None:
"""Return HassOS upgrade map."""
return self._data.get(ATTR_HASSOS_UPGRADE)
@property
def version_cli(self) -> AwesomeVersion | None:
"""Return latest version of CLI."""
@@ -324,10 +291,18 @@ class Updater(FileConfiguration, CoreSysAttributes):
if self.sys_os.board:
self._data[ATTR_OTA] = data["ota"]
if version := data["hassos"].get(self.sys_os.board):
self._data[ATTR_HASSOS_UNRESTRICTED] = AwesomeVersion(version)
# Store the upgrade map for persistent access
self._data[ATTR_HASSOS_UPGRADE] = data.get("hassos-upgrade", {})
self._data[ATTR_HASSOS_UNRESTRICTED] = version
events.append("os")
upgrade_map = data.get("hassos-upgrade", {})
if last_in_major := upgrade_map.get(str(self.sys_os.version.major)):
if self.sys_os.version != AwesomeVersion(last_in_major):
version = last_in_major
elif last_in_next_major := upgrade_map.get(
str(int(self.sys_os.version.major) + 1)
):
version = last_in_next_major
self._data[ATTR_HASSOS] = AwesomeVersion(version)
else:
_LOGGER.warning(
"Board '%s' not found in version file. No OS updates.",

View File

@@ -24,7 +24,6 @@ from .const import (
ATTR_FORCE_SECURITY,
ATTR_HASSOS,
ATTR_HASSOS_UNRESTRICTED,
ATTR_HASSOS_UPGRADE,
ATTR_HOMEASSISTANT,
ATTR_ID,
ATTR_IMAGE,
@@ -130,9 +129,6 @@ SCHEMA_UPDATER_CONFIG = vol.Schema(
vol.Optional(ATTR_SUPERVISOR): version_tag,
vol.Optional(ATTR_HASSOS): version_tag,
vol.Optional(ATTR_HASSOS_UNRESTRICTED): version_tag,
vol.Optional(ATTR_HASSOS_UPGRADE): vol.Schema(
{vol.Extra: version_tag}, extra=vol.ALLOW_EXTRA
),
vol.Optional(ATTR_CLI): version_tag,
vol.Optional(ATTR_DNS): version_tag,
vol.Optional(ATTR_AUDIO): version_tag,

View File

@@ -140,46 +140,6 @@ def test_valid_map():
vd.SCHEMA_ADDON_CONFIG(config)
def test_malformed_map_entries():
"""Test that malformed map entries are handled gracefully (issue #6124)."""
config = load_json_fixture("basic-addon-config.json")
# Test case 1: Empty dict in map (should be skipped with warning)
config["map"] = [{}]
valid_config = vd.SCHEMA_ADDON_CONFIG(config)
assert valid_config["map"] == []
# Test case 2: Dict missing required 'type' field (should be skipped with warning)
config["map"] = [{"read_only": False, "path": "/custom"}]
valid_config = vd.SCHEMA_ADDON_CONFIG(config)
assert valid_config["map"] == []
# Test case 3: Invalid string format that doesn't match regex
config["map"] = ["invalid_format", "not:a:valid:mapping", "share:invalid_mode"]
valid_config = vd.SCHEMA_ADDON_CONFIG(config)
assert valid_config["map"] == []
# Test case 4: Mix of valid and invalid entries (invalid should be filtered out)
config["map"] = [
"share:rw", # Valid string format
"invalid_string", # Invalid string format
{}, # Invalid empty dict
{"type": "config", "read_only": True}, # Valid dict format
{"read_only": False}, # Invalid - missing type
]
valid_config = vd.SCHEMA_ADDON_CONFIG(config)
# Should only keep the valid entries
assert len(valid_config["map"]) == 2
assert any(entry["type"] == "share" for entry in valid_config["map"])
assert any(entry["type"] == "config" for entry in valid_config["map"])
# Test case 5: The specific case from the UplandJacob repo (malformed YAML format)
# This simulates what YAML "- addon_config: rw" creates
config["map"] = [{"addon_config": "rw"}] # Wrong structure, missing 'type' key
valid_config = vd.SCHEMA_ADDON_CONFIG(config)
assert valid_config["map"] == []
def test_valid_basic_build():
"""Validate basic build config."""
config = load_json_fixture("basic-build-config.json")

View File
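The deleted test above pinned down lenient map validation: malformed entries are dropped with a warning rather than failing the whole schema. A sketch of that filtering under assumed names (the real validator lives in supervisor/addons/validate.py; the type set here is abbreviated):

import logging
import re

_LOGGER = logging.getLogger(__name__)
_VALID_TYPES = {"data", "config", "ssl", "addons", "backup", "share", "media", "addon_config"}

def coerce_map(entries: list) -> list[dict]:
    """Keep well-formed map entries; warn about and drop the rest."""
    result: list[dict] = []
    for entry in entries:
        if isinstance(entry, dict) and entry.get("type") in _VALID_TYPES:
            result.append(entry)
        elif (
            isinstance(entry, str)
            and (match := re.match(r"^(\w+)(?::(rw|ro))?$", entry))
            and match[1] in _VALID_TYPES
        ):
            result.append({"type": match[1], "read_only": match[2] != "rw"})
        else:
            _LOGGER.warning("Skipping malformed map entry: %s", entry)
    return result

# coerce_map(["share:rw", "invalid_format", {}, {"type": "config"}]) keeps two entries.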

@@ -1,10 +1,9 @@
"""Test for API calls."""
from unittest.mock import MagicMock, patch
from unittest.mock import MagicMock
from aiohttp.test_utils import TestClient
from supervisor.coresys import CoreSys
from supervisor.host.const import LogFormat
DEFAULT_LOG_RANGE = "entries=:-99:100"
@@ -16,7 +15,6 @@ async def common_test_api_advanced_logs(
syslog_identifier: str,
api_client: TestClient,
journald_logs: MagicMock,
coresys: CoreSys,
):
"""Template for tests of endpoints using advanced logs."""
resp = await api_client.get(f"{path_prefix}/logs")
@@ -43,20 +41,6 @@ async def common_test_api_advanced_logs(
journald_logs.reset_mock()
container_mock = MagicMock()
container_mock.attrs = {"State": {"StartedAt": "2023-01-01T12:00:00.000000Z"}}
with patch.object(coresys.docker.containers, "get", return_value=container_mock):
resp = await api_client.get(f"{path_prefix}/logs/latest")
assert resp.status == 200
journald_logs.assert_called_once_with(
params={"SYSLOG_IDENTIFIER": syslog_identifier},
range_header="realtime=1672574400::0:18446744073709551615", # Unix timestamp for 2023-01-01 12:00:00
accept=LogFormat.JOURNAL,
)
coresys.docker.containers.get.assert_called_once_with(syslog_identifier)
journald_logs.reset_mock()
resp = await api_client.get(f"{path_prefix}/logs/boots/0")
assert resp.status == 200
assert resp.content_type == "text/plain"

View File
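The removed assertions above fix the contract for /logs/latest: the journald range starts at the container's StartedAt time and runs to the end of the journal. How that header is derived can be sketched as follows (assumed shape, not the Supervisor implementation):

from datetime import datetime

def latest_range_header(started_at: str) -> str:
    """Build a journald realtime range from a Docker StartedAt value."""
    start = datetime.fromisoformat(started_at.replace("Z", "+00:00"))
    # realtime=<since>::<skip>:<count>; 2**64 - 1 reads to the journal's end.
    return f"realtime={int(start.timestamp())}::0:{2**64 - 1}"

# latest_range_header("2023-01-01T12:00:00.000000Z")
# -> "realtime=1672574400::0:18446744073709551615"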

@@ -72,14 +72,11 @@ async def test_addons_info_not_installed(
async def test_api_addon_logs(
api_client: TestClient,
journald_logs: MagicMock,
coresys: CoreSys,
install_addon_ssh: Addon,
api_client: TestClient, journald_logs: MagicMock, install_addon_ssh: Addon
):
"""Test addon logs."""
await common_test_api_advanced_logs(
"/addons/local_ssh", "addon_local_ssh", api_client, journald_logs, coresys
"/addons/local_ssh", "addon_local_ssh", api_client, journald_logs
)

View File

@@ -4,15 +4,11 @@ from unittest.mock import MagicMock
from aiohttp.test_utils import TestClient
from supervisor.coresys import CoreSys
from tests.api import common_test_api_advanced_logs
async def test_api_audio_logs(
api_client: TestClient, journald_logs: MagicMock, coresys: CoreSys
):
async def test_api_audio_logs(api_client: TestClient, journald_logs: MagicMock):
"""Test audio logs."""
await common_test_api_advanced_logs(
"/audio", "hassio_audio", api_client, journald_logs, coresys
"/audio", "hassio_audio", api_client, journald_logs
)

View File

@@ -66,10 +66,6 @@ async def test_options(api_client: TestClient, coresys: CoreSys):
restart.assert_called_once()
async def test_api_dns_logs(
api_client: TestClient, journald_logs: MagicMock, coresys: CoreSys
):
async def test_api_dns_logs(api_client: TestClient, journald_logs: MagicMock):
"""Test dns logs."""
await common_test_api_advanced_logs(
"/dns", "hassio_dns", api_client, journald_logs, coresys
)
await common_test_api_advanced_logs("/dns", "hassio_dns", api_client, journald_logs)

View File

@@ -1,16 +1,13 @@
"""Test homeassistant api."""
import asyncio
from pathlib import Path
from unittest.mock import MagicMock, PropertyMock, patch
from unittest.mock import MagicMock, patch
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
import pytest
from supervisor.backups.manager import BackupManager
from supervisor.coresys import CoreSys
from supervisor.docker.interface import DockerInterface
from supervisor.homeassistant.api import APIState
from supervisor.homeassistant.core import HomeAssistantCore
from supervisor.homeassistant.module import HomeAssistant
@@ -21,10 +18,7 @@ from tests.common import load_json_fixture
@pytest.mark.parametrize("legacy_route", [True, False])
async def test_api_core_logs(
api_client: TestClient,
journald_logs: MagicMock,
coresys: CoreSys,
legacy_route: bool,
api_client: TestClient, journald_logs: MagicMock, legacy_route: bool
):
"""Test core logs."""
await common_test_api_advanced_logs(
@@ -32,7 +26,6 @@ async def test_api_core_logs(
"homeassistant",
api_client,
journald_logs,
coresys,
)
@@ -195,77 +188,3 @@ async def test_force_stop_during_migration(api_client: TestClient, coresys: Core
with patch.object(HomeAssistantCore, "stop") as stop:
await api_client.post("/homeassistant/stop", json={"force": True})
stop.assert_called_once()
@pytest.mark.parametrize(
("make_backup", "backup_called", "update_called"),
[(True, True, False), (False, False, True)],
)
async def test_home_assistant_background_update(
api_client: TestClient,
coresys: CoreSys,
make_backup: bool,
backup_called: bool,
update_called: bool,
):
"""Test background update of Home Assistant."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
event = asyncio.Event()
mock_update_called = mock_backup_called = False
# Mock backup/update as long-running tasks
async def mock_docker_interface_update(*args, **kwargs):
nonlocal mock_update_called
mock_update_called = True
await event.wait()
async def mock_partial_backup(*args, **kwargs):
nonlocal mock_backup_called
mock_backup_called = True
await event.wait()
with (
patch.object(DockerInterface, "update", new=mock_docker_interface_update),
patch.object(BackupManager, "do_backup_partial", new=mock_partial_backup),
patch.object(
DockerInterface,
"version",
new=PropertyMock(return_value=AwesomeVersion("2025.8.0")),
),
):
resp = await api_client.post(
"/core/update",
json={"background": True, "backup": make_backup, "version": "2025.8.3"},
)
assert mock_backup_called is backup_called
assert mock_update_called is update_called
assert resp.status == 200
body = await resp.json()
assert (job := coresys.jobs.get_job(body["data"]["job_id"]))
assert job.name == "home_assistant_core_update"
event.set()
async def test_background_home_assistant_update_fails_fast(
api_client: TestClient, coresys: CoreSys
):
"""Test background Home Assistant update returns error not job if validation doesn't succeed."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
with (
patch.object(
DockerInterface,
"version",
new=PropertyMock(return_value=AwesomeVersion("2025.8.3")),
),
):
resp = await api_client.post(
"/core/update",
json={"background": True, "version": "2025.8.3"},
)
assert resp.status == 400
body = await resp.json()
assert body["message"] == "Version 2025.8.3 is already installed"

View File
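The deleted background-update tests encode a useful API contract: with "background": true the endpoint validates first, fails fast with HTTP 400 if validation fails, and otherwise returns immediately with a job_id for the scheduled job. A hypothetical client flow against that contract:

import aiohttp

async def update_core_in_background(base_url: str, version: str) -> str:
    """Start a background core update and return the job ID to monitor."""
    async with aiohttp.ClientSession(base_url=base_url) as session:
        resp = await session.post(
            "/core/update",
            json={"background": True, "backup": True, "version": version},
        )
        if resp.status == 400:
            # Fail-fast path: validation errors return a message, not a job.
            raise RuntimeError((await resp.json())["message"])
        body = await resp.json()
        return body["data"]["job_id"]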

@@ -243,10 +243,6 @@ async def test_advanced_logs(
accept=LogFormat.JOURNAL,
)
# Host logs don't have a /latest endpoint
resp = await api_client.get("/host/logs/latest")
assert resp.status == 404
async def test_advaced_logs_query_parameters(
api_client: TestClient,
@@ -848,49 +844,3 @@ async def test_force_shutdown_during_migration(
with patch.object(SystemControl, "shutdown") as shutdown:
await api_client.post("/host/shutdown", json={"force": True})
shutdown.assert_called_once()
async def test_advanced_logs_latest_container_not_found_error(
api_client: TestClient, coresys: CoreSys
):
"""Test advanced logs API with latest parameter when container start time cannot be determined."""
container_mock = MagicMock()
container_mock.attrs = {"State": {}}
with patch.object(coresys.docker.containers, "get", return_value=container_mock):
# Test with a service endpoint that does exist
resp = await api_client.get("/core/logs/latest")
assert resp.status == 400
result = await resp.text()
assert "Cannot determine start time of homeassistant" in result
async def test_advanced_logs_latest_invalid_start_time(
api_client: TestClient, coresys: CoreSys, caplog: pytest.LogCaptureFixture
):
"""Test advanced logs API with latest parameter when container start time is invalid."""
# Mock container with invalid StartedAt attribute
container_mock = MagicMock()
container_mock.attrs = {"State": {"StartedAt": "1. 1. 2025"}}
with patch.object(coresys.docker.containers, "get", return_value=container_mock):
# Test with a service endpoint that does exist
resp = await api_client.get("/core/logs/latest")
assert resp.status == 400
result = await resp.text()
assert "Cannot determine start time of homeassistant" in result
async def test_advanced_logs_latest_invalid_container(
api_client: TestClient, coresys: CoreSys, caplog: pytest.LogCaptureFixture
):
"""Test advanced logs API with latest parameter when container can't be found."""
container_mock = MagicMock()
container_mock.attrs = {}
with patch.object(coresys.docker.containers, "get", return_value=None):
# Test with a service endpoint that does exist
resp = await api_client.get("/core/logs/latest")
assert resp.status == 400
result = await resp.text()
assert "Cannot determine start time of homeassistant" in result

View File

@@ -4,15 +4,11 @@ from unittest.mock import MagicMock
from aiohttp.test_utils import TestClient
from supervisor.coresys import CoreSys
from tests.api import common_test_api_advanced_logs
async def test_api_multicast_logs(
api_client: TestClient, journald_logs: MagicMock, coresys: CoreSys
):
async def test_api_multicast_logs(api_client: TestClient, journald_logs: MagicMock):
"""Test multicast logs."""
await common_test_api_advanced_logs(
"/multicast", "hassio_multicast", api_client, journald_logs, coresys
"/multicast", "hassio_multicast", api_client, journald_logs
)

View File

@@ -88,8 +88,6 @@ async def test_api_network_interface_info(api_client: TestClient, interface_id:
]
assert result["data"]["ipv6"]["ready"] is True
assert result["data"]["interface"] == TEST_INTERFACE_ETH_NAME
assert result["data"]["mdns"] == "announce"
assert result["data"]["llmnr"] == "announce"
async def test_api_network_interface_info_default(api_client: TestClient):
@@ -111,8 +109,6 @@ async def test_api_network_interface_info_default(api_client: TestClient):
]
assert result["data"]["ipv6"]["ready"] is True
assert result["data"]["interface"] == TEST_INTERFACE_ETH_NAME
assert result["data"]["mdns"] == "announce"
assert result["data"]["llmnr"] == "announce"
@pytest.mark.parametrize(
@@ -282,33 +278,6 @@ async def test_api_network_interface_update_wifi_error(api_client: TestClient):
)
async def test_api_network_interface_update_mdns(
api_client: TestClient,
coresys: CoreSys,
network_manager_service: NetworkManagerService,
connection_settings_service: ConnectionSettingsService,
):
"""Test network manager API update with mDNS/LLMNR mode."""
network_manager_service.CheckConnectivity.calls.clear()
connection_settings_service.Update.calls.clear()
resp = await api_client.post(
f"/network/interface/{TEST_INTERFACE_ETH_NAME}/update",
json={
"mdns": "resolve",
"llmnr": "off",
},
)
result = await resp.json()
assert result["result"] == "ok"
assert len(connection_settings_service.Update.calls) == 1
settings = connection_settings_service.Update.calls[0][0]
assert "connection" in settings
assert settings["connection"]["mdns"] == Variant("i", 1)
assert settings["connection"]["llmnr"] == Variant("i", 0)
async def test_api_network_interface_update_remove(api_client: TestClient):
"""Test network manager api."""
resp = await api_client.post(
@@ -411,7 +380,7 @@ async def test_api_network_vlan(
settings_service.AddConnection.calls.clear()
resp = await api_client.post(
f"/network/interface/{TEST_INTERFACE_ETH_NAME}/vlan/1",
json={"ipv4": {"method": "auto"}, "llmnr": "off"},
json={"ipv4": {"method": "auto"}},
)
result = await resp.json()
assert result["result"] == "ok"
@@ -422,8 +391,8 @@ async def test_api_network_vlan(
assert connection["connection"] == {
"id": Variant("s", "Supervisor eth0.1"),
"type": Variant("s", "vlan"),
"mdns": Variant("i", -1), # Default mode
"llmnr": Variant("i", 0),
"llmnr": Variant("i", 2),
"mdns": Variant("i", 2),
"autoconnect": Variant("b", True),
"uuid": connection["connection"]["uuid"],
}

View File
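The integer Variants asserted in these network tests follow NetworkManager's connection.mdns / connection.llmnr enumeration, which is worth spelling out (reference sketch):

from enum import IntEnum

class MulticastDnsValue(IntEnum):
    """Values NetworkManager accepts for connection.mdns / connection.llmnr."""

    DEFAULT = -1  # defer to NetworkManager's global configuration
    OFF = 0  # disabled
    RESOLVE = 1  # resolve names only
    ANNOUNCE = 2  # resolve and announce this host ("announce" in the API)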

@@ -48,7 +48,7 @@ async def test_api_available_updates(
"version_latest": "9.2.1",
}
coresys.updater._data["hassos_unrestricted"] = "321"
coresys.updater._data["hassos"] = "321"
coresys.os._version = "123"
updates = await available_updates()
assert len(updates) == 2

View File

@@ -6,12 +6,10 @@ from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
from aiohttp import ClientResponse
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
import pytest
from supervisor.addons.addon import Addon
from supervisor.arch import CpuArch
from supervisor.backups.manager import BackupManager
from supervisor.config import CoreConfig
from supervisor.const import AddonState
from supervisor.coresys import CoreSys
@@ -19,7 +17,6 @@ from supervisor.docker.addon import DockerAddon
from supervisor.docker.const import ContainerState
from supervisor.docker.interface import DockerInterface
from supervisor.docker.monitor import DockerContainerStateEvent
from supervisor.homeassistant.module import HomeAssistant
from supervisor.store.addon import AddonStore
from supervisor.store.repository import Repository
@@ -308,7 +305,6 @@ async def get_message(resp: ClientResponse, json_expected: bool) -> str:
("post", "/store/addons/bad/install/1", True),
("post", "/store/addons/bad/update", True),
("post", "/store/addons/bad/update/1", True),
("get", "/store/addons/bad/availability", True),
# Legacy paths
("get", "/addons/bad/icon", False),
("get", "/addons/bad/logo", False),
@@ -394,327 +390,3 @@ async def test_api_store_addons_changelog_corrupted(
assert resp.status == 200
result = await resp.text()
assert result == "Text with an invalid UTF-8 char: <20>"
@pytest.mark.usefixtures("test_repository", "tmp_supervisor_data")
async def test_addon_install_in_background(api_client: TestClient, coresys: CoreSys):
"""Test installing an addon in the background."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
event = asyncio.Event()
# Mock a long-running install task
async def mock_addon_install(*args, **kwargs):
await event.wait()
with patch.object(Addon, "install", new=mock_addon_install):
resp = await api_client.post(
"/store/addons/local_ssh/install", json={"background": True}
)
assert resp.status == 200
body = await resp.json()
assert (job := coresys.jobs.get_job(body["data"]["job_id"]))
assert job.name == "addon_manager_install"
event.set()
@pytest.mark.usefixtures("install_addon_ssh")
async def test_background_addon_install_fails_fast(
api_client: TestClient, coresys: CoreSys
):
"""Test background addon install returns error not job if validation fails."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
resp = await api_client.post(
"/store/addons/local_ssh/install", json={"background": True}
)
assert resp.status == 400
body = await resp.json()
assert body["message"] == "Add-on local_ssh is already installed"
@pytest.mark.parametrize(
("make_backup", "backup_called", "update_called"),
[(True, True, False), (False, False, True)],
)
@pytest.mark.usefixtures("test_repository", "tmp_supervisor_data")
async def test_addon_update_in_background(
api_client: TestClient,
coresys: CoreSys,
install_addon_ssh: Addon,
make_backup: bool,
backup_called: bool,
update_called: bool,
):
"""Test updating an addon in the background."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
install_addon_ssh.data_store["version"] = "10.0.0"
event = asyncio.Event()
mock_update_called = mock_backup_called = False
# Mock backup/update as long-running tasks
async def mock_addon_update(*args, **kwargs):
nonlocal mock_update_called
mock_update_called = True
await event.wait()
async def mock_partial_backup(*args, **kwargs):
nonlocal mock_backup_called
mock_backup_called = True
await event.wait()
with (
patch.object(Addon, "update", new=mock_addon_update),
patch.object(BackupManager, "do_backup_partial", new=mock_partial_backup),
):
resp = await api_client.post(
"/store/addons/local_ssh/update",
json={"background": True, "backup": make_backup},
)
assert mock_backup_called is backup_called
assert mock_update_called is update_called
assert resp.status == 200
body = await resp.json()
assert (job := coresys.jobs.get_job(body["data"]["job_id"]))
assert job.name == "addon_manager_update"
event.set()
@pytest.mark.usefixtures("install_addon_ssh")
async def test_background_addon_update_fails_fast(
api_client: TestClient, coresys: CoreSys
):
"""Test background addon update returns error not job if validation doesn't succeed."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
resp = await api_client.post(
"/store/addons/local_ssh/update", json={"background": True}
)
assert resp.status == 400
body = await resp.json()
assert body["message"] == "No update available for add-on local_ssh"
async def test_api_store_addons_addon_availability_success(
api_client: TestClient, store_addon: AddonStore
):
"""Test /store/addons/{addon}/availability REST API - success case."""
resp = await api_client.get(f"/store/addons/{store_addon.slug}/availability")
assert resp.status == 200
@pytest.mark.parametrize(
("supported_architectures", "api_action", "api_method", "installed"),
[
(["i386"], "availability", "get", False),
(["i386", "aarch64"], "availability", "get", False),
(["i386"], "install", "post", False),
(["i386", "aarch64"], "install", "post", False),
(["i386"], "update", "post", True),
(["i386", "aarch64"], "update", "post", True),
],
)
async def test_api_store_addons_addon_availability_arch_not_supported(
api_client: TestClient,
coresys: CoreSys,
supported_architectures: list[str],
api_action: str,
api_method: str,
installed: bool,
):
"""Test availability errors for /store/addons/{addon}/* REST APIs - architecture not supported."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
# Create an addon with unsupported architecture
addon_obj = AddonStore(coresys, "test_arch_addon")
coresys.addons.store[addon_obj.slug] = addon_obj
# Set addon config with unsupported architecture
addon_config = {
"advanced": False,
"arch": supported_architectures,
"slug": "test_arch_addon",
"description": "Test arch add-on",
"name": "Test Arch Add-on",
"repository": "test",
"stage": "stable",
"version": "1.0.0",
}
coresys.store.data.addons[addon_obj.slug] = addon_config
if installed:
coresys.addons.local[addon_obj.slug] = Addon(coresys, addon_obj.slug)
coresys.addons.data.user[addon_obj.slug] = {"version": AwesomeVersion("0.0.1")}
# Mock the system architecture to be different
with patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])):
resp = await api_client.request(
api_method, f"/store/addons/{addon_obj.slug}/{api_action}"
)
assert resp.status == 400
result = await resp.json()
assert result["error_key"] == "addon_not_supported_architecture_error"
assert (
result["message_template"]
== "Add-on {slug} not supported on this platform, supported architectures: {architectures}"
)
assert result["extra_fields"] == {
"slug": "test_arch_addon",
"architectures": ", ".join(supported_architectures),
}
assert result["message"] == result["message_template"].format(
**result["extra_fields"]
)
@pytest.mark.parametrize(
("supported_machines", "api_action", "api_method", "installed"),
[
(["odroid-n2"], "availability", "get", False),
(["!qemux86-64"], "availability", "get", False),
(["a", "b"], "availability", "get", False),
(["odroid-n2"], "install", "post", False),
(["!qemux86-64"], "install", "post", False),
(["a", "b"], "install", "post", False),
(["odroid-n2"], "update", "post", True),
(["!qemux86-64"], "update", "post", True),
(["a", "b"], "update", "post", True),
],
)
async def test_api_store_addons_addon_availability_machine_not_supported(
api_client: TestClient,
coresys: CoreSys,
supported_machines: list[str],
api_action: str,
api_method: str,
installed: bool,
):
"""Test availability errors for /store/addons/{addon}/* REST APIs - machine not supported."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
# Create an addon with unsupported machine type
addon_obj = AddonStore(coresys, "test_machine_addon")
coresys.addons.store[addon_obj.slug] = addon_obj
# Set addon config with unsupported machine
addon_config = {
"advanced": False,
"arch": ["amd64"],
"machine": supported_machines,
"slug": "test_machine_addon",
"description": "Test machine add-on",
"name": "Test Machine Add-on",
"repository": "test",
"stage": "stable",
"version": "1.0.0",
}
coresys.store.data.addons[addon_obj.slug] = addon_config
if installed:
coresys.addons.local[addon_obj.slug] = Addon(coresys, addon_obj.slug)
coresys.addons.data.user[addon_obj.slug] = {"version": AwesomeVersion("0.0.1")}
# Mock the system machine to be different
with patch.object(CoreSys, "machine", new=PropertyMock(return_value="qemux86-64")):
resp = await api_client.request(
api_method, f"/store/addons/{addon_obj.slug}/{api_action}"
)
assert resp.status == 400
result = await resp.json()
assert result["error_key"] == "addon_not_supported_machine_type_error"
assert (
result["message_template"]
== "Add-on {slug} not supported on this machine, supported machine types: {machine_types}"
)
assert result["extra_fields"] == {
"slug": "test_machine_addon",
"machine_types": ", ".join(supported_machines),
}
assert result["message"] == result["message_template"].format(
**result["extra_fields"]
)
@pytest.mark.parametrize(
("api_action", "api_method", "installed"),
[
("availability", "get", False),
("install", "post", False),
("update", "post", True),
],
)
async def test_api_store_addons_addon_availability_homeassistant_version_too_old(
api_client: TestClient,
coresys: CoreSys,
api_action: str,
api_method: str,
installed: bool,
):
"""Test availability errors for /store/addons/{addon}/* REST APIs - Home Assistant version too old."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
# Create an addon that requires newer Home Assistant version
addon_obj = AddonStore(coresys, "test_version_addon")
coresys.addons.store[addon_obj.slug] = addon_obj
# Set addon config with minimum Home Assistant version requirement
addon_config = {
"advanced": False,
"arch": ["amd64"],
"homeassistant": "2023.1.1", # Requires newer version than current
"slug": "test_version_addon",
"description": "Test version add-on",
"name": "Test Version Add-on",
"repository": "test",
"stage": "stable",
"version": "1.0.0",
}
coresys.store.data.addons[addon_obj.slug] = addon_config
if installed:
coresys.addons.local[addon_obj.slug] = Addon(coresys, addon_obj.slug)
coresys.addons.data.user[addon_obj.slug] = {"version": AwesomeVersion("0.0.1")}
# Mock the Home Assistant version to be older
with patch.object(
HomeAssistant,
"version",
new=PropertyMock(return_value=AwesomeVersion("2022.1.1")),
):
resp = await api_client.request(
api_method, f"/store/addons/{addon_obj.slug}/{api_action}"
)
assert resp.status == 400
result = await resp.json()
assert result["error_key"] == "addon_not_supported_home_assistant_version_error"
assert (
result["message_template"]
== "Add-on {slug} not supported on this system, requires Home Assistant version {version} or greater"
)
assert result["extra_fields"] == {
"slug": "test_version_addon",
"version": "2023.1.1",
}
assert result["message"] == result["message_template"].format(
**result["extra_fields"]
)
async def test_api_store_addons_addon_availability_installed_addon(
api_client: TestClient, install_addon_ssh: Addon
):
"""Test /store/addons/{addon}/availability REST API - installed addon checks against latest version."""
resp = await api_client.get("/store/addons/local_ssh/availability")
assert resp.status == 200
install_addon_ssh.data_store["version"] = AwesomeVersion("10.0.0")
install_addon_ssh.data_store["homeassistant"] = AwesomeVersion("2023.1.1")
# Mock the Home Assistant version to be older
with patch.object(
HomeAssistant,
"version",
new=PropertyMock(return_value=AwesomeVersion("2022.1.1")),
):
resp = await api_client.get("/store/addons/local_ssh/availability")
assert resp.status == 400
result = await resp.json()
assert (
"requires Home Assistant version 2023.1.1 or greater" in result["message"]
)

View File
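The deleted availability tests above also document the error payload contract: a stable error_key, a message_template, and extra_fields that render into the final message. In miniature:

error = {
    "error_key": "addon_not_supported_architecture_error",
    "message_template": (
        "Add-on {slug} not supported on this platform, "
        "supported architectures: {architectures}"
    ),
    "extra_fields": {"slug": "test_arch_addon", "architectures": "i386"},
}
error["message"] = error["message_template"].format(**error["extra_fields"])
assert error["message"].endswith("supported architectures: i386")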

@@ -148,12 +148,10 @@ async def test_api_supervisor_options_diagnostics(
assert coresys.dbus.agent.diagnostics is False
async def test_api_supervisor_logs(
api_client: TestClient, journald_logs: MagicMock, coresys: CoreSys
):
async def test_api_supervisor_logs(api_client: TestClient, journald_logs: MagicMock):
"""Test supervisor logs."""
await common_test_api_advanced_logs(
"/supervisor", "hassio_supervisor", api_client, journald_logs, coresys
"/supervisor", "hassio_supervisor", api_client, journald_logs
)
@@ -177,7 +175,7 @@ async def test_api_supervisor_fallback(
b"\x1b[36m22-10-11 14:04:23 DEBUG (MainThread) [supervisor.utils.dbus] D-Bus call - org.freedesktop.DBus.Properties.call_get_all on /io/hass/os/AppArmor\x1b[0m",
]
# check fallback also works for the /follow endpoint (no mock reset needed)
# check fallback also works for the follow endpoint (no mock reset needed)
with patch("supervisor.api._LOGGER.exception") as logger:
resp = await api_client.get("/supervisor/logs/follow")
@@ -188,16 +186,7 @@ async def test_api_supervisor_fallback(
assert resp.status == 200
assert resp.content_type == "text/plain"
# check the /latest endpoint as well
with patch("supervisor.api._LOGGER.exception") as logger:
resp = await api_client.get("/supervisor/logs/latest")
logger.assert_called_once_with(
"Failed to get supervisor logs using advanced_logs API"
)
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.reset_mock()
# also check generic Python error
journald_logs.side_effect = OSError("Something bad happened!")

View File

@@ -6,7 +6,7 @@ from supervisor.dbus.network import NetworkManager
from supervisor.dbus.network.interface import NetworkInterface
from supervisor.dbus.network.setting.generate import get_connection_from_interface
from supervisor.host.configuration import Ip6Setting, IpConfig, IpSetting, VlanConfig
from supervisor.host.const import InterfaceMethod, InterfaceType, MulticastDnsMode
from supervisor.host.const import InterfaceMethod, InterfaceType
from supervisor.host.network import Interface
from tests.const import TEST_INTERFACE_ETH_NAME
@@ -22,8 +22,6 @@ async def test_get_connection_from_interface(network_manager: NetworkManager):
assert "interface-name" not in connection_payload["connection"]
assert connection_payload["connection"]["type"].value == "802-3-ethernet"
assert connection_payload["connection"]["mdns"].value == 2
assert connection_payload["connection"]["llmnr"].value == 2
assert connection_payload["match"]["path"].value == ["platform-ff3f0000.ethernet"]
assert connection_payload["ipv4"]["method"].value == "auto"
@@ -63,15 +61,11 @@ async def test_generate_from_vlan(network_manager: NetworkManager):
ipv6setting=Ip6Setting(InterfaceMethod.AUTO, [], None, []),
wifi=None,
vlan=VlanConfig(1, "eth0"),
mdns=MulticastDnsMode.RESOLVE,
llmnr=MulticastDnsMode.OFF,
)
connection_payload = get_connection_from_interface(vlan_interface, network_manager)
assert connection_payload["connection"]["id"].value == "Supervisor eth0.1"
assert connection_payload["connection"]["type"].value == "vlan"
assert connection_payload["connection"]["mdns"].value == 1 # resolve
assert connection_payload["connection"]["llmnr"].value == 0 # off
assert "uuid" in connection_payload["connection"]
assert "match" not in connection_payload["connection"]
assert "interface-name" not in connection_payload["connection"]

View File

@@ -21,6 +21,7 @@ from supervisor.docker.monitor import DockerContainerStateEvent
from supervisor.exceptions import (
DockerAPIError,
DockerError,
DockerLogOutOfOrder,
DockerNoSpaceOnDevice,
DockerNotFound,
DockerRequestError,
@@ -485,25 +486,25 @@ async def test_install_sends_progress_to_home_assistant(
{"stage": "Pulling fs layer", "progress": 0, "done": False, "extra": None},
{
"stage": "Downloading",
"progress": 0.1,
"progress": 0.0,
"done": False,
"extra": {"current": 539462, "total": 436480882},
},
{
"stage": "Downloading",
"progress": 0.6,
"progress": 0.5,
"done": False,
"extra": {"current": 4864838, "total": 436480882},
},
{
"stage": "Downloading",
"progress": 0.9,
"progress": 0.8,
"done": False,
"extra": {"current": 7552896, "total": 436480882},
},
{
"stage": "Downloading",
"progress": 1.2,
"progress": 1.1,
"done": False,
"extra": {"current": 10252544, "total": 436480882},
},
@@ -515,13 +516,13 @@ async def test_install_sends_progress_to_home_assistant(
},
{
"stage": "Downloading",
"progress": 11.9,
"progress": 11.8,
"done": False,
"extra": {"current": 103619904, "total": 436480882},
},
{
"stage": "Downloading",
"progress": 26.1,
"progress": 26.0,
"done": False,
"extra": {"current": 227726144, "total": 436480882},
},
@@ -533,49 +534,49 @@ async def test_install_sends_progress_to_home_assistant(
},
{
"stage": "Verifying Checksum",
"progress": 50,
"progress": 50.0,
"done": False,
"extra": {"current": 433170048, "total": 436480882},
},
{
"stage": "Download complete",
"progress": 50,
"progress": 50.0,
"done": False,
"extra": {"current": 433170048, "total": 436480882},
},
{
"stage": "Extracting",
"progress": 50.1,
"progress": 50.0,
"done": False,
"extra": {"current": 557056, "total": 436480882},
},
{
"stage": "Extracting",
"progress": 60.3,
"progress": 60.2,
"done": False,
"extra": {"current": 89686016, "total": 436480882},
},
{
"stage": "Extracting",
"progress": 70.0,
"progress": 69.9,
"done": False,
"extra": {"current": 174358528, "total": 436480882},
},
{
"stage": "Extracting",
"progress": 80.0,
"progress": 79.9,
"done": False,
"extra": {"current": 261816320, "total": 436480882},
},
{
"stage": "Extracting",
"progress": 88.4,
"progress": 88.3,
"done": False,
"extra": {"current": 334790656, "total": 436480882},
},
{
"stage": "Extracting",
"progress": 94.0,
"progress": 93.9,
"done": False,
"extra": {"current": 383811584, "total": 436480882},
},
@@ -600,136 +601,6 @@ async def test_install_sends_progress_to_home_assistant(
]
async def test_install_progress_rounding_does_not_cause_misses(
coresys: CoreSys, test_docker_interface: DockerInterface, ha_ws_client: AsyncMock
):
"""Test extremely close progress events do not create rounding issues."""
coresys.core.set_state(CoreState.RUNNING)
coresys.docker.docker.api.pull.return_value = [
{
"status": "Pulling from home-assistant/odroid-n2-homeassistant",
"id": "2025.7.1",
},
{"status": "Pulling fs layer", "progressDetail": {}, "id": "1e214cd6d7d0"},
{
"status": "Downloading",
"progressDetail": {"current": 432700000, "total": 436480882},
"progress": "[=================================================> ] 432.7MB/436.5MB",
"id": "1e214cd6d7d0",
},
{
"status": "Downloading",
"progressDetail": {"current": 432800000, "total": 436480882},
"progress": "[=================================================> ] 432.8MB/436.5MB",
"id": "1e214cd6d7d0",
},
{"status": "Verifying Checksum", "progressDetail": {}, "id": "1e214cd6d7d0"},
{"status": "Download complete", "progressDetail": {}, "id": "1e214cd6d7d0"},
{
"status": "Extracting",
"progressDetail": {"current": 432700000, "total": 436480882},
"progress": "[=================================================> ] 432.7MB/436.5MB",
"id": "1e214cd6d7d0",
},
{
"status": "Extracting",
"progressDetail": {"current": 432800000, "total": 436480882},
"progress": "[=================================================> ] 432.8MB/436.5MB",
"id": "1e214cd6d7d0",
},
{"status": "Pull complete", "progressDetail": {}, "id": "1e214cd6d7d0"},
{
"status": "Digest: sha256:7d97da645f232f82a768d0a537e452536719d56d484d419836e53dbe3e4ec736"
},
{
"status": "Status: Downloaded newer image for ghcr.io/home-assistant/odroid-n2-homeassistant:2025.7.1"
},
]
with (
patch.object(
type(coresys.supervisor), "arch", PropertyMock(return_value="i386")
),
):
# Schedule job so we can listen for the end. Then we can assert against the WS mock
event = asyncio.Event()
job, install_task = coresys.jobs.schedule_job(
test_docker_interface.install,
JobSchedulerOptions(),
AwesomeVersion("1.2.3"),
"test",
)
async def listen_for_job_end(reference: SupervisorJob):
if reference.uuid != job.uuid:
return
event.set()
coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, listen_for_job_end)
await install_task
await event.wait()
events = [
evt.args[0]["data"]["data"]
for evt in ha_ws_client.async_send_command.call_args_list
if "data" in evt.args[0]
and evt.args[0]["data"]["event"] == WSEvent.JOB
and evt.args[0]["data"]["data"]["reference"] == "1e214cd6d7d0"
and evt.args[0]["data"]["data"]["stage"] in {"Downloading", "Extracting"}
]
assert events == [
{
"name": "Pulling container image layer",
"stage": "Downloading",
"progress": 49.6,
"done": False,
"extra": {"current": 432700000, "total": 436480882},
"reference": "1e214cd6d7d0",
"parent_id": job.uuid,
"errors": [],
"uuid": ANY,
"created": ANY,
},
{
"name": "Pulling container image layer",
"stage": "Downloading",
"progress": 49.6,
"done": False,
"extra": {"current": 432800000, "total": 436480882},
"reference": "1e214cd6d7d0",
"parent_id": job.uuid,
"errors": [],
"uuid": ANY,
"created": ANY,
},
{
"name": "Pulling container image layer",
"stage": "Extracting",
"progress": 99.6,
"done": False,
"extra": {"current": 432700000, "total": 436480882},
"reference": "1e214cd6d7d0",
"parent_id": job.uuid,
"errors": [],
"uuid": ANY,
"created": ANY,
},
{
"name": "Pulling container image layer",
"stage": "Extracting",
"progress": 99.6,
"done": False,
"extra": {"current": 432800000, "total": 436480882},
"reference": "1e214cd6d7d0",
"parent_id": job.uuid,
"errors": [],
"uuid": ANY,
"created": ANY,
},
]
@pytest.mark.parametrize(
("error_log", "exc_type", "exc_msg"),
[
@@ -776,138 +647,56 @@ async def test_install_raises_on_pull_error(
await test_docker_interface.install(AwesomeVersion("1.2.3"), "test")
async def test_install_progress_handles_download_restart(
coresys: CoreSys, test_docker_interface: DockerInterface, ha_ws_client: AsyncMock
async def test_process_pull_image_log_precision_fix(
coresys: CoreSys, test_docker_interface: DockerInterface
):
"""Test install handles docker progress events that include a download restart."""
coresys.core.set_state(CoreState.RUNNING)
coresys.docker.docker.api.pull.return_value = load_json_fixture(
"docker_pull_image_log_restart.json"
"""Test that precision issues don't cause DockerLogOutOfOrder errors."""
job_id = "test_job_123"
layer_id = "abc123"
# First, create the job with a "Pulling fs layer" event
fs_layer_entry = PullLogEntry(
job_id=job_id,
id=layer_id,
status="Pulling fs layer",
)
test_docker_interface._process_pull_image_log(job_id, fs_layer_entry)
# First extracting event with higher progress
entry1 = PullLogEntry(
job_id=job_id,
id=layer_id,
status="Extracting",
progress_detail=PullProgressDetail(current=91300, total=100000),
)
with (
patch.object(
type(coresys.supervisor), "arch", PropertyMock(return_value="i386")
),
):
# Schedule job so we can listen for the end. Then we can assert against the WS mock
event = asyncio.Event()
job, install_task = coresys.jobs.schedule_job(
test_docker_interface.install,
JobSchedulerOptions(),
AwesomeVersion("1.2.3"),
"test",
)
# Second extracting event with slightly lower progress that would cause a precision issue
# This simulates the real-world scenario from the Sentry error
entry2 = PullLogEntry(
job_id=job_id,
id=layer_id,
status="Extracting",
progress_detail=PullProgressDetail(current=91284, total=100000),
)
async def listen_for_job_end(reference: SupervisorJob):
if reference.uuid != job.uuid:
return
event.set()
# Process first extracting entry
test_docker_interface._process_pull_image_log(job_id, entry1)
coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, listen_for_job_end)
await install_task
await event.wait()
# Find the job to verify progress
layer_job = None
for job in coresys.jobs.jobs:
if job.parent_id == job_id and job.reference == layer_id:
layer_job = job
break
events = [
evt.args[0]["data"]["data"]
for evt in ha_ws_client.async_send_command.call_args_list
if "data" in evt.args[0] and evt.args[0]["data"]["event"] == WSEvent.JOB
]
assert layer_job is not None, "Layer job should have been created"
# Progress calculation: 50 + (50 * 91300/100000) = 50 + 45.65 = 95.65 -> floors to 95.6
assert layer_job.progress == 95.6
def make_sub_log(layer_id: str):
return [
{
"stage": evt["stage"],
"progress": evt["progress"],
"done": evt["done"],
"extra": evt["extra"],
}
for evt in events
if evt["name"] == "Pulling container image layer"
and evt["reference"] == layer_id
and evt["parent_id"] == job.uuid
]
layer_1_log = make_sub_log("1e214cd6d7d0")
assert len(layer_1_log) == 14
assert layer_1_log == [
{"stage": "Pulling fs layer", "progress": 0, "done": False, "extra": None},
{
"stage": "Downloading",
"progress": 11.9,
"done": False,
"extra": {"current": 103619904, "total": 436480882},
},
{
"stage": "Downloading",
"progress": 26.1,
"done": False,
"extra": {"current": 227726144, "total": 436480882},
},
{
"stage": "Downloading",
"progress": 49.6,
"done": False,
"extra": {"current": 433170048, "total": 436480882},
},
{
"stage": "Retrying download",
"progress": 0,
"done": False,
"extra": None,
},
{
"stage": "Retrying download",
"progress": 0,
"done": False,
"extra": None,
},
{
"stage": "Downloading",
"progress": 11.9,
"done": False,
"extra": {"current": 103619904, "total": 436480882},
},
{
"stage": "Downloading",
"progress": 26.1,
"done": False,
"extra": {"current": 227726144, "total": 436480882},
},
{
"stage": "Downloading",
"progress": 49.6,
"done": False,
"extra": {"current": 433170048, "total": 436480882},
},
{
"stage": "Verifying Checksum",
"progress": 50,
"done": False,
"extra": {"current": 433170048, "total": 436480882},
},
{
"stage": "Download complete",
"progress": 50,
"done": False,
"extra": {"current": 433170048, "total": 436480882},
},
{
"stage": "Extracting",
"progress": 80.0,
"done": False,
"extra": {"current": 261816320, "total": 436480882},
},
{
"stage": "Extracting",
"progress": 100.0,
"done": False,
"extra": {"current": 436480882, "total": 436480882},
},
{
"stage": "Pull complete",
"progress": 100.0,
"done": True,
"extra": {"current": 436480882, "total": 436480882},
},
]
# Process second entry - this should NOT raise DockerLogOutOfOrder
# Previously this would fail because the calculated progress (95.642...) was less than the stored value (95.7 if rounded up)
# With floor rounding, both values are consistent: calculated 95.6 <= stored 95.6
try:
test_docker_interface._process_pull_image_log(job_id, entry2)
except DockerLogOutOfOrder:
pytest.fail("DockerLogOutOfOrder should not be raised due to precision fix")

View File
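The two tests above exercise the actual fix in this compare: progress percentages are floored to one decimal before they are stored and compared, so a later event whose raw percentage is fractionally lower can no longer trip the out-of-order check. Sketched standalone (helper name is illustrative):

from math import floor

def floor_progress(current: int, total: int, offset: float = 50.0) -> float:
    """Map layer progress into [offset, offset + 50], floored to one decimal."""
    raw = offset + 50 * current / total
    return floor(raw * 10) / 10

# Extracting 91300/100000 -> 95.65 raw, stored as 95.6; the follow-up event at
# 91284/100000 -> 95.642 raw, also 95.6 -> no DockerLogOutOfOrder.
assert floor_progress(91300, 100_000) == 95.6
assert floor_progress(91284, 100_000) == 95.6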

@@ -1,134 +0,0 @@
[
{
"status": "Pulling from home-assistant/odroid-n2-homeassistant",
"id": "2025.7.1"
},
{
"status": "Already exists",
"progressDetail": {},
"id": "6e771e15690e"
},
{
"status": "Already exists",
"progressDetail": {},
"id": "58da640818f4"
},
{
"status": "Pulling fs layer",
"progressDetail": {},
"id": "1e214cd6d7d0"
},
{
"status": "Already exists",
"progressDetail": {},
"id": "1a38e1d5e18d"
},
{
"status": "Waiting",
"progressDetail": {},
"id": "1e214cd6d7d0"
},
{
"status": "Downloading",
"progressDetail": {
"current": 103619904,
"total": 436480882
},
"progress": "[===========> ] 103.6MB/436.5MB",
"id": "1e214cd6d7d0"
},
{
"status": "Downloading",
"progressDetail": {
"current": 227726144,
"total": 436480882
},
"progress": "[==========================> ] 227.7MB/436.5MB",
"id": "1e214cd6d7d0"
},
{
"status": "Downloading",
"progressDetail": {
"current": 433170048,
"total": 436480882
},
"progress": "[=================================================> ] 433.2MB/436.5MB",
"id": "1e214cd6d7d0"
},
{
"status": "Retrying in 2 seconds",
"progressDetail": {},
"id": "1e214cd6d7d0"
},
{
"status": "Retrying in 1 seconds",
"progressDetail": {},
"id": "1e214cd6d7d0"
},
{
"status": "Downloading",
"progressDetail": {
"current": 103619904,
"total": 436480882
},
"progress": "[===========> ] 103.6MB/436.5MB",
"id": "1e214cd6d7d0"
},
{
"status": "Downloading",
"progressDetail": {
"current": 227726144,
"total": 436480882
},
"progress": "[==========================> ] 227.7MB/436.5MB",
"id": "1e214cd6d7d0"
},
{
"status": "Downloading",
"progressDetail": {
"current": 433170048,
"total": 436480882
},
"progress": "[=================================================> ] 433.2MB/436.5MB",
"id": "1e214cd6d7d0"
},
{
"status": "Verifying Checksum",
"progressDetail": {},
"id": "1e214cd6d7d0"
},
{
"status": "Download complete",
"progressDetail": {},
"id": "1e214cd6d7d0"
},
{
"status": "Extracting",
"progressDetail": {
"current": 261816320,
"total": 436480882
},
"progress": "[=============================> ] 261.8MB/436.5MB",
"id": "1e214cd6d7d0"
},
{
"status": "Extracting",
"progressDetail": {
"current": 436480882,
"total": 436480882
},
"progress": "[==================================================>] 436.5MB/436.5MB",
"id": "1e214cd6d7d0"
},
{
"status": "Pull complete",
"progressDetail": {},
"id": "1e214cd6d7d0"
},
{
"status": "Digest: sha256:7d97da645f232f82a768d0a537e452536719d56d484d419836e53dbe3e4ec736"
},
{
"status": "Status: Downloaded newer image for ghcr.io/home-assistant/odroid-n2-homeassistant:2025.7.1"
}
]

View File

@@ -36,8 +36,6 @@ async def test_equals_dbus_interface_no_settings(coresys: CoreSys):
vlan=None,
path="platform-ff3f0000.ethernet",
mac="AA:BB:CC:DD:EE:FF",
mdns=None,
llmnr=None,
)
# Get network interface and remove its connection to simulate no settings
@@ -66,8 +64,6 @@ async def test_equals_dbus_interface_connection_name_match(coresys: CoreSys):
vlan=None,
path="platform-ff3f0000.ethernet",
mac="AA:BB:CC:DD:EE:FF",
mdns=None,
llmnr=None,
)
# Get the network interface - this should have connection settings with interface-name = "eth0"
@@ -94,8 +90,6 @@ def test_equals_dbus_interface_connection_name_no_match():
vlan=None,
path="platform-ff3f0000.ethernet",
mac="AA:BB:CC:DD:EE:FF",
mdns=None,
llmnr=None,
)
# Mock network interface with different connection name
@@ -131,8 +125,6 @@ async def test_equals_dbus_interface_path_match(
vlan=None,
path="platform-ff3f0000.ethernet",
mac="AA:BB:CC:DD:EE:FF",
mdns=None,
llmnr=None,
)
# Add match settings with path and remove interface name to force path matching
@@ -164,8 +156,6 @@ def test_equals_dbus_interface_vlan_type_mismatch():
vlan=VlanConfig(id=10, interface="0c23631e-2118-355c-bbb0-8943229cb0d6"),
path="",
mac="52:54:00:2B:36:80",
mdns=None,
llmnr=None,
)
# Mock non-VLAN NetworkInterface - should return False immediately
@@ -195,8 +185,6 @@ def test_equals_dbus_interface_vlan_missing_info():
vlan=None, # Missing VLAN config!
path="",
mac="52:54:00:2B:36:80",
mdns=None,
llmnr=None,
)
# Mock VLAN NetworkInterface
@@ -230,8 +218,6 @@ def test_equals_dbus_interface_vlan_no_vlan_settings():
vlan=VlanConfig(id=10, interface="0c23631e-2118-355c-bbb0-8943229cb0d6"),
path="",
mac="52:54:00:2B:36:80",
mdns=None,
llmnr=None,
)
# Mock VLAN NetworkInterface without VLAN settings
@@ -285,8 +271,6 @@ async def test_equals_dbus_interface_eth0_10_real(
),
path="",
mac="52:54:00:2B:36:80",
mdns=None,
llmnr=None,
)
# Test should pass with matching VLAN config

View File

@@ -366,21 +366,15 @@ async def test_throttle_rate_limit(coresys: CoreSys, error: JobException | None)
test = TestClass(coresys)
start = utcnow()
with time_machine.travel(start):
await asyncio.gather(*[test.execute(), test.execute()])
await asyncio.gather(*[test.execute(), test.execute()])
assert test.call == 2
with (
time_machine.travel(start + timedelta(milliseconds=1)),
pytest.raises(JobException if error is None else error),
):
with pytest.raises(JobException if error is None else error):
await test.execute()
assert test.call == 2
with time_machine.travel(start + timedelta(hours=1, milliseconds=1)):
with time_machine.travel(utcnow() + timedelta(hours=1)):
await test.execute()
assert test.call == 3
@@ -836,18 +830,15 @@ async def test_group_throttle(coresys: CoreSys):
test1 = TestClass(coresys, "test1")
test2 = TestClass(coresys, "test2")
start = utcnow()
# One call of each should work. The subsequent calls will be silently throttled due to the throttle period
with time_machine.travel(start):
await asyncio.gather(
test1.execute(0), test1.execute(0), test2.execute(0), test2.execute(0)
)
await asyncio.gather(
test1.execute(0), test1.execute(0), test2.execute(0), test2.execute(0)
)
assert test1.call == 1
assert test2.call == 1
# First call to each will work again since the period has cleared. The second is throttled once more as they don't wait
with time_machine.travel(start + timedelta(milliseconds=100)):
with time_machine.travel(utcnow() + timedelta(milliseconds=100)):
await asyncio.gather(
test1.execute(0.1),
test1.execute(0.1),
@@ -887,18 +878,15 @@ async def test_group_throttle_with_queue(coresys: CoreSys):
test1 = TestClass(coresys, "test1")
test2 = TestClass(coresys, "test2")
start = utcnow()
# One call of each should work. The subsequent calls will be silently throttled after waiting due to the throttle period
with time_machine.travel(start):
await asyncio.gather(
*[test1.execute(0), test1.execute(0), test2.execute(0), test2.execute(0)]
)
await asyncio.gather(
*[test1.execute(0), test1.execute(0), test2.execute(0), test2.execute(0)]
)
assert test1.call == 1
assert test2.call == 1
# All calls should work as we cleared the period. And tasks take longer than the period and are queued
with time_machine.travel(start + timedelta(milliseconds=100)):
with time_machine.travel(utcnow() + timedelta(milliseconds=100)):
await asyncio.gather(
*[
test1.execute(0.1),
@@ -939,25 +927,21 @@ async def test_group_throttle_rate_limit(coresys: CoreSys, error: JobException |
test1 = TestClass(coresys, "test1")
test2 = TestClass(coresys, "test2")
start = utcnow()
with time_machine.travel(start):
await asyncio.gather(
*[test1.execute(), test1.execute(), test2.execute(), test2.execute()]
)
await asyncio.gather(
*[test1.execute(), test1.execute(), test2.execute(), test2.execute()]
)
assert test1.call == 2
assert test2.call == 2
with time_machine.travel(start + timedelta(milliseconds=1)):
with pytest.raises(JobException if error is None else error):
await test1.execute()
with pytest.raises(JobException if error is None else error):
await test2.execute()
with pytest.raises(JobException if error is None else error):
await test1.execute()
with pytest.raises(JobException if error is None else error):
await test2.execute()
assert test1.call == 2
assert test2.call == 2
with time_machine.travel(start + timedelta(hours=1, milliseconds=1)):
with time_machine.travel(utcnow() + timedelta(hours=1)):
await test1.execute()
await test2.execute()
@@ -1301,26 +1285,20 @@ async def test_concurency_reject_and_rate_limit(
test = TestClass(coresys)
start = utcnow()
with time_machine.travel(start):
results = await asyncio.gather(
*[test.execute(0.1), test.execute(), test.execute()], return_exceptions=True
)
results = await asyncio.gather(
*[test.execute(0.1), test.execute(), test.execute()], return_exceptions=True
)
assert results[0] is None
assert isinstance(results[1], JobException)
assert isinstance(results[2], JobException)
assert test.call == 1
with (
time_machine.travel(start + timedelta(milliseconds=1)),
pytest.raises(JobException if error is None else error),
):
with pytest.raises(JobException if error is None else error):
await test.execute()
assert test.call == 1
with time_machine.travel(start + timedelta(hours=1, milliseconds=1)):
with time_machine.travel(utcnow() + timedelta(hours=1)):
await test.execute()
assert test.call == 2
@@ -1364,22 +1342,18 @@ async def test_group_concurrency_with_group_throttling(coresys: CoreSys):
test = TestClass(coresys)
start = utcnow()
# First call should work
with time_machine.travel(start):
await test.main_method()
await test.main_method()
assert test.call_count == 1
assert test.nested_call_count == 1
# Second call should be throttled (not execute due to throttle period)
with time_machine.travel(start + timedelta(milliseconds=1)):
await test.main_method()
await test.main_method()
assert test.call_count == 1 # Still 1, throttled
assert test.nested_call_count == 1 # Still 1, throttled
# Wait for throttle period to pass and try again
with time_machine.travel(start + timedelta(milliseconds=60)):
with time_machine.travel(utcnow() + timedelta(milliseconds=60)):
await test.main_method()
assert test.call_count == 2 # Should execute now

View File
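These throttle tests all follow the same time_machine pattern: exhaust the rate limit, assert further calls are throttled, then travel past the period and call again. A toy version of that pattern (standalone sketch; the real throttling comes from the Job decorator):

from datetime import datetime, timedelta, timezone

import time_machine

PERIOD = timedelta(hours=1)
calls = 0
last_call: datetime | None = None

def throttled() -> None:
    """Run at most once per PERIOD; silently drop calls inside it."""
    global calls, last_call
    now = datetime.now(timezone.utc)
    if last_call and now - last_call < PERIOD:
        return  # throttled
    last_call = now
    calls += 1

throttled()
throttled()  # inside the period: silently dropped
assert calls == 1
with time_machine.travel(datetime.now(timezone.utc) + PERIOD):
    throttled()  # period elapsed: executes again
assert calls == 2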

@@ -12,7 +12,7 @@ from supervisor.addons.addon import Addon
from supervisor.arch import CpuArch
from supervisor.backups.manager import BackupManager
from supervisor.coresys import CoreSys
from supervisor.exceptions import AddonNotSupportedError, StoreJobError
from supervisor.exceptions import AddonsNotSupportedError, StoreJobError
from supervisor.homeassistant.module import HomeAssistant
from supervisor.store import StoreManager
from supervisor.store.addon import AddonStore
@@ -172,7 +172,7 @@ async def test_update_unavailable_addon(
),
patch("shutil.disk_usage", return_value=(42, 42, (1024.0**3))),
):
with pytest.raises(AddonNotSupportedError):
with pytest.raises(AddonsNotSupportedError):
await coresys.addons.update("local_ssh", backup=True)
backup.assert_not_called()
@@ -227,7 +227,7 @@ async def test_install_unavailable_addon(
new=PropertyMock(return_value=AwesomeVersion("2022.1.1")),
),
patch("shutil.disk_usage", return_value=(42, 42, (1024.0**3))),
pytest.raises(AddonNotSupportedError),
pytest.raises(AddonsNotSupportedError),
):
await coresys.addons.install("local_ssh")