Compare commits


2 Commits

Author SHA1 Message Date
Stefan Agner
230f359bf7 Support containerd snapshotter progress reporting 2025-11-05 15:09:17 +01:00
Stefan Agner
f996e60784 Fix docker image pull progress blocked by small layers
Small Docker layers (typically <100 bytes) can skip the downloading phase
entirely, going directly from "Pulling fs layer" to "Download complete"
without emitting any progress events with byte counts. This caused the
aggregate progress calculation to block indefinitely, as it required all
layer jobs to have their `extra` field populated with byte counts before
proceeding.
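
For illustration, the status stream for such a layer might look like the sketch below (hypothetical layer id and event shapes, following Docker's pull-progress JSON): no download event ever carries `progressDetail` byte counts, so the layer job's `extra` stays empty until "Extracting" finally arrives.

```python
# Hypothetical pull-log events for a ~100-byte layer (illustrative only).
# The "Downloading" status, which normally carries progressDetail with
# current/total byte counts, never appears, so nothing populates `extra`
# until the "Extracting" event shows up much later.
tiny_layer_events = [
    {"status": "Pulling fs layer", "id": "a3ed95caeb02"},
    {"status": "Download complete", "id": "a3ed95caeb02"},
    # ...long gap while larger layers download...
    {"status": "Extracting", "id": "a3ed95caeb02",
     "progressDetail": {"current": 96, "total": 96}},
    {"status": "Pull complete", "id": "a3ed95caeb02"},
]
```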

The issue manifested as parent job progress jumping from 0% to 97.9% after
long delays, as seen when a 96-byte layer held up progress reporting for
~50 seconds until it finally reached the "Extracting" phase.

Set a minimal `extra` field (current=1, total=1) when layers reach
"Download complete" without having gone through the downloading phase.
This allows the aggregate progress calculation to proceed immediately
while still correctly representing the layer as 100% downloaded.
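
A minimal sketch of that idea (simplified, with a hypothetical helper and stand-in job type; the real change lives in the Supervisor's `DockerInterface` pull-log handling):

```python
from dataclasses import dataclass


@dataclass
class LayerJob:
    """Stand-in for a Supervisor layer job (illustrative only)."""
    stage: str = "Pulling fs layer"
    extra: dict | None = None


def on_layer_stage(job: LayerJob, stage: str, progress_detail: dict | None) -> None:
    """Record byte counts for a layer, falling back to a 1/1 placeholder."""
    job.stage = stage
    if stage in ("Downloading", "Extracting") and progress_detail:
        # Normal path: Docker reports real byte counts in progressDetail.
        job.extra = {
            "current": progress_detail.get("current"),
            "total": progress_detail.get("total"),
        }
    elif stage == "Download complete" and not job.extra:
        # Tiny/cached layer skipped "Downloading" entirely: set a minimal
        # placeholder so the aggregate calculation no longer blocks on it.
        job.extra = {"current": 1, "total": 1}
```

With the placeholder in place, the aggregate calculation can weight layers that reported real byte totals and simply count placeholder layers as already downloaded.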

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-05 14:40:16 +01:00
87 changed files with 1112 additions and 8378 deletions

View File

@@ -34,9 +34,6 @@ on:
env:
DEFAULT_PYTHON: "3.13"
COSIGN_VERSION: "v2.5.3"
CRANE_VERSION: "v0.20.7"
CRANE_SHA256: "8ef3564d264e6b5ca93f7b7f5652704c4dd29d33935aff6947dd5adefd05953e"
BUILD_NAME: supervisor
BUILD_TYPE: supervisor
@@ -56,7 +53,7 @@ jobs:
requirements: ${{ steps.requirements.outputs.changed }}
steps:
- name: Checkout the repository
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
@@ -95,7 +92,7 @@ jobs:
arch: ${{ fromJson(needs.init.outputs.architectures) }}
steps:
- name: Checkout the repository
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
@@ -110,7 +107,7 @@ jobs:
# home-assistant/wheels doesn't support sha pinning
- name: Build wheels
if: needs.init.outputs.requirements == 'true'
uses: home-assistant/wheels@2025.11.0
uses: home-assistant/wheels@2025.10.0
with:
abi: cp313
tag: musllinux_1_2
@@ -129,7 +126,7 @@ jobs:
- name: Set up Python ${{ env.DEFAULT_PYTHON }}
if: needs.init.outputs.publish == 'true'
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: ${{ env.DEFAULT_PYTHON }}
@@ -137,7 +134,7 @@ jobs:
if: needs.init.outputs.publish == 'true'
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
with:
cosign-release: ${{ env.COSIGN_VERSION }}
cosign-release: "v2.5.3"
- name: Install dirhash and calc hash
if: needs.init.outputs.publish == 'true'
@@ -176,12 +173,12 @@ jobs:
version:
name: Update version
needs: ["init", "run_supervisor", "retag_deprecated"]
needs: ["init", "run_supervisor"]
runs-on: ubuntu-latest
steps:
- name: Checkout the repository
if: needs.init.outputs.publish == 'true'
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Initialize git
if: needs.init.outputs.publish == 'true'
@@ -206,7 +203,7 @@ jobs:
timeout-minutes: 60
steps:
- name: Checkout the repository
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
# home-assistant/builder doesn't support sha pinning
- name: Build the Supervisor
@@ -355,50 +352,3 @@ jobs:
- name: Get supervisor logs on failiure
if: ${{ cancelled() || failure() }}
run: docker logs hassio_supervisor
retag_deprecated:
needs: ["build", "init"]
name: Re-tag deprecated ${{ matrix.arch }} images
if: needs.init.outputs.publish == 'true'
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
packages: write
strategy:
matrix:
arch: ["armhf", "armv7", "i386"]
env:
# Last available release for deprecated architectures
FROZEN_VERSION: "2025.11.5"
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Install Cosign
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
with:
cosign-release: ${{ env.COSIGN_VERSION }}
- name: Install crane
run: |
curl -sLO https://github.com/google/go-containerregistry/releases/download/${{ env.CRANE_VERSION }}/go-containerregistry_Linux_x86_64.tar.gz
echo "${{ env.CRANE_SHA256 }} go-containerregistry_Linux_x86_64.tar.gz" | sha256sum -c -
tar xzf go-containerregistry_Linux_x86_64.tar.gz crane
sudo mv crane /usr/local/bin/
- name: Re-tag deprecated image with updated version label
run: |
crane auth login ghcr.io -u ${{ github.repository_owner }} -p ${{ secrets.GITHUB_TOKEN }}
crane mutate \
--label io.hass.version=${{ needs.init.outputs.version }} \
--tag ghcr.io/home-assistant/${{ matrix.arch }}-hassio-supervisor:${{ needs.init.outputs.version }} \
ghcr.io/home-assistant/${{ matrix.arch }}-hassio-supervisor:${{ env.FROZEN_VERSION }}
- name: Sign image with Cosign
run: |
cosign sign --yes ghcr.io/home-assistant/${{ matrix.arch }}-hassio-supervisor:${{ needs.init.outputs.version }}

View File

@@ -26,10 +26,10 @@ jobs:
name: Prepare Python dependencies
steps:
- name: Check out code from GitHub
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python
id: python
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: ${{ env.DEFAULT_PYTHON }}
- name: Restore Python virtual environment
@@ -68,9 +68,9 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -111,9 +111,9 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -154,7 +154,7 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Register hadolint problem matcher
run: |
echo "::add-matcher::.github/workflows/matchers/hadolint.json"
@@ -169,9 +169,9 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -213,9 +213,9 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -257,9 +257,9 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -293,9 +293,9 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -339,9 +339,9 @@ jobs:
name: Run tests Python ${{ needs.prepare.outputs.python-version }}
steps:
- name: Check out code from GitHub
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -398,9 +398,9 @@ jobs:
needs: ["pytest", "prepare"]
steps:
- name: Check out code from GitHub
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}

View File

@@ -11,7 +11,7 @@ jobs:
name: Release Drafter
steps:
- name: Checkout the repository
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0

View File

@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out code from GitHub
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Sentry Release
uses: getsentry/action-release@128c5058bbbe93c8e02147fe0a9c713f166259a6 # v3.4.0
env:

View File

@@ -14,7 +14,7 @@ jobs:
latest_version: ${{ steps.latest_frontend_version.outputs.latest_tag }}
steps:
- name: Checkout code
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Get latest frontend release
id: latest_frontend_version
uses: abatilo/release-info-action@32cb932219f1cee3fc4f4a298fd65ead5d35b661 # v1.3.3
@@ -49,7 +49,7 @@ jobs:
if: needs.check-version.outputs.skip != 'true'
steps:
- name: Checkout code
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Clear www folder
run: |
rm -rf supervisor/api/panel/*
@@ -68,7 +68,7 @@ jobs:
run: |
rm -f supervisor/api/panel/home_assistant_frontend_supervisor-*.tar.gz
- name: Create PR
uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412 # v7.0.9
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
commit-message: "Update frontend to version ${{ needs.check-version.outputs.latest_version }}"
branch: autoupdate-frontend

View File

@@ -1,7 +1,13 @@
image: ghcr.io/home-assistant/{arch}-hassio-supervisor
build_from:
aarch64: ghcr.io/home-assistant/aarch64-base-python:3.13-alpine3.22-2025.11.1
amd64: ghcr.io/home-assistant/amd64-base-python:3.13-alpine3.22-2025.11.1
aarch64: ghcr.io/home-assistant/aarch64-base-python:3.13-alpine3.22
armhf: ghcr.io/home-assistant/armhf-base-python:3.13-alpine3.22
armv7: ghcr.io/home-assistant/armv7-base-python:3.13-alpine3.22
amd64: ghcr.io/home-assistant/amd64-base-python:3.13-alpine3.22
i386: ghcr.io/home-assistant/i386-base-python:3.13-alpine3.22
codenotary:
signer: notary@home-assistant.io
base_image: notary@home-assistant.io
cosign:
base_identity: https://github.com/home-assistant/docker-base/.*
identity: https://github.com/home-assistant/supervisor/.*

View File

@@ -1,12 +1,10 @@
aiodns==3.5.0
aiodocker==0.24.0
aiohttp==3.13.2
atomicwrites-homeassistant==1.4.1
attrs==25.4.0
awesomeversion==25.8.0
backports.zstd==1.1.0
blockbuster==1.5.25
brotli==1.2.0
brotli==1.1.0
ciso8601==2.3.3
colorlog==6.10.1
cpe==1.3.1
@@ -25,8 +23,8 @@ pyudev==0.24.4
PyYAML==6.0.3
requests==2.32.5
securetar==2025.2.1
sentry-sdk==2.46.0
sentry-sdk==2.43.0
setuptools==80.9.0
voluptuous==0.15.2
dbus-fast==3.1.2
dbus-fast==2.44.5
zlib-fast==0.2.1

View File

@@ -1,16 +1,16 @@
astroid==4.0.2
coverage==7.12.0
astroid==4.0.1
coverage==7.11.0
mypy==1.18.2
pre-commit==4.5.0
pylint==4.0.3
pre-commit==4.3.0
pylint==4.0.2
pytest-aiohttp==1.1.0
pytest-asyncio==1.3.0
pytest-asyncio==0.25.2
pytest-cov==7.0.0
pytest-timeout==2.4.0
pytest==9.0.1
ruff==0.14.6
time-machine==3.1.0
types-docker==7.1.0.20251127
pytest==8.4.2
ruff==0.14.3
time-machine==2.19.0
types-docker==7.1.0.20251009
types-pyyaml==6.0.12.20250915
types-requests==2.32.4.20250913
urllib3==2.5.0

View File

@@ -2,9 +2,7 @@
from __future__ import annotations
import base64
from functools import cached_property
import json
from pathlib import Path
from typing import TYPE_CHECKING, Any
@@ -14,15 +12,12 @@ from ..const import (
ATTR_ARGS,
ATTR_BUILD_FROM,
ATTR_LABELS,
ATTR_PASSWORD,
ATTR_SQUASH,
ATTR_USERNAME,
FILE_SUFFIX_CONFIGURATION,
META_ADDON,
SOCKET_DOCKER,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..docker.const import DOCKER_HUB
from ..docker.interface import MAP_ARCH
from ..exceptions import ConfigurationFileError, HassioArchNotFound
from ..utils.common import FileConfiguration, find_one_filetype
@@ -127,43 +122,8 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
except HassioArchNotFound:
return False
def get_docker_config_json(self) -> str | None:
"""Generate Docker config.json content with registry credentials for base image.
Returns a JSON string with registry credentials for the base image's registry,
or None if no matching registry is configured.
Raises:
HassioArchNotFound: If the add-on is not supported on the current architecture.
"""
# Early return before accessing base_image to avoid unnecessary arch lookup
if not self.sys_docker.config.registries:
return None
registry = self.sys_docker.config.get_registry_for_image(self.base_image)
if not registry:
return None
stored = self.sys_docker.config.registries[registry]
username = stored[ATTR_USERNAME]
password = stored[ATTR_PASSWORD]
# Docker config.json uses base64-encoded "username:password" for auth
auth_string = base64.b64encode(f"{username}:{password}".encode()).decode()
# Use the actual registry URL for the key
# Docker Hub uses "https://index.docker.io/v1/" as the key
registry_key = (
"https://index.docker.io/v1/" if registry == DOCKER_HUB else registry
)
config = {"auths": {registry_key: {"auth": auth_string}}}
return json.dumps(config)
def get_docker_args(
self, version: AwesomeVersion, image_tag: str, docker_config_path: Path | None
self, version: AwesomeVersion, image_tag: str
) -> dict[str, Any]:
"""Create a dict with Docker run args."""
dockerfile_path = self.get_dockerfile().relative_to(self.addon.path_location)
@@ -212,24 +172,12 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
self.addon.path_location
)
volumes = {
SOCKET_DOCKER: {"bind": "/var/run/docker.sock", "mode": "rw"},
addon_extern_path: {"bind": "/addon", "mode": "ro"},
}
# Mount Docker config with registry credentials if available
if docker_config_path:
docker_config_extern_path = self.sys_config.local_to_extern_path(
docker_config_path
)
volumes[docker_config_extern_path] = {
"bind": "/root/.docker/config.json",
"mode": "ro",
}
return {
"command": build_cmd,
"volumes": volumes,
"volumes": {
SOCKET_DOCKER: {"bind": "/var/run/docker.sock", "mode": "rw"},
addon_extern_path: {"bind": "/addon", "mode": "ro"},
},
"working_dir": "/addon",
}

View File

@@ -152,7 +152,6 @@ class RestAPI(CoreSysAttributes):
self._api_host.advanced_logs,
identifier=syslog_identifier,
latest=True,
no_colors=True,
),
),
web.get(
@@ -450,7 +449,6 @@ class RestAPI(CoreSysAttributes):
await async_capture_exception(err)
kwargs.pop("follow", None) # Follow is not supported for Docker logs
kwargs.pop("latest", None) # Latest is not supported for Docker logs
kwargs.pop("no_colors", None) # no_colors not supported for Docker logs
return await api_supervisor.logs(*args, **kwargs)
self.webapp.add_routes(
@@ -462,7 +460,7 @@ class RestAPI(CoreSysAttributes):
),
web.get(
"/supervisor/logs/latest",
partial(get_supervisor_logs, latest=True, no_colors=True),
partial(get_supervisor_logs, latest=True),
),
web.get("/supervisor/logs/boots/{bootid}", get_supervisor_logs),
web.get(
@@ -578,7 +576,7 @@ class RestAPI(CoreSysAttributes):
),
web.get(
"/addons/{addon}/logs/latest",
partial(get_addon_logs, latest=True, no_colors=True),
partial(get_addon_logs, latest=True),
),
web.get("/addons/{addon}/logs/boots/{bootid}", get_addon_logs),
web.get(
@@ -813,10 +811,6 @@ class RestAPI(CoreSysAttributes):
self.webapp.add_routes(
[
web.get("/docker/info", api_docker.info),
web.post(
"/docker/migrate-storage-driver",
api_docker.migrate_docker_storage_driver,
),
web.post("/docker/options", api_docker.options),
web.get("/docker/registries", api_docker.registries),
web.post("/docker/registries", api_docker.create_registry),

View File

@@ -4,7 +4,6 @@ import logging
from typing import Any
from aiohttp import web
from awesomeversion import AwesomeVersion
import voluptuous as vol
from supervisor.resolution.const import ContextType, IssueType, SuggestionType
@@ -17,7 +16,6 @@ from ..const import (
ATTR_PASSWORD,
ATTR_REGISTRIES,
ATTR_STORAGE,
ATTR_STORAGE_DRIVER,
ATTR_USERNAME,
ATTR_VERSION,
)
@@ -44,12 +42,6 @@ SCHEMA_OPTIONS = vol.Schema(
}
)
SCHEMA_MIGRATE_DOCKER_STORAGE_DRIVER = vol.Schema(
{
vol.Required(ATTR_STORAGE_DRIVER): vol.In(["overlayfs", "overlay2"]),
}
)
class APIDocker(CoreSysAttributes):
"""Handle RESTful API for Docker configuration."""
@@ -131,27 +123,3 @@ class APIDocker(CoreSysAttributes):
del self.sys_docker.config.registries[hostname]
await self.sys_docker.config.save_data()
@api_process
async def migrate_docker_storage_driver(self, request: web.Request) -> None:
"""Migrate Docker storage driver."""
if (
not self.coresys.os.available
or not self.coresys.os.version
or self.coresys.os.version < AwesomeVersion("17.0.dev0")
):
raise APINotFound(
"Home Assistant OS 17.0 or newer required for Docker storage driver migration"
)
body = await api_validate(SCHEMA_MIGRATE_DOCKER_STORAGE_DRIVER, request)
await self.sys_dbus.agent.system.migrate_docker_storage_driver(
body[ATTR_STORAGE_DRIVER]
)
_LOGGER.info("Host system reboot required to apply Docker storage migration")
self.sys_resolution.create_issue(
IssueType.REBOOT_REQUIRED,
ContextType.SYSTEM,
suggestions=[SuggestionType.EXECUTE_REBOOT],
)

View File

@@ -206,7 +206,6 @@ class APIHost(CoreSysAttributes):
identifier: str | None = None,
follow: bool = False,
latest: bool = False,
no_colors: bool = False,
) -> web.StreamResponse:
"""Return systemd-journald logs."""
log_formatter = LogFormatter.PLAIN
@@ -252,9 +251,6 @@ class APIHost(CoreSysAttributes):
if "verbose" in request.query or request.headers[ACCEPT] == CONTENT_TYPE_X_LOG:
log_formatter = LogFormatter.VERBOSE
if "no_colors" in request.query:
no_colors = True
if "lines" in request.query:
lines = request.query.get("lines", DEFAULT_LINES)
try:
@@ -284,9 +280,7 @@ class APIHost(CoreSysAttributes):
response = web.StreamResponse()
response.content_type = CONTENT_TYPE_TEXT
headers_returned = False
async for cursor, line in journal_logs_reader(
resp, log_formatter, no_colors
):
async for cursor, line in journal_logs_reader(resp, log_formatter):
try:
if not headers_returned:
if cursor:
@@ -324,12 +318,9 @@ class APIHost(CoreSysAttributes):
identifier: str | None = None,
follow: bool = False,
latest: bool = False,
no_colors: bool = False,
) -> web.StreamResponse:
"""Return systemd-journald logs. Wrapped as standard API handler."""
return await self.advanced_logs_handler(
request, identifier, follow, latest, no_colors
)
return await self.advanced_logs_handler(request, identifier, follow, latest)
@api_process
async def disk_usage(self, request: web.Request) -> dict:
@@ -343,14 +334,10 @@ class APIHost(CoreSysAttributes):
disk = self.sys_hardware.disk
total, _, free = await self.sys_run_in_executor(
total, used, _ = await self.sys_run_in_executor(
disk.disk_usage, self.sys_config.path_supervisor
)
# Calculate used by subtracting free makes sure we include reserved space
# in used space reporting.
used = total - free
known_paths = await self.sys_run_in_executor(
disk.get_dir_sizes,
{

View File

@@ -253,28 +253,18 @@ class APIIngress(CoreSysAttributes):
skip_auto_headers={hdrs.CONTENT_TYPE},
) as result:
headers = _response_header(result)
# Avoid parsing content_type in simple cases for better performance
if maybe_content_type := result.headers.get(hdrs.CONTENT_TYPE):
content_type = (maybe_content_type.partition(";"))[0].strip()
else:
content_type = result.content_type
# Empty body responses (304, 204, HEAD, etc.) should not be streamed,
# otherwise aiohttp < 3.9.0 may generate an invalid "0\r\n\r\n" chunk
# This also avoids setting content_type for empty responses.
if must_be_empty_body(request.method, result.status):
# If upstream contains content-type, preserve it (e.g. for HEAD requests)
if maybe_content_type:
headers[hdrs.CONTENT_TYPE] = content_type
return web.Response(
headers=headers,
status=result.status,
)
# Simple request
if (
hdrs.CONTENT_LENGTH in result.headers
# empty body responses should not be streamed,
# otherwise aiohttp < 3.9.0 may generate
# an invalid "0\r\n\r\n" chunk instead of an empty response.
must_be_empty_body(request.method, result.status)
or hdrs.CONTENT_LENGTH in result.headers
and int(result.headers.get(hdrs.CONTENT_LENGTH, 0)) < 4_194_000
):
# Return Response

View File

@@ -63,10 +63,12 @@ def json_loads(data: Any) -> dict[str, Any]:
def api_process(method):
"""Wrap function with true/false calls to rest api."""
async def wrap_api(*args, **kwargs) -> web.Response | web.StreamResponse:
async def wrap_api(
api: CoreSysAttributes, *args, **kwargs
) -> web.Response | web.StreamResponse:
"""Return API information."""
try:
answer = await method(*args, **kwargs)
answer = await method(api, *args, **kwargs)
except BackupFileNotFoundError as err:
return api_return_error(err, status=404)
except APIError as err:
@@ -107,10 +109,12 @@ def api_process_raw(content, *, error_type=None):
def wrap_method(method):
"""Wrap function with raw output to rest api."""
async def wrap_api(*args, **kwargs) -> web.Response | web.StreamResponse:
async def wrap_api(
api: CoreSysAttributes, *args, **kwargs
) -> web.Response | web.StreamResponse:
"""Return api information."""
try:
msg_data = await method(*args, **kwargs)
msg_data = await method(api, *args, **kwargs)
except APIError as err:
return api_return_error(
err,

View File

@@ -2,7 +2,6 @@
from __future__ import annotations
from asyncio import Task
from collections.abc import Callable, Coroutine
import logging
from typing import Any
@@ -39,13 +38,11 @@ class Bus(CoreSysAttributes):
self._listeners.setdefault(event, []).append(listener)
return listener
def fire_event(self, event: BusEvent, reference: Any) -> list[Task]:
def fire_event(self, event: BusEvent, reference: Any) -> None:
"""Fire an event to the bus."""
_LOGGER.debug("Fire event '%s' with '%s'", event, reference)
tasks: list[Task] = []
for listener in self._listeners.get(event, []):
tasks.append(self.sys_create_task(listener.callback(reference)))
return tasks
self.sys_create_task(listener.callback(reference))
def remove_listener(self, listener: EventListener) -> None:
"""Unregister an listener."""

View File

@@ -328,7 +328,6 @@ ATTR_STATE = "state"
ATTR_STATIC = "static"
ATTR_STDIN = "stdin"
ATTR_STORAGE = "storage"
ATTR_STORAGE_DRIVER = "storage_driver"
ATTR_SUGGESTIONS = "suggestions"
ATTR_SUPERVISOR = "supervisor"
ATTR_SUPERVISOR_INTERNET = "supervisor_internet"

View File

@@ -9,7 +9,6 @@ from datetime import UTC, datetime, tzinfo
from functools import partial
import logging
import os
import time
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, Self, TypeVar
@@ -656,14 +655,8 @@ class CoreSys:
if kwargs:
funct = partial(funct, **kwargs)
# Convert datetime to event loop time base
# If datetime is in the past, delay will be negative and call_at will
# schedule the call as soon as possible.
delay = when.timestamp() - time.time()
loop_time = self.loop.time() + delay
return self.loop.call_at(
loop_time, funct, *args, context=self._create_context()
when.timestamp(), funct, *args, context=self._create_context()
)

View File

@@ -15,8 +15,3 @@ class System(DBusInterface):
async def schedule_wipe_device(self) -> bool:
"""Schedule a factory reset on next system boot."""
return await self.connected_dbus.System.call("schedule_wipe_device")
@dbus_connected
async def migrate_docker_storage_driver(self, backend: str) -> None:
"""Migrate Docker storage driver."""
await self.connected_dbus.System.call("migrate_docker_storage_driver", backend)

View File

@@ -306,8 +306,6 @@ class DeviceType(IntEnum):
VLAN = 11
TUN = 16
VETH = 20
WIREGUARD = 29
LOOPBACK = 32
class WirelessMethodType(IntEnum):

View File

@@ -115,7 +115,7 @@ class DBusManager(CoreSysAttributes):
async def load(self) -> None:
"""Connect interfaces to D-Bus."""
if not await self.sys_run_in_executor(SOCKET_DBUS.exists):
if not SOCKET_DBUS.exists():
_LOGGER.error(
"No D-Bus support on Host. Disabled any kind of host control!"
)

View File

@@ -134,10 +134,9 @@ class NetworkManager(DBusInterfaceProxy):
async def check_connectivity(self, *, force: bool = False) -> ConnectivityState:
"""Check the connectivity of the host."""
if force:
return ConnectivityState(
await self.connected_dbus.call("check_connectivity")
)
return ConnectivityState(await self.connected_dbus.get("connectivity"))
return await self.connected_dbus.call("check_connectivity")
else:
return await self.connected_dbus.get("connectivity")
async def connect(self, bus: MessageBus) -> None:
"""Connect to system's D-Bus."""

View File

@@ -69,7 +69,7 @@ class NetworkConnection(DBusInterfaceProxy):
@dbus_property
def state(self) -> ConnectionStateType:
"""Return the state of the connection."""
return ConnectionStateType(self.properties[DBUS_ATTR_STATE])
return self.properties[DBUS_ATTR_STATE]
@property
def state_flags(self) -> set[ConnectionStateFlags]:

View File

@@ -1,6 +1,5 @@
"""NetworkInterface object for Network Manager."""
import logging
from typing import Any
from dbus_fast.aio.message_bus import MessageBus
@@ -24,8 +23,6 @@ from .connection import NetworkConnection
from .setting import NetworkSetting
from .wireless import NetworkWireless
_LOGGER: logging.Logger = logging.getLogger(__name__)
class NetworkInterface(DBusInterfaceProxy):
"""NetworkInterface object represents Network Manager Device objects.
@@ -60,15 +57,7 @@ class NetworkInterface(DBusInterfaceProxy):
@dbus_property
def type(self) -> DeviceType:
"""Return interface type."""
try:
return DeviceType(self.properties[DBUS_ATTR_DEVICE_TYPE])
except ValueError:
_LOGGER.debug(
"Unknown device type %s for %s, treating as UNKNOWN",
self.properties[DBUS_ATTR_DEVICE_TYPE],
self.object_path,
)
return DeviceType.UNKNOWN
return self.properties[DBUS_ATTR_DEVICE_TYPE]
@property
@dbus_property

View File

@@ -75,7 +75,7 @@ class Resolved(DBusInterfaceProxy):
@dbus_property
def current_dns_server(
self,
) -> tuple[int, DNSAddressFamily, bytes] | None:
) -> list[tuple[int, DNSAddressFamily, bytes]] | None:
"""Return current DNS server."""
return self.properties[DBUS_ATTR_CURRENT_DNS_SERVER]
@@ -83,7 +83,7 @@ class Resolved(DBusInterfaceProxy):
@dbus_property
def current_dns_server_ex(
self,
) -> tuple[int, DNSAddressFamily, bytes, int, str] | None:
) -> list[tuple[int, DNSAddressFamily, bytes, int, str]] | None:
"""Return current DNS server including port and server name."""
return self.properties[DBUS_ATTR_CURRENT_DNS_SERVER_EX]

View File

@@ -70,7 +70,7 @@ class SystemdUnit(DBusInterface):
@dbus_connected
async def get_active_state(self) -> UnitActiveState:
"""Get active state of the unit."""
return UnitActiveState(await self.connected_dbus.Unit.get("active_state"))
return await self.connected_dbus.Unit.get("active_state")
@dbus_connected
def properties_changed(self) -> DBusSignalWrapper:

View File

@@ -9,7 +9,7 @@ from dbus_fast import Variant
from .const import EncryptType, EraseMode
def udisks2_bytes_to_path(path_bytes: bytes) -> Path:
def udisks2_bytes_to_path(path_bytes: bytearray) -> Path:
"""Convert bytes to path object without null character on end."""
if path_bytes and path_bytes[-1] == 0:
return Path(path_bytes[:-1].decode())
@@ -73,7 +73,7 @@ FormatOptionsDataType = TypedDict(
{
"label": NotRequired[str],
"take-ownership": NotRequired[bool],
"encrypt.passphrase": NotRequired[bytes],
"encrypt.passphrase": NotRequired[bytearray],
"encrypt.type": NotRequired[str],
"erase": NotRequired[str],
"update-partition-type": NotRequired[bool],

View File

@@ -7,10 +7,8 @@ from ipaddress import IPv4Address
import logging
import os
from pathlib import Path
import tempfile
from typing import TYPE_CHECKING, cast
import aiodocker
from attr import evolve
from awesomeversion import AwesomeVersion
import docker
@@ -706,38 +704,12 @@ class DockerAddon(DockerInterface):
with suppress(docker.errors.NotFound):
self.sys_docker.containers.get(builder_name).remove(force=True, v=True)
# Generate Docker config with registry credentials for base image if needed
docker_config_path: Path | None = None
docker_config_content = build_env.get_docker_config_json()
temp_dir: tempfile.TemporaryDirectory | None = None
try:
if docker_config_content:
# Create temporary directory for docker config
temp_dir = tempfile.TemporaryDirectory(
prefix="hassio_build_", dir=self.sys_config.path_tmp
)
docker_config_path = Path(temp_dir.name) / "config.json"
docker_config_path.write_text(
docker_config_content, encoding="utf-8"
)
_LOGGER.debug(
"Created temporary Docker config for build at %s",
docker_config_path,
)
result = self.sys_docker.run_command(
ADDON_BUILDER_IMAGE,
version=builder_version_tag,
name=builder_name,
**build_env.get_docker_args(
version, addon_image_tag, docker_config_path
),
)
finally:
# Clean up temporary directory
if temp_dir:
temp_dir.cleanup()
result = self.sys_docker.run_command(
ADDON_BUILDER_IMAGE,
version=builder_version_tag,
name=builder_name,
**build_env.get_docker_args(version, addon_image_tag),
)
logs = result.output.decode("utf-8")
@@ -745,21 +717,19 @@ class DockerAddon(DockerInterface):
error_message = f"Docker build failed for {addon_image_tag} (exit code {result.exit_code}). Build output:\n{logs}"
raise docker.errors.DockerException(error_message)
return addon_image_tag, logs
addon_image = self.sys_docker.images.get(addon_image_tag)
return addon_image, logs
try:
addon_image_tag, log = await self.sys_run_in_executor(build_image)
docker_image, log = await self.sys_run_in_executor(build_image)
_LOGGER.debug("Build %s:%s done: %s", self.image, version, log)
# Update meta data
self._meta = await self.sys_docker.images.inspect(addon_image_tag)
self._meta = docker_image.attrs
except (
docker.errors.DockerException,
requests.RequestException,
aiodocker.DockerError,
) as err:
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't build %s:%s: %s", self.image, version, err)
raise DockerError() from err
@@ -781,8 +751,11 @@ class DockerAddon(DockerInterface):
)
async def import_image(self, tar_file: Path) -> None:
"""Import a tar file as image."""
if docker_image := await self.sys_docker.import_image(tar_file):
self._meta = docker_image
docker_image = await self.sys_run_in_executor(
self.sys_docker.import_image, tar_file
)
if docker_image:
self._meta = docker_image.attrs
_LOGGER.info("Importing image %s and version %s", tar_file, self.version)
with suppress(DockerError):
@@ -796,21 +769,17 @@ class DockerAddon(DockerInterface):
version: AwesomeVersion | None = None,
) -> None:
"""Check if old version exists and cleanup other versions of image not in use."""
if not (use_image := image or self.image):
raise DockerError("Cannot determine image from metadata!", _LOGGER.error)
if not (use_version := version or self.version):
raise DockerError("Cannot determine version from metadata!", _LOGGER.error)
await self.sys_docker.cleanup_old_images(
use_image,
use_version,
await self.sys_run_in_executor(
self.sys_docker.cleanup_old_images,
(image := image or self.image),
version or self.version,
{old_image} if old_image else None,
keep_images={
f"{addon.image}:{addon.version}"
for addon in self.sys_addons.installed
if addon.slug != self.addon.slug
and addon.image
and addon.image in {old_image, use_image}
and addon.image in {old_image, image}
},
)

View File

@@ -15,12 +15,6 @@ from ..const import MACHINE_ID
RE_RETRYING_DOWNLOAD_STATUS = re.compile(r"Retrying in \d+ seconds?")
# Docker Hub registry identifier
DOCKER_HUB = "hub.docker.com"
# Regex to match images with a registry host (e.g., ghcr.io/org/image)
IMAGE_WITH_HOST = re.compile(r"^((?:[a-z0-9]+(?:-[a-z0-9]+)*\.)+[a-z]{2,})\/.+")
class Capabilities(StrEnum):
"""Linux Capabilities."""

View File

@@ -1,5 +1,6 @@
"""Init file for Supervisor Docker object."""
from collections.abc import Awaitable
from ipaddress import IPv4Address
import logging
import re
@@ -235,10 +236,11 @@ class DockerHomeAssistant(DockerInterface):
environment={ENV_TIME: self.sys_timezone},
)
async def is_initialize(self) -> bool:
def is_initialize(self) -> Awaitable[bool]:
"""Return True if Docker container exists."""
if not self.sys_homeassistant.version:
return False
return await self.sys_docker.container_is_initialized(
self.name, self.image, self.sys_homeassistant.version
return self.sys_run_in_executor(
self.sys_docker.container_is_initialized,
self.name,
self.image,
self.sys_homeassistant.version,
)

View File

@@ -6,17 +6,17 @@ from abc import ABC, abstractmethod
from collections import defaultdict
from collections.abc import Awaitable
from contextlib import suppress
from http import HTTPStatus
import logging
import re
from time import time
from typing import Any, cast
from uuid import uuid4
import aiodocker
from awesomeversion import AwesomeVersion
from awesomeversion.strategy import AwesomeVersionStrategy
import docker
from docker.models.containers import Container
from docker.models.images import Image
import requests
from ..bus import EventListener
@@ -33,7 +33,6 @@ from ..coresys import CoreSys
from ..exceptions import (
DockerAPIError,
DockerError,
DockerHubRateLimitExceeded,
DockerJobError,
DockerLogOutOfOrder,
DockerNotFound,
@@ -45,13 +44,16 @@ from ..jobs.decorator import Job
from ..jobs.job_group import JobGroup
from ..resolution.const import ContextType, IssueType, SuggestionType
from ..utils.sentry import async_capture_exception
from .const import DOCKER_HUB, ContainerState, PullImageLayerStage, RestartPolicy
from .const import ContainerState, PullImageLayerStage, RestartPolicy
from .manager import CommandReturn, PullLogEntry
from .monitor import DockerContainerStateEvent
from .stats import DockerStats
_LOGGER: logging.Logger = logging.getLogger(__name__)
IMAGE_WITH_HOST = re.compile(r"^((?:[a-z0-9]+(?:-[a-z0-9]+)*\.)+[a-z]{2,})\/.+")
DOCKER_HUB = "hub.docker.com"
MAP_ARCH: dict[CpuArch | str, str] = {
CpuArch.ARMV7: "linux/arm/v7",
CpuArch.ARMHF: "linux/arm/v6",
@@ -176,16 +178,25 @@ class DockerInterface(JobGroup, ABC):
return self.meta_config.get("Healthcheck")
def _get_credentials(self, image: str) -> dict:
"""Return a dictionary with credentials for docker login."""
"""Return a dictionay with credentials for docker login."""
registry = None
credentials = {}
registry = self.sys_docker.config.get_registry_for_image(image)
matcher = IMAGE_WITH_HOST.match(image)
# Custom registry
if matcher:
if matcher.group(1) in self.sys_docker.config.registries:
registry = matcher.group(1)
credentials[ATTR_REGISTRY] = registry
# If no match assume "dockerhub" as registry
elif DOCKER_HUB in self.sys_docker.config.registries:
registry = DOCKER_HUB
if registry:
stored = self.sys_docker.config.registries[registry]
credentials[ATTR_USERNAME] = stored[ATTR_USERNAME]
credentials[ATTR_PASSWORD] = stored[ATTR_PASSWORD]
if registry != DOCKER_HUB:
credentials[ATTR_REGISTRY] = registry
_LOGGER.debug(
"Logging in to %s as %s",
@@ -195,6 +206,17 @@ class DockerInterface(JobGroup, ABC):
return credentials
async def _docker_login(self, image: str) -> None:
"""Try to log in to the registry if there are credentials available."""
if not self.sys_docker.config.registries:
return
credentials = self._get_credentials(image)
if not credentials:
return
await self.sys_run_in_executor(self.sys_docker.docker.login, **credentials)
def _process_pull_image_log( # noqa: C901
self, install_job_id: str, reference: PullLogEntry
) -> None:
@@ -226,16 +248,28 @@ class DockerInterface(JobGroup, ABC):
job = j
break
# There should no longer be any real risk of logs out of order anymore.
# However tests with very small images have shown that sometimes Docker
# skips stages in log. So keeping this one as a safety check on null job
# This likely only occurs if the logs came in out of sync and we got progress before the Pulling FS Layer one
if not job:
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for image id {reference.id} and parent job {install_job_id} but could not find a matching job, skipping",
_LOGGER.debug,
)
# For progress calculation we assume downloading is 70% of time, extracting is 30% and others stages negligible
# Hopefully these come in order but if they sometimes get out of sync, avoid accidentally going backwards
# If it happens a lot though we may need to reconsider the value of this feature
if job.done:
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for job {job.uuid} but job was done, skipping",
_LOGGER.debug,
)
if job.stage and stage < PullImageLayerStage.from_status(job.stage):
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for job {job.uuid} but job was already on stage {job.stage}, skipping",
_LOGGER.debug,
)
# For progress calcuation we assume downloading and extracting are each 50% of the time and others stages negligible
progress = job.progress
match stage:
case PullImageLayerStage.DOWNLOADING | PullImageLayerStage.EXTRACTING:
@@ -244,26 +278,22 @@ class DockerInterface(JobGroup, ABC):
and reference.progress_detail.current
and reference.progress_detail.total
):
progress = (
progress = 50 * (
reference.progress_detail.current
/ reference.progress_detail.total
)
if stage == PullImageLayerStage.DOWNLOADING:
progress = 70 * progress
else:
progress = 70 + 30 * progress
if stage == PullImageLayerStage.EXTRACTING:
progress += 50
case (
PullImageLayerStage.VERIFYING_CHECKSUM
| PullImageLayerStage.DOWNLOAD_COMPLETE
):
progress = 70
progress = 50
case PullImageLayerStage.PULL_COMPLETE:
progress = 100
case PullImageLayerStage.RETRYING_DOWNLOAD:
progress = 0
# No real risk of getting things out of order in current implementation
# but keeping this one in case another change to these trips us up.
if stage != PullImageLayerStage.RETRYING_DOWNLOAD and progress < job.progress:
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for job {job.uuid} that implied progress was {progress} but current progress is {job.progress}, skipping",
@@ -278,8 +308,6 @@ class DockerInterface(JobGroup, ABC):
if (
stage in {PullImageLayerStage.DOWNLOADING, PullImageLayerStage.EXTRACTING}
and reference.progress_detail
and reference.progress_detail.current is not None
and reference.progress_detail.total is not None
):
job.update(
progress=progress,
@@ -312,44 +340,41 @@ class DockerInterface(JobGroup, ABC):
and job.name == "Pulling container image layer"
]
# Calculate total from layers that have reported size info
# With containerd snapshotter, some layers skip "Downloading" and go directly to
# "Download complete", so we can't wait for all layers to have extra before reporting progress
layers_with_extra = [
job for job in layer_jobs if job.extra and job.extra.get("total")
]
if not layers_with_extra:
return
# First set the total bytes to be downloaded/extracted on the main job
# Note: With containerd snapshotter, total may be None (time-based progress instead of byte-based)
if not install_job.extra:
total = 0
has_byte_progress = True
for job in layer_jobs:
if not job.extra:
return
# If any layer has None for total, we can't do byte-weighted aggregation
if job.extra["total"] is None:
has_byte_progress = False
break
total += job.extra["total"]
# Sum up total bytes. Layers that skip downloading get placeholder extra={1,1}
# which doesn't represent actual size. Separate "real" layers from placeholders.
# Filter guarantees job.extra is not None and has "total" key
real_layers = [
job for job in layers_with_extra if cast(dict, job.extra)["total"] > 1
]
placeholder_layers = [
job for job in layers_with_extra if cast(dict, job.extra)["total"] == 1
]
# Store whether we have byte-based progress for later use
install_job.extra = {"total": total if has_byte_progress else None}
else:
total = install_job.extra["total"]
has_byte_progress = total is not None
# If we only have placeholder layers (no real size info yet), don't report progress
# This prevents tiny cached layers from showing inflated progress before
# the actual download sizes are known
if not real_layers:
return
total = sum(cast(dict, job.extra)["total"] for job in real_layers)
if total == 0:
return
# Update install_job.extra with current total (may increase as more layers report)
install_job.extra = {"total": total}
# Calculate progress based on layers that have real size info
# Placeholder layers (skipped downloads) count as complete but don't affect weighted progress
# Then determine total progress based on progress of each sub-job
# If we have byte counts, weight by size. Otherwise, simple average.
progress = 0.0
stage = PullImageLayerStage.PULL_COMPLETE
for job in real_layers:
progress += job.progress * (cast(dict, job.extra)["total"] / total)
for job in layer_jobs:
if not job.extra:
return
if has_byte_progress:
# Byte-weighted progress (classic Docker behavior)
progress += job.progress * (job.extra["total"] / total)
else:
# Simple average progress (containerd snapshotter with time-based progress)
progress += job.progress / len(layer_jobs)
job_stage = PullImageLayerStage.from_status(cast(str, job.stage))
if job_stage < PullImageLayerStage.EXTRACTING:
@@ -360,28 +385,6 @@ class DockerInterface(JobGroup, ABC):
):
stage = PullImageLayerStage.EXTRACTING
# Check if any layers are still pending (no extra yet)
# If so, we're still in downloading phase even if all layers_with_extra are done
layers_pending = len(layer_jobs) - len(layers_with_extra)
if layers_pending > 0:
# Scale progress to account for unreported layers
# This prevents tiny layers that complete first from showing inflated progress
# e.g., if 2/25 layers reported at 70%, actual progress is ~70 * 2/25 = 5.6%
layers_fraction = len(layers_with_extra) / len(layer_jobs)
progress = progress * layers_fraction
if stage == PullImageLayerStage.PULL_COMPLETE:
stage = PullImageLayerStage.DOWNLOADING
# Also check if all placeholders are done but we're waiting for real layers
if placeholder_layers and stage == PullImageLayerStage.PULL_COMPLETE:
# All real layers are done, but check if placeholders are still extracting
for job in placeholder_layers:
job_stage = PullImageLayerStage.from_status(cast(str, job.stage))
if job_stage < PullImageLayerStage.PULL_COMPLETE:
stage = PullImageLayerStage.EXTRACTING
break
# Ensure progress is 100 at this point to prevent float drift
if stage == PullImageLayerStage.PULL_COMPLETE:
progress = 100
@@ -413,8 +416,9 @@ class DockerInterface(JobGroup, ABC):
_LOGGER.info("Downloading docker image %s with tag %s.", image, version)
try:
# Get credentials for private registries to pass to aiodocker
credentials = self._get_credentials(image) or None
if self.sys_docker.config.registries:
# Try login if we have defined credentials
await self._docker_login(image)
curr_job_id = self.sys_jobs.current.uuid
@@ -430,13 +434,13 @@ class DockerInterface(JobGroup, ABC):
BusEvent.DOCKER_IMAGE_PULL_UPDATE, process_pull_image_log
)
# Pull new image, passing credentials to aiodocker
docker_image = await self.sys_docker.pull_image(
# Pull new image
docker_image = await self.sys_run_in_executor(
self.sys_docker.pull_image,
self.sys_jobs.current.uuid,
image,
str(version),
platform=MAP_ARCH[image_arch],
auth=credentials,
)
# Tag latest
@@ -444,37 +448,22 @@ class DockerInterface(JobGroup, ABC):
_LOGGER.info(
"Tagging image %s with version %s as latest", image, version
)
await self.sys_docker.images.tag(
docker_image["Id"], image, tag="latest"
)
await self.sys_run_in_executor(docker_image.tag, image, tag="latest")
except docker.errors.APIError as err:
if err.status_code == HTTPStatus.TOO_MANY_REQUESTS:
if err.status_code == 429:
self.sys_resolution.create_issue(
IssueType.DOCKER_RATELIMIT,
ContextType.SYSTEM,
suggestions=[SuggestionType.REGISTRY_LOGIN],
)
raise DockerHubRateLimitExceeded(_LOGGER.error) from err
await async_capture_exception(err)
raise DockerError(
f"Can't install {image}:{version!s}: {err}", _LOGGER.error
) from err
except aiodocker.DockerError as err:
if err.status == HTTPStatus.TOO_MANY_REQUESTS:
self.sys_resolution.create_issue(
IssueType.DOCKER_RATELIMIT,
ContextType.SYSTEM,
suggestions=[SuggestionType.REGISTRY_LOGIN],
_LOGGER.info(
"Your IP address has made too many requests to Docker Hub which activated a rate limit. "
"For more details see https://www.home-assistant.io/more-info/dockerhub-rate-limit"
)
raise DockerHubRateLimitExceeded(_LOGGER.error) from err
await async_capture_exception(err)
raise DockerError(
f"Can't install {image}:{version!s}: {err}", _LOGGER.error
) from err
except (
docker.errors.DockerException,
requests.RequestException,
) as err:
except (docker.errors.DockerException, requests.RequestException) as err:
await async_capture_exception(err)
raise DockerError(
f"Unknown error with {image}:{version!s} -> {err!s}", _LOGGER.error
@@ -483,12 +472,14 @@ class DockerInterface(JobGroup, ABC):
if listener:
self.sys_bus.remove_listener(listener)
self._meta = docker_image
self._meta = docker_image.attrs
async def exists(self) -> bool:
"""Return True if Docker image exists in local repository."""
with suppress(aiodocker.DockerError, requests.RequestException):
await self.sys_docker.images.inspect(f"{self.image}:{self.version!s}")
with suppress(docker.errors.DockerException, requests.RequestException):
await self.sys_run_in_executor(
self.sys_docker.images.get, f"{self.image}:{self.version!s}"
)
return True
return False
@@ -547,11 +538,11 @@ class DockerInterface(JobGroup, ABC):
),
)
with suppress(aiodocker.DockerError, requests.RequestException):
with suppress(docker.errors.DockerException, requests.RequestException):
if not self._meta and self.image:
self._meta = await self.sys_docker.images.inspect(
self._meta = self.sys_docker.images.get(
f"{self.image}:{version!s}"
)
).attrs
# Successful?
if not self._meta:
@@ -619,17 +610,14 @@ class DockerInterface(JobGroup, ABC):
)
async def remove(self, *, remove_image: bool = True) -> None:
"""Remove Docker images."""
if not self.image or not self.version:
raise DockerError(
"Cannot determine image and/or version from metadata!", _LOGGER.error
)
# Cleanup container
with suppress(DockerError):
await self.stop()
if remove_image:
await self.sys_docker.remove_image(self.image, self.version)
await self.sys_run_in_executor(
self.sys_docker.remove_image, self.image, self.version
)
self._meta = None
@@ -651,16 +639,18 @@ class DockerInterface(JobGroup, ABC):
image_name = f"{expected_image}:{version!s}"
if self.image == expected_image:
try:
image = await self.sys_docker.images.inspect(image_name)
except (aiodocker.DockerError, requests.RequestException) as err:
image: Image = await self.sys_run_in_executor(
self.sys_docker.images.get, image_name
)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get {image_name} for check due to: {err!s}",
_LOGGER.error,
) from err
image_arch = f"{image['Os']}/{image['Architecture']}"
if "Variant" in image:
image_arch = f"{image_arch}/{image['Variant']}"
image_arch = f"{image.attrs['Os']}/{image.attrs['Architecture']}"
if "Variant" in image.attrs:
image_arch = f"{image_arch}/{image.attrs['Variant']}"
# If we have an image and its the right arch, all set
# It seems that newer Docker version return a variant for arm64 images.
@@ -722,13 +712,11 @@ class DockerInterface(JobGroup, ABC):
version: AwesomeVersion | None = None,
) -> None:
"""Check if old version exists and cleanup."""
if not (use_image := image or self.image):
raise DockerError("Cannot determine image from metadata!", _LOGGER.error)
if not (use_version := version or self.version):
raise DockerError("Cannot determine version from metadata!", _LOGGER.error)
await self.sys_docker.cleanup_old_images(
use_image, use_version, {old_image} if old_image else None
await self.sys_run_in_executor(
self.sys_docker.cleanup_old_images,
image or self.image,
version or self.version,
{old_image} if old_image else None,
)
@Job(
@@ -780,10 +768,10 @@ class DockerInterface(JobGroup, ABC):
"""Return latest version of local image."""
available_version: list[AwesomeVersion] = []
try:
for image in await self.sys_docker.images.list(
filters=f'{{"reference": ["{self.image}"]}}'
for image in await self.sys_run_in_executor(
self.sys_docker.images.list, self.image
):
for tag in image["RepoTags"]:
for tag in image.tags:
version = AwesomeVersion(tag.partition(":")[2])
if version.strategy == AwesomeVersionStrategy.UNKNOWN:
continue
@@ -792,7 +780,7 @@ class DockerInterface(JobGroup, ABC):
if not available_version:
raise ValueError()
except (aiodocker.DockerError, ValueError) as err:
except (docker.errors.DockerException, ValueError) as err:
raise DockerNotFound(
f"No version found for {self.image}", _LOGGER.info
) from err

View File

@@ -6,24 +6,20 @@ import asyncio
from contextlib import suppress
from dataclasses import dataclass
from functools import partial
from http import HTTPStatus
from ipaddress import IPv4Address
import json
import logging
import os
from pathlib import Path
import re
from typing import Any, Final, Self, cast
import aiodocker
from aiodocker.images import DockerImages
from aiohttp import ClientSession, ClientTimeout, UnixConnector
import attr
from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
from docker import errors as docker_errors
from docker.api.client import APIClient
from docker.client import DockerClient
from docker.errors import DockerException, ImageNotFound, NotFound
from docker.models.containers import Container, ContainerCollection
from docker.models.images import Image, ImageCollection
from docker.models.networks import Network
from docker.types.daemon import CancellableStream
import requests
@@ -49,7 +45,7 @@ from ..exceptions import (
)
from ..utils.common import FileConfiguration
from ..validate import SCHEMA_DOCKER_CONFIG
from .const import DOCKER_HUB, IMAGE_WITH_HOST, LABEL_MANAGED
from .const import LABEL_MANAGED
from .monitor import DockerMonitor
from .network import DockerNetwork
@@ -57,7 +53,6 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
MIN_SUPPORTED_DOCKER: Final = AwesomeVersion("24.0.0")
DOCKER_NETWORK_HOST: Final = "host"
RE_IMPORT_IMAGE_STREAM = re.compile(r"(^Loaded image ID: |^Loaded image: )(.+)$")
@attr.s(frozen=True)
@@ -76,25 +71,15 @@ class DockerInfo:
storage: str = attr.ib()
logging: str = attr.ib()
cgroup: str = attr.ib()
support_cpu_realtime: bool = attr.ib()
@staticmethod
async def new(data: dict[str, Any]) -> DockerInfo:
def new(data: dict[str, Any]):
"""Create a object from docker info."""
# Check if CONFIG_RT_GROUP_SCHED is loaded (blocking I/O in executor)
cpu_rt_file_exists = await asyncio.get_running_loop().run_in_executor(
None, Path("/sys/fs/cgroup/cpu/cpu.rt_runtime_us").exists
)
cpu_rt_supported = (
cpu_rt_file_exists and os.environ.get(ENV_SUPERVISOR_CPU_RT) == "1"
)
return DockerInfo(
AwesomeVersion(data.get("ServerVersion", "0.0.0")),
data.get("Driver", "unknown"),
data.get("LoggingDriver", "unknown"),
data.get("CgroupVersion", "1"),
cpu_rt_supported,
)
@property
@@ -105,21 +90,23 @@ class DockerInfo:
except AwesomeVersionCompareException:
return False
@property
def support_cpu_realtime(self) -> bool:
"""Return true, if CONFIG_RT_GROUP_SCHED is loaded."""
if not Path("/sys/fs/cgroup/cpu/cpu.rt_runtime_us").exists():
return False
return bool(os.environ.get(ENV_SUPERVISOR_CPU_RT) == "1")
@dataclass(frozen=True, slots=True)
class PullProgressDetail:
"""Progress detail information for pull.
Documentation lacking but both of these seem to be in bytes when populated.
Containerd-snapshot update - When leveraging this new feature, this information
becomes useless to us while extracting. It simply tells elapsed time using
current and units.
"""
current: int | None = None
total: int | None = None
units: str | None = None
@classmethod
def from_pull_log_dict(cls, value: dict[str, int]) -> PullProgressDetail:
@@ -207,27 +194,6 @@ class DockerConfig(FileConfiguration):
"""Return credentials for docker registries."""
return self._data.get(ATTR_REGISTRIES, {})
def get_registry_for_image(self, image: str) -> str | None:
"""Return the registry name if credentials are available for the image.
Matches the image against configured registries and returns the registry
name if found, or None if no matching credentials are configured.
"""
if not self.registries:
return None
# Check if image uses a custom registry (e.g., ghcr.io/org/image)
matcher = IMAGE_WITH_HOST.match(image)
if matcher:
registry = matcher.group(1)
if registry in self.registries:
return registry
# If no registry prefix, check for Docker Hub credentials
elif DOCKER_HUB in self.registries:
return DOCKER_HUB
return None
class DockerAPI(CoreSysAttributes):
"""Docker Supervisor wrapper.
@@ -238,15 +204,7 @@ class DockerAPI(CoreSysAttributes):
def __init__(self, coresys: CoreSys):
"""Initialize Docker base wrapper."""
self.coresys = coresys
# We keep both until we can fully refactor to aiodocker
self._dockerpy: DockerClient | None = None
self.docker: aiodocker.Docker = aiodocker.Docker(
url="unix://localhost", # dummy hostname for URL composition
connector=(connector := UnixConnector(SOCKET_DOCKER.as_posix())),
session=ClientSession(connector=connector, timeout=ClientTimeout(900)),
api_version="auto",
)
self._docker: DockerClient | None = None
self._network: DockerNetwork | None = None
self._info: DockerInfo | None = None
self.config: DockerConfig = DockerConfig()
@@ -254,28 +212,28 @@ class DockerAPI(CoreSysAttributes):
async def post_init(self) -> Self:
"""Post init actions that must be done in event loop."""
self._dockerpy = await asyncio.get_running_loop().run_in_executor(
self._docker = await asyncio.get_running_loop().run_in_executor(
None,
partial(
DockerClient,
base_url=f"unix:/{SOCKET_DOCKER.as_posix()}",
base_url=f"unix:/{str(SOCKET_DOCKER)}",
version="auto",
timeout=900,
),
)
self._info = await DockerInfo.new(self.dockerpy.info())
self._info = DockerInfo.new(self.docker.info())
await self.config.read_data()
self._network = await DockerNetwork(self.dockerpy).post_init(
self._network = await DockerNetwork(self.docker).post_init(
self.config.enable_ipv6, self.config.mtu
)
return self
@property
def dockerpy(self) -> DockerClient:
def docker(self) -> DockerClient:
"""Get docker API client."""
if not self._dockerpy:
if not self._docker:
raise RuntimeError("Docker API Client not initialized!")
return self._dockerpy
return self._docker
@property
def network(self) -> DockerNetwork:
@@ -285,19 +243,19 @@ class DockerAPI(CoreSysAttributes):
return self._network
@property
def images(self) -> DockerImages:
def images(self) -> ImageCollection:
"""Return API images."""
return self.docker.images
@property
def containers(self) -> ContainerCollection:
"""Return API containers."""
return self.dockerpy.containers
return self.docker.containers
@property
def api(self) -> APIClient:
"""Return API containers."""
return self.dockerpy.api
return self.docker.api
@property
def info(self) -> DockerInfo:
@@ -309,7 +267,7 @@ class DockerAPI(CoreSysAttributes):
@property
def events(self) -> CancellableStream:
"""Return docker event stream."""
return self.dockerpy.events(decode=True)
return self.docker.events(decode=True)
@property
def monitor(self) -> DockerMonitor:
@@ -425,7 +383,7 @@ class DockerAPI(CoreSysAttributes):
with suppress(DockerError):
self.network.detach_default_bridge(container)
else:
host_network: Network = self.dockerpy.networks.get(DOCKER_NETWORK_HOST)
host_network: Network = self.docker.networks.get(DOCKER_NETWORK_HOST)
# Check if container is registered on host
# https://github.com/moby/moby/issues/23302
@@ -452,33 +410,35 @@ class DockerAPI(CoreSysAttributes):
return container
async def pull_image(
def pull_image(
self,
job_id: str,
repository: str,
tag: str = "latest",
platform: str | None = None,
auth: dict[str, str] | None = None,
) -> dict[str, Any]:
) -> Image:
"""Pull the specified image and return it.
This mimics the high level API of images.pull but provides better error handling by raising
on a docker error during the pull, whereas the high level API ignores all errors on pull and
raises only if the get afterwards fails. Additionally it fires progress reports for the pull
on the bus so listeners can use them to update status for users.
Must be run in executor.
"""
async for e in self.images.pull(
repository, tag=tag, platform=platform, auth=auth, stream=True
):
pull_log = self.docker.api.pull(
repository, tag=tag, platform=platform, stream=True, decode=True
)
for e in pull_log:
entry = PullLogEntry.from_pull_log_dict(job_id, e)
if entry.error:
raise entry.exception
await asyncio.gather(
*self.sys_bus.fire_event(BusEvent.DOCKER_IMAGE_PULL_UPDATE, entry)
self.sys_loop.call_soon_threadsafe(
self.sys_bus.fire_event, BusEvent.DOCKER_IMAGE_PULL_UPDATE, entry
)
sep = "@" if tag.startswith("sha256:") else ":"
return await self.images.inspect(f"{repository}{sep}{tag}")
return self.images.get(f"{repository}{sep}{tag}")
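To make the separator logic above concrete (the repository and digest values are placeholders, not taken from the code):

    # tag="2025.11.0"       -> looks up "ghcr.io/home-assistant/amd64-hassio-supervisor:2025.11.0"
    # tag="sha256:<digest>" -> looks up "ghcr.io/home-assistant/amd64-hassio-supervisor@sha256:<digest>"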
def run_command(
self,
@@ -499,7 +459,7 @@ class DockerAPI(CoreSysAttributes):
_LOGGER.info("Runing command '%s' on %s", command, image_with_tag)
container = None
try:
container = self.dockerpy.containers.run(
container = self.docker.containers.run(
image_with_tag,
command=command,
detach=True,
@@ -527,35 +487,35 @@ class DockerAPI(CoreSysAttributes):
"""Repair local docker overlayfs2 issues."""
_LOGGER.info("Prune stale containers")
try:
output = self.dockerpy.api.prune_containers()
output = self.docker.api.prune_containers()
_LOGGER.debug("Containers prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for containers prune: %s", err)
_LOGGER.info("Prune stale images")
try:
output = self.dockerpy.api.prune_images(filters={"dangling": False})
output = self.docker.api.prune_images(filters={"dangling": False})
_LOGGER.debug("Images prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for images prune: %s", err)
_LOGGER.info("Prune stale builds")
try:
output = self.dockerpy.api.prune_builds()
output = self.docker.api.prune_builds()
_LOGGER.debug("Builds prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for builds prune: %s", err)
_LOGGER.info("Prune stale volumes")
try:
output = self.dockerpy.api.prune_volumes()
output = self.docker.api.prune_volumes()
_LOGGER.debug("Volumes prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for volumes prune: %s", err)
_LOGGER.info("Prune stale networks")
try:
output = self.dockerpy.api.prune_networks()
output = self.docker.api.prune_networks()
_LOGGER.debug("Networks prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for networks prune: %s", err)
@@ -577,11 +537,11 @@ class DockerAPI(CoreSysAttributes):
Fix: https://github.com/moby/moby/issues/23302
"""
network: Network = self.dockerpy.networks.get(network_name)
network: Network = self.docker.networks.get(network_name)
for cid, data in network.attrs.get("Containers", {}).items():
try:
self.dockerpy.containers.get(cid)
self.docker.containers.get(cid)
continue
except docker_errors.NotFound:
_LOGGER.debug(
@@ -596,26 +556,22 @@ class DockerAPI(CoreSysAttributes):
with suppress(docker_errors.DockerException, requests.RequestException):
network.disconnect(data.get("Name", cid), force=True)
async def container_is_initialized(
def container_is_initialized(
self, name: str, image: str, version: AwesomeVersion
) -> bool:
"""Return True if docker container exists in good state and is built from expected image."""
try:
docker_container = await self.sys_run_in_executor(self.containers.get, name)
docker_image = await self.images.inspect(f"{image}:{version}")
except docker_errors.NotFound:
docker_container = self.containers.get(name)
docker_image = self.images.get(f"{image}:{version}")
except NotFound:
return False
except aiodocker.DockerError as err:
if err.status == HTTPStatus.NOT_FOUND:
return False
raise DockerError() from err
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError() from err
# Check the image is correct and state is good
return (
docker_container.image is not None
and docker_container.image.id == docker_image["Id"]
and docker_container.image.id == docker_image.id
and docker_container.status in ("exited", "running", "created")
)
@@ -625,18 +581,18 @@ class DockerAPI(CoreSysAttributes):
"""Stop/remove Docker container."""
try:
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
except NotFound:
raise DockerNotFound() from None
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError() from err
if docker_container.status == "running":
_LOGGER.info("Stopping %s application", name)
with suppress(docker_errors.DockerException, requests.RequestException):
with suppress(DockerException, requests.RequestException):
docker_container.stop(timeout=timeout)
if remove_container:
with suppress(docker_errors.DockerException, requests.RequestException):
with suppress(DockerException, requests.RequestException):
_LOGGER.info("Cleaning %s application", name)
docker_container.remove(force=True, v=True)
@@ -648,11 +604,11 @@ class DockerAPI(CoreSysAttributes):
"""Start Docker container."""
try:
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
except NotFound:
raise DockerNotFound(
f"{name} not found for starting up", _LOGGER.error
) from None
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get {name} for starting up", _LOGGER.error
) from err
@@ -660,36 +616,36 @@ class DockerAPI(CoreSysAttributes):
_LOGGER.info("Starting %s", name)
try:
docker_container.start()
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError(f"Can't start {name}: {err}", _LOGGER.error) from err
def restart_container(self, name: str, timeout: int) -> None:
"""Restart docker container."""
try:
container: Container = self.containers.get(name)
except docker_errors.NotFound:
except NotFound:
raise DockerNotFound() from None
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError() from err
_LOGGER.info("Restarting %s", name)
try:
container.restart(timeout=timeout)
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError(f"Can't restart {name}: {err}", _LOGGER.warning) from err
def container_logs(self, name: str, tail: int = 100) -> bytes:
"""Return Docker logs of container."""
try:
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
except NotFound:
raise DockerNotFound() from None
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError() from err
try:
return docker_container.logs(tail=tail, stdout=True, stderr=True)
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't grep logs from {name}: {err}", _LOGGER.warning
) from err
@@ -698,9 +654,9 @@ class DockerAPI(CoreSysAttributes):
"""Read and return stats from container."""
try:
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
except NotFound:
raise DockerNotFound() from None
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError() from err
# container is not running
@@ -709,7 +665,7 @@ class DockerAPI(CoreSysAttributes):
try:
return docker_container.stats(stream=False)
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't read stats from {name}: {err}", _LOGGER.error
) from err
@@ -718,84 +674,61 @@ class DockerAPI(CoreSysAttributes):
"""Execute a command inside Docker container."""
try:
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
except NotFound:
raise DockerNotFound() from None
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError() from err
# Execute
try:
code, output = docker_container.exec_run(command)
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError() from err
return CommandReturn(code, output)
async def remove_image(
def remove_image(
self, image: str, version: AwesomeVersion, latest: bool = True
) -> None:
"""Remove a Docker image by version and latest."""
try:
if latest:
_LOGGER.info("Removing image %s with latest", image)
try:
await self.images.delete(f"{image}:latest", force=True)
except aiodocker.DockerError as err:
if err.status != HTTPStatus.NOT_FOUND:
raise
with suppress(ImageNotFound):
self.images.remove(image=f"{image}:latest", force=True)
_LOGGER.info("Removing image %s with %s", image, version)
try:
await self.images.delete(f"{image}:{version!s}", force=True)
except aiodocker.DockerError as err:
if err.status != HTTPStatus.NOT_FOUND:
raise
with suppress(ImageNotFound):
self.images.remove(image=f"{image}:{version!s}", force=True)
except (aiodocker.DockerError, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't remove image {image}: {err}", _LOGGER.warning
) from err
async def import_image(self, tar_file: Path) -> dict[str, Any] | None:
def import_image(self, tar_file: Path) -> Image | None:
"""Import a tar file as image."""
try:
with tar_file.open("rb") as read_tar:
resp: list[dict[str, Any]] = self.images.import_image(read_tar)
except (aiodocker.DockerError, OSError) as err:
docker_image_list: list[Image] = self.images.load(read_tar) # type: ignore
if len(docker_image_list) != 1:
_LOGGER.warning(
"Unexpected image count %d while importing image from tar",
len(docker_image_list),
)
return None
return docker_image_list[0]
except (DockerException, OSError) as err:
raise DockerError(
f"Can't import image from tar: {err}", _LOGGER.error
) from err
docker_image_list: list[str] = []
for chunk in resp:
if "errorDetail" in chunk:
raise DockerError(
f"Can't import image from tar: {chunk['errorDetail']['message']}",
_LOGGER.error,
)
if "stream" in chunk:
if match := RE_IMPORT_IMAGE_STREAM.search(chunk["stream"]):
docker_image_list.append(match.group(2))
if len(docker_image_list) != 1:
_LOGGER.warning(
"Unexpected image count %d while importing image from tar",
len(docker_image_list),
)
return None
try:
return await self.images.inspect(docker_image_list[0])
except (aiodocker.DockerError, requests.RequestException) as err:
raise DockerError(
f"Could not inspect imported image due to: {err!s}", _LOGGER.error
) from err
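For context on the stream parsing above: `docker load` answers with a sequence of JSON chunks, and the shapes below are representative only (exact wording can vary between Docker versions):

    {"stream": "Loaded image: ghcr.io/home-assistant/amd64-hassio-supervisor:2025.11.0\n"}
    {"stream": "Loaded image ID: sha256:<id>\n"}
    {"error": "...", "errorDetail": {"message": "..."}}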
def export_image(self, image: str, version: AwesomeVersion, tar_file: Path) -> None:
"""Export current images into a tar file."""
try:
docker_image = self.api.get_image(f"{image}:{version}")
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't fetch image {image}: {err}", _LOGGER.error
) from err
@@ -812,7 +745,7 @@ class DockerAPI(CoreSysAttributes):
_LOGGER.info("Export image %s done", image)
async def cleanup_old_images(
def cleanup_old_images(
self,
current_image: str,
current_version: AwesomeVersion,
@@ -823,57 +756,46 @@ class DockerAPI(CoreSysAttributes):
"""Clean up old versions of an image."""
image = f"{current_image}:{current_version!s}"
try:
try:
image_attr = await self.images.inspect(image)
except aiodocker.DockerError as err:
if err.status == HTTPStatus.NOT_FOUND:
raise DockerNotFound(
f"{current_image} not found for cleanup", _LOGGER.warning
) from None
raise
except (aiodocker.DockerError, requests.RequestException) as err:
keep = {cast(str, self.images.get(image).id)}
except ImageNotFound:
raise DockerNotFound(
f"{current_image} not found for cleanup", _LOGGER.warning
) from None
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't get {current_image} for cleanup", _LOGGER.warning
) from err
keep = {cast(str, image_attr["Id"])}
if keep_images:
keep_images -= {image}
results = await asyncio.gather(
*[self.images.inspect(image) for image in keep_images],
return_exceptions=True,
)
for result in results:
# If it's not found, no need to preserve it from getting removed
if (
isinstance(result, aiodocker.DockerError)
and result.status == HTTPStatus.NOT_FOUND
):
continue
if isinstance(result, BaseException):
raise DockerError(
f"Failed to get one or more images from {keep} during cleanup",
_LOGGER.warning,
) from result
keep.add(cast(str, result["Id"]))
try:
for image in keep_images:
# If it's not found, no need to preserve it from getting removed
with suppress(ImageNotFound):
keep.add(cast(str, self.images.get(image).id))
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Failed to get one or more images from {keep} during cleanup",
_LOGGER.warning,
) from err
# Cleanup old and current
image_names = list(
old_images | {current_image} if old_images else {current_image}
)
try:
images_list = await self.images.list(
filters=json.dumps({"reference": image_names})
)
except (aiodocker.DockerError, requests.RequestException) as err:
# This API accepts a list of image names. Tested and confirmed working on docker==7.1.0
# Its typing only allows `str` though, so a future update could break this.
images_list = self.images.list(name=image_names) # type: ignore
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Corrupt docker overlayfs found: {err}", _LOGGER.warning
) from err
for docker_image in images_list:
if docker_image["Id"] in keep:
if docker_image.id in keep:
continue
with suppress(aiodocker.DockerError, requests.RequestException):
_LOGGER.info("Cleanup images: %s", docker_image["RepoTags"])
await self.images.delete(docker_image["Id"], force=True)
with suppress(DockerException, requests.RequestException):
_LOGGER.info("Cleanup images: %s", docker_image.tags)
self.images.remove(docker_image.id, force=True)
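A short walkthrough of the cleanup above, with illustrative inputs:

    # current_image = "ghcr.io/home-assistant/amd64-hassio-supervisor", current_version = "2025.11.0"
    # old_images    = {"homeassistant/amd64-hassio-supervisor"}
    # 1. keep <- ID of the current tag, plus IDs of keep_images entries that still resolve
    # 2. list all local images whose reference matches current_image or old_images
    # 3. force-remove every listed image whose ID is not in keep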

View File

@@ -89,7 +89,7 @@ class DockerMonitor(CoreSysAttributes, Thread):
DockerContainerStateEvent(
name=attributes["name"],
state=container_state,
id=event["Actor"]["ID"],
id=event["id"],
time=event["time"],
),
)
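For reference on the two accessors above: a container event from the Docker events stream carries the container ID both at the top level and under Actor, roughly like this (trimmed, values illustrative):

    {
        "Type": "container",
        "Action": "start",
        "id": "1e382ba2...",
        "Actor": {"ID": "1e382ba2...", "Attributes": {"name": "addon_local_ssh"}},
        "time": 1730800000,
    }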

View File

@@ -1,12 +1,10 @@
"""Init file for Supervisor Docker object."""
import asyncio
from collections.abc import Awaitable
from ipaddress import IPv4Address
import logging
import os
import aiodocker
from awesomeversion.awesomeversion import AwesomeVersion
import docker
import requests
@@ -114,18 +112,19 @@ class DockerSupervisor(DockerInterface):
name="docker_supervisor_update_start_tag",
concurrency=JobConcurrency.GROUP_QUEUE,
)
async def update_start_tag(self, image: str, version: AwesomeVersion) -> None:
def update_start_tag(self, image: str, version: AwesomeVersion) -> Awaitable[None]:
"""Update start tag to new version."""
return self.sys_run_in_executor(self._update_start_tag, image, version)
def _update_start_tag(self, image: str, version: AwesomeVersion) -> None:
"""Update start tag to new version.
Need run inside executor.
"""
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
docker_image = await self.sys_docker.images.inspect(f"{image}:{version!s}")
except (
aiodocker.DockerError,
docker.errors.DockerException,
requests.RequestException,
) as err:
docker_container = self.sys_docker.containers.get(self.name)
docker_image = self.sys_docker.images.get(f"{image}:{version!s}")
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't get image or container to fix start tag: {err}", _LOGGER.error
) from err
@@ -145,14 +144,8 @@ class DockerSupervisor(DockerInterface):
# If version tag
if start_tag != "latest":
continue
await asyncio.gather(
self.sys_docker.images.tag(
docker_image["Id"], start_image, tag=start_tag
),
self.sys_docker.images.tag(
docker_image["Id"], start_image, tag=version.string
),
)
docker_image.tag(start_image, start_tag)
docker_image.tag(start_image, version.string)
except (aiodocker.DockerError, requests.RequestException) as err:
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(f"Can't fix start tag: {err}", _LOGGER.error) from err

View File

@@ -639,32 +639,9 @@ class DockerLogOutOfOrder(DockerError):
class DockerNoSpaceOnDevice(DockerError):
"""Raise if a docker pull fails due to available space."""
error_key = "docker_no_space_on_device"
message_template = "No space left on disk"
def __init__(self, logger: Callable[..., None] | None = None) -> None:
"""Raise & log."""
super().__init__(None, logger=logger)
class DockerHubRateLimitExceeded(DockerError):
"""Raise for docker hub rate limit exceeded error."""
error_key = "dockerhub_rate_limit_exceeded"
message_template = (
"Your IP address has made too many requests to Docker Hub which activated a rate limit. "
"For more details see {dockerhub_rate_limit_url}"
)
def __init__(self, logger: Callable[..., None] | None = None) -> None:
"""Raise & log."""
super().__init__(
None,
logger=logger,
extra_fields={
"dockerhub_rate_limit_url": "https://www.home-assistant.io/more-info/dockerhub-rate-limit"
},
)
super().__init__("No space left on disk", logger=logger)
class DockerJobError(DockerError, JobException):

View File

@@ -9,7 +9,7 @@ from contextvars import Context, ContextVar, Token
from dataclasses import dataclass
from datetime import datetime
import logging
from typing import Any, Self, cast
from typing import Any, Self
from uuid import uuid4
from attr.validators import gt, lt
@@ -196,7 +196,7 @@ class SupervisorJob:
self,
progress: float | None = None,
stage: str | None = None,
extra: dict[str, Any] | None | type[DEFAULT] = DEFAULT,
extra: dict[str, Any] | None = DEFAULT, # type: ignore
done: bool | None = None,
) -> None:
"""Update multiple fields with one on change event."""
@@ -207,8 +207,8 @@ class SupervisorJob:
self.progress = progress
if stage is not None:
self.stage = stage
if extra is not DEFAULT:
self.extra = cast(dict[str, Any] | None, extra)
if extra != DEFAULT:
self.extra = extra
# Done has special event. use that to trigger on change if included
# If not then just use any other field to trigger
@@ -306,21 +306,19 @@ class JobManager(FileConfiguration, CoreSysAttributes):
reference: str | None = None,
initial_stage: str | None = None,
internal: bool = False,
parent_id: str | None | type[DEFAULT] = DEFAULT,
parent_id: str | None = DEFAULT, # type: ignore
child_job_syncs: list[ChildJobSyncFilter] | None = None,
) -> SupervisorJob:
"""Create a new job."""
kwargs: dict[str, Any] = {
"reference": reference,
"stage": initial_stage,
"on_change": self._on_job_change,
"internal": internal,
"child_job_syncs": child_job_syncs,
}
if parent_id is not DEFAULT:
kwargs["parent_id"] = parent_id
job = SupervisorJob(name, **kwargs)
job = SupervisorJob(
name,
reference=reference,
stage=initial_stage,
on_change=self._on_job_change,
internal=internal,
child_job_syncs=child_job_syncs,
**({} if parent_id == DEFAULT else {"parent_id": parent_id}), # type: ignore
)
# Shouldn't happen, but an inability to find a parent for progress reporting
# shouldn't raise and break the active job
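The DEFAULT used above is a sentinel that distinguishes "argument not passed" from an explicit None; a minimal sketch of the pattern, with generic names rather than the Supervisor's exact definitions:

    class DEFAULT:
        """Sentinel: the caller did not pass a value at all."""

    def update(extra: dict | None | type[DEFAULT] = DEFAULT) -> None:
        if extra is not DEFAULT:  # identity check keeps None as a legitimate value
            ...  # only assign when the caller actually passed something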

View File

@@ -34,7 +34,6 @@ class JobCondition(StrEnum):
PLUGINS_UPDATED = "plugins_updated"
RUNNING = "running"
SUPERVISOR_UPDATED = "supervisor_updated"
ARCHITECTURE_SUPPORTED = "architecture_supported"
class JobConcurrency(StrEnum):

View File

@@ -441,14 +441,6 @@ class Job(CoreSysAttributes):
raise JobConditionException(
f"'{method_name}' blocked from execution, supervisor needs to be updated first"
)
if (
JobCondition.ARCHITECTURE_SUPPORTED in used_conditions
and UnsupportedReason.SYSTEM_ARCHITECTURE
in coresys.sys_resolution.unsupported
):
raise JobConditionException(
f"'{method_name}' blocked from execution, unsupported system architecture"
)
if JobCondition.PLUGINS_UPDATED in used_conditions and (
out_of_date := [

View File

@@ -64,19 +64,6 @@ def filter_data(coresys: CoreSys, event: Event, hint: Hint) -> Event | None:
# Not full startup - missing information
if coresys.core.state in (CoreState.INITIALIZE, CoreState.SETUP):
# During SETUP, we have basic system info available for better debugging
if coresys.core.state == CoreState.SETUP:
event.setdefault("contexts", {}).update(
{
"versions": {
"docker": coresys.docker.info.version,
"supervisor": coresys.supervisor.version,
},
"host": {
"machine": coresys.machine,
},
}
)
return event
# List installed addons

View File

@@ -1,6 +1,5 @@
"""A collection of tasks."""
from contextlib import suppress
from datetime import datetime, timedelta
import logging
from typing import cast
@@ -14,7 +13,6 @@ from ..exceptions import (
BackupFileNotFoundError,
HomeAssistantError,
ObserverError,
SupervisorUpdateError,
)
from ..homeassistant.const import LANDINGPAGE, WSType
from ..jobs.const import JobConcurrency
@@ -163,7 +161,6 @@ class Tasks(CoreSysAttributes):
JobCondition.INTERNET_HOST,
JobCondition.OS_SUPPORTED,
JobCondition.RUNNING,
JobCondition.ARCHITECTURE_SUPPORTED,
],
concurrency=JobConcurrency.REJECT,
)
@@ -176,11 +173,7 @@ class Tasks(CoreSysAttributes):
"Found new Supervisor version %s, updating",
self.sys_supervisor.latest_version,
)
# Errors are logged by the exceptions; there is nothing more we can do
# if an update fails here.
with suppress(SupervisorUpdateError):
await self.sys_supervisor.update()
await self.sys_supervisor.update()
async def _watchdog_homeassistant_api(self):
"""Create scheduler task for monitoring running state of API.

View File

@@ -135,7 +135,7 @@ class Mount(CoreSysAttributes, ABC):
@property
def state(self) -> UnitActiveState | None:
"""Get state of mount."""
return UnitActiveState(self._state) if self._state is not None else None
return self._state
@cached_property
def local_where(self) -> Path:

View File

@@ -23,5 +23,4 @@ PLUGIN_UPDATE_CONDITIONS = [
JobCondition.HEALTHY,
JobCondition.INTERNET_HOST,
JobCondition.SUPERVISOR_UPDATED,
JobCondition.ARCHITECTURE_SUPPORTED,
]

View File

@@ -58,7 +58,6 @@ class UnsupportedReason(StrEnum):
SYSTEMD_JOURNAL = "systemd_journal"
SYSTEMD_RESOLVED = "systemd_resolved"
VIRTUALIZATION_IMAGE = "virtualization_image"
SYSTEM_ARCHITECTURE = "system_architecture"
class UnhealthyReason(StrEnum):

View File

@@ -13,6 +13,7 @@ from .validate import get_valid_modules
_LOGGER: logging.Logger = logging.getLogger(__name__)
UNHEALTHY = [
UnsupportedReason.DOCKER_VERSION,
UnsupportedReason.LXC,
UnsupportedReason.PRIVILEGED,
]

View File

@@ -5,6 +5,8 @@ from ...coresys import CoreSys
from ..const import UnsupportedReason
from .base import EvaluateBase
SUPPORTED_OS = ["Debian GNU/Linux 12 (bookworm)"]
def setup(coresys: CoreSys) -> EvaluateBase:
"""Initialize evaluation-setup function."""
@@ -31,4 +33,6 @@ class EvaluateOperatingSystem(EvaluateBase):
async def evaluate(self) -> bool:
"""Run evaluation."""
return not self.sys_os.available
if self.sys_os.available:
return False
return self.sys_host.info.operating_system not in SUPPORTED_OS
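As a concrete reading of the evaluation above (host OS strings are examples):

    # Home Assistant OS available        -> False (never flagged)
    # "Debian GNU/Linux 12 (bookworm)"   -> False, listed in SUPPORTED_OS
    # "Ubuntu 24.04 LTS"                 -> True, the system is marked unsupported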

View File

@@ -1,38 +0,0 @@
"""Evaluation class for system architecture support."""
from ...const import CoreState
from ...coresys import CoreSys
from ..const import UnsupportedReason
from .base import EvaluateBase
def setup(coresys: CoreSys) -> EvaluateBase:
"""Initialize evaluation-setup function."""
return EvaluateSystemArchitecture(coresys)
class EvaluateSystemArchitecture(EvaluateBase):
"""Evaluate if the current Supervisor architecture is supported."""
@property
def reason(self) -> UnsupportedReason:
"""Return a UnsupportedReason enum."""
return UnsupportedReason.SYSTEM_ARCHITECTURE
@property
def on_failure(self) -> str:
"""Return a string that is printed when self.evaluate is True."""
return "System architecture is no longer supported. Move to a supported system architecture."
@property
def states(self) -> list[CoreState]:
"""Return a list of valid states when this evaluation can run."""
return [CoreState.INITIALIZE]
async def evaluate(self):
"""Run evaluation."""
return self.sys_host.info.sys_arch.supervisor in {
"i386",
"armhf",
"armv7",
}

View File

@@ -242,10 +242,9 @@ class Updater(FileConfiguration, CoreSysAttributes):
@Job(
name="updater_fetch_data",
conditions=[
JobCondition.ARCHITECTURE_SUPPORTED,
JobCondition.INTERNET_SYSTEM,
JobCondition.HOME_ASSISTANT_CORE_SUPPORTED,
JobCondition.OS_SUPPORTED,
JobCondition.HOME_ASSISTANT_CORE_SUPPORTED,
],
on_condition=UpdaterJobError,
throttle_period=timedelta(seconds=30),

View File

@@ -7,7 +7,13 @@ from collections.abc import Awaitable, Callable
import logging
from typing import Any, Protocol, cast
from dbus_fast import ErrorType, InvalidIntrospectionError, Message, MessageType
from dbus_fast import (
ErrorType,
InvalidIntrospectionError,
Message,
MessageType,
Variant,
)
from dbus_fast.aio.message_bus import MessageBus
from dbus_fast.aio.proxy_object import ProxyInterface, ProxyObject
from dbus_fast.errors import DBusError as DBusFastDBusError
@@ -259,7 +265,7 @@ class DBus:
"""
async def sync_property_change(
prop_interface: str, changed: dict[str, Any], invalidated: list[str]
prop_interface: str, changed: dict[str, Variant], invalidated: list[str]
) -> None:
"""Sync property changes to cache."""
if interface != prop_interface:

View File

@@ -5,20 +5,12 @@ from collections.abc import AsyncGenerator
from datetime import UTC, datetime
from functools import wraps
import json
import re
from aiohttp import ClientResponse
from supervisor.exceptions import MalformedBinaryEntryError
from supervisor.host.const import LogFormatter
_RE_ANSI_CSI_COLORS_PATTERN = re.compile(r"\x1B\[[0-9;]*m")
def _strip_ansi_colors(message: str) -> str:
"""Remove ANSI color codes from a message string."""
return _RE_ANSI_CSI_COLORS_PATTERN.sub("", message)
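A quick illustration of the helper above; the escape sequences are standard SGR color codes:

    _strip_ansi_colors("\x1b[32mINFO\x1b[0m Starting add-on")
    # -> "INFO Starting add-on"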
def formatter(required_fields: list[str]):
"""Decorate journal entry formatters with list of required fields.
@@ -39,9 +31,9 @@ def formatter(required_fields: list[str]):
@formatter(["MESSAGE"])
def journal_plain_formatter(entries: dict[str, str], no_colors: bool = False) -> str:
def journal_plain_formatter(entries: dict[str, str]) -> str:
"""Format parsed journal entries as a plain message."""
return _strip_ansi_colors(entries["MESSAGE"]) if no_colors else entries["MESSAGE"]
return entries["MESSAGE"]
@formatter(
@@ -53,7 +45,7 @@ def journal_plain_formatter(entries: dict[str, str], no_colors: bool = False) ->
"MESSAGE",
]
)
def journal_verbose_formatter(entries: dict[str, str], no_colors: bool = False) -> str:
def journal_verbose_formatter(entries: dict[str, str]) -> str:
"""Format parsed journal entries to a journalctl-like format."""
ts = datetime.fromtimestamp(
int(entries["__REALTIME_TIMESTAMP"]) / 1e6, UTC
@@ -66,24 +58,14 @@ def journal_verbose_formatter(entries: dict[str, str], no_colors: bool = False)
else entries.get("SYSLOG_IDENTIFIER", "_UNKNOWN_")
)
message = (
_strip_ansi_colors(entries.get("MESSAGE", ""))
if no_colors
else entries.get("MESSAGE", "")
)
return f"{ts} {entries.get('_HOSTNAME', '')} {identifier}: {message}"
return f"{ts} {entries.get('_HOSTNAME', '')} {identifier}: {entries.get('MESSAGE', '')}"
async def journal_logs_reader(
journal_logs: ClientResponse,
log_formatter: LogFormatter = LogFormatter.PLAIN,
no_colors: bool = False,
journal_logs: ClientResponse, log_formatter: LogFormatter = LogFormatter.PLAIN
) -> AsyncGenerator[tuple[str | None, str]]:
"""Read logs from systemd journal line by line, formatted using the given formatter.
Optionally strip ANSI color codes from the entries' messages.
Returns a generator of (cursor, formatted_entry) tuples.
"""
match log_formatter:
@@ -102,10 +84,7 @@ async def journal_logs_reader(
# at EOF (likely race between at_eof and EOF check in readuntil)
if line == b"\n" or not line:
if entries:
yield (
entries.get("__CURSOR"),
formatter_(entries, no_colors=no_colors),
)
yield entries.get("__CURSOR"), formatter_(entries)
entries = {}
continue

View File

@@ -3,25 +3,22 @@
import asyncio
from datetime import timedelta
import errno
from http import HTTPStatus
from pathlib import Path
from unittest.mock import MagicMock, PropertyMock, call, patch
from unittest.mock import MagicMock, PropertyMock, patch
import aiodocker
from awesomeversion import AwesomeVersion
from docker.errors import APIError, DockerException, NotFound
from docker.errors import DockerException, ImageNotFound, NotFound
import pytest
from securetar import SecureTarFile
from supervisor.addons.addon import Addon
from supervisor.addons.const import AddonBackupMode
from supervisor.addons.model import AddonModel
from supervisor.config import CoreConfig
from supervisor.const import AddonBoot, AddonState, BusEvent
from supervisor.coresys import CoreSys
from supervisor.docker.addon import DockerAddon
from supervisor.docker.const import ContainerState
from supervisor.docker.manager import CommandReturn, DockerAPI
from supervisor.docker.manager import CommandReturn
from supervisor.docker.monitor import DockerContainerStateEvent
from supervisor.exceptions import AddonsError, AddonsJobError, AudioUpdateError
from supervisor.hardware.helper import HwHelper
@@ -864,14 +861,16 @@ async def test_addon_loads_wrong_image(
container.remove.assert_called_with(force=True, v=True)
# one for removing the addon, one for removing the addon builder
assert coresys.docker.images.delete.call_count == 2
assert coresys.docker.images.remove.call_count == 2
assert coresys.docker.images.delete.call_args_list[0] == call(
"local/aarch64-addon-ssh:latest", force=True
)
assert coresys.docker.images.delete.call_args_list[1] == call(
"local/aarch64-addon-ssh:9.2.1", force=True
)
assert coresys.docker.images.remove.call_args_list[0].kwargs == {
"image": "local/aarch64-addon-ssh:latest",
"force": True,
}
assert coresys.docker.images.remove.call_args_list[1].kwargs == {
"image": "local/aarch64-addon-ssh:9.2.1",
"force": True,
}
mock_run_command.assert_called_once()
assert mock_run_command.call_args.args[0] == "docker.io/library/docker"
assert mock_run_command.call_args.kwargs["version"] == "1.0.0-cli"
@@ -895,9 +894,7 @@ async def test_addon_loads_missing_image(
mock_amd64_arch_supported,
):
"""Test addon corrects a missing image on load."""
coresys.docker.images.inspect.side_effect = aiodocker.DockerError(
HTTPStatus.NOT_FOUND, {"message": "missing"}
)
coresys.docker.images.get.side_effect = ImageNotFound("missing")
with (
patch("pathlib.Path.is_file", return_value=True),
@@ -929,51 +926,41 @@ async def test_addon_loads_missing_image(
assert install_addon_ssh.image == "local/amd64-addon-ssh"
@pytest.mark.parametrize(
"pull_image_exc",
[APIError("error"), aiodocker.DockerError(400, {"message": "error"})],
)
@pytest.mark.usefixtures("container", "mock_amd64_arch_supported")
async def test_addon_load_succeeds_with_docker_errors(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
caplog: pytest.LogCaptureFixture,
pull_image_exc: Exception,
mock_amd64_arch_supported,
):
"""Docker errors while building/pulling an image during load should not raise and fail setup."""
# Build env invalid failure
coresys.docker.images.inspect.side_effect = aiodocker.DockerError(
HTTPStatus.NOT_FOUND, {"message": "missing"}
)
coresys.docker.images.get.side_effect = ImageNotFound("missing")
caplog.clear()
await install_addon_ssh.load()
assert "Invalid build environment" in caplog.text
# Image build failure
coresys.docker.images.build.side_effect = DockerException()
caplog.clear()
with (
patch("pathlib.Path.is_file", return_value=True),
patch.object(
CoreConfig, "local_to_extern_path", return_value="/addon/path/on/host"
),
patch.object(
DockerAPI,
"run_command",
return_value=MagicMock(exit_code=1, output=b"error"),
type(coresys.config),
"local_to_extern_path",
return_value="/addon/path/on/host",
),
):
await install_addon_ssh.load()
assert (
"Can't build local/amd64-addon-ssh:9.2.1: Docker build failed for local/amd64-addon-ssh:9.2.1 (exit code 1). Build output:\nerror"
in caplog.text
)
assert "Can't build local/amd64-addon-ssh:9.2.1" in caplog.text
# Image pull failure
install_addon_ssh.data["image"] = "test/amd64-addon-ssh"
coresys.docker.images.build.reset_mock(side_effect=True)
coresys.docker.pull_image.side_effect = DockerException()
caplog.clear()
with patch.object(DockerAPI, "pull_image", side_effect=pull_image_exc):
await install_addon_ssh.load()
assert "Can't install test/amd64-addon-ssh:9.2.1:" in caplog.text
await install_addon_ssh.load()
assert "Unknown error with test/amd64-addon-ssh:9.2.1" in caplog.text
async def test_addon_manual_only_boot(coresys: CoreSys, install_addon_example: Addon):

View File

@@ -1,8 +1,5 @@
"""Test addon build."""
import base64
import json
from pathlib import Path
from unittest.mock import PropertyMock, patch
from awesomeversion import AwesomeVersion
@@ -10,7 +7,6 @@ from awesomeversion import AwesomeVersion
from supervisor.addons.addon import Addon
from supervisor.addons.build import AddonBuild
from supervisor.coresys import CoreSys
from supervisor.docker.const import DOCKER_HUB
from tests.common import is_in_list
@@ -33,7 +29,7 @@ async def test_platform_set(coresys: CoreSys, install_addon_ssh: Addon):
),
):
args = await coresys.run_in_executor(
build.get_docker_args, AwesomeVersion("latest"), "test-image:latest", None
build.get_docker_args, AwesomeVersion("latest"), "test-image:latest"
)
assert is_in_list(["--platform", "linux/amd64"], args["command"])
@@ -57,7 +53,7 @@ async def test_dockerfile_evaluation(coresys: CoreSys, install_addon_ssh: Addon)
),
):
args = await coresys.run_in_executor(
build.get_docker_args, AwesomeVersion("latest"), "test-image:latest", None
build.get_docker_args, AwesomeVersion("latest"), "test-image:latest"
)
assert is_in_list(["--file", "Dockerfile"], args["command"])
@@ -85,7 +81,7 @@ async def test_dockerfile_evaluation_arch(coresys: CoreSys, install_addon_ssh: A
),
):
args = await coresys.run_in_executor(
build.get_docker_args, AwesomeVersion("latest"), "test-image:latest", None
build.get_docker_args, AwesomeVersion("latest"), "test-image:latest"
)
assert is_in_list(["--file", "Dockerfile.aarch64"], args["command"])
@@ -121,158 +117,3 @@ async def test_build_invalid(coresys: CoreSys, install_addon_ssh: Addon):
),
):
assert not await build.is_valid()
async def test_docker_config_no_registries(coresys: CoreSys, install_addon_ssh: Addon):
"""Test docker config generation when no registries configured."""
build = await AddonBuild(coresys, install_addon_ssh).load_config()
# No registries configured by default
assert build.get_docker_config_json() is None
async def test_docker_config_no_matching_registry(
coresys: CoreSys, install_addon_ssh: Addon
):
"""Test docker config generation when registry doesn't match base image."""
build = await AddonBuild(coresys, install_addon_ssh).load_config()
# Configure a registry that doesn't match the base image
# pylint: disable-next=protected-access
coresys.docker.config._data["registries"] = {
"some.other.registry": {"username": "user", "password": "pass"}
}
with (
patch.object(
type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
),
patch.object(
type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
),
):
# Base image is ghcr.io/home-assistant/... which doesn't match
assert build.get_docker_config_json() is None
async def test_docker_config_matching_registry(
coresys: CoreSys, install_addon_ssh: Addon
):
"""Test docker config generation when registry matches base image."""
build = await AddonBuild(coresys, install_addon_ssh).load_config()
# Configure ghcr.io registry which matches the default base image
# pylint: disable-next=protected-access
coresys.docker.config._data["registries"] = {
"ghcr.io": {"username": "testuser", "password": "testpass"}
}
with (
patch.object(
type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
),
patch.object(
type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
),
):
config_json = build.get_docker_config_json()
assert config_json is not None
config = json.loads(config_json)
assert "auths" in config
assert "ghcr.io" in config["auths"]
# Verify base64-encoded credentials
expected_auth = base64.b64encode(b"testuser:testpass").decode()
assert config["auths"]["ghcr.io"]["auth"] == expected_auth
async def test_docker_config_docker_hub(coresys: CoreSys, install_addon_ssh: Addon):
"""Test docker config generation for Docker Hub registry."""
build = await AddonBuild(coresys, install_addon_ssh).load_config()
# Configure Docker Hub registry
# pylint: disable-next=protected-access
coresys.docker.config._data["registries"] = {
DOCKER_HUB: {"username": "hubuser", "password": "hubpass"}
}
# Mock base_image to return a Docker Hub image (no registry prefix)
with patch.object(
type(build),
"base_image",
new=PropertyMock(return_value="library/alpine:latest"),
):
config_json = build.get_docker_config_json()
assert config_json is not None
config = json.loads(config_json)
# Docker Hub uses special URL as key
assert "https://index.docker.io/v1/" in config["auths"]
expected_auth = base64.b64encode(b"hubuser:hubpass").decode()
assert config["auths"]["https://index.docker.io/v1/"]["auth"] == expected_auth
async def test_docker_args_with_config_path(coresys: CoreSys, install_addon_ssh: Addon):
"""Test docker args include config volume when path provided."""
build = await AddonBuild(coresys, install_addon_ssh).load_config()
with (
patch.object(
type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
),
patch.object(
type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
),
patch.object(
type(coresys.config),
"local_to_extern_path",
side_effect=lambda p: f"/extern{p}",
),
):
config_path = Path("/data/supervisor/tmp/config.json")
args = await coresys.run_in_executor(
build.get_docker_args,
AwesomeVersion("latest"),
"test-image:latest",
config_path,
)
# Check that config is mounted
assert "/extern/data/supervisor/tmp/config.json" in args["volumes"]
assert (
args["volumes"]["/extern/data/supervisor/tmp/config.json"]["bind"]
== "/root/.docker/config.json"
)
assert args["volumes"]["/extern/data/supervisor/tmp/config.json"]["mode"] == "ro"
async def test_docker_args_without_config_path(
coresys: CoreSys, install_addon_ssh: Addon
):
"""Test docker args don't include config volume when no path provided."""
build = await AddonBuild(coresys, install_addon_ssh).load_config()
with (
patch.object(
type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
),
patch.object(
type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
),
patch.object(
type(coresys.config),
"local_to_extern_path",
return_value="/addon/path/on/host",
),
):
args = await coresys.run_in_executor(
build.get_docker_args, AwesomeVersion("latest"), "test-image:latest", None
)
# Only docker socket and addon path should be mounted
assert len(args["volumes"]) == 2
# Verify no docker config mount
for bind in args["volumes"].values():
assert bind["bind"] != "/root/.docker/config.json"

View File

@@ -4,7 +4,7 @@ import asyncio
from collections.abc import AsyncGenerator, Generator
from copy import deepcopy
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, call, patch
from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch
from awesomeversion import AwesomeVersion
import pytest
@@ -514,13 +514,19 @@ async def test_shared_image_kept_on_uninstall(
latest = f"{install_addon_example.image}:latest"
await coresys.addons.uninstall("local_example2")
coresys.docker.images.delete.assert_not_called()
coresys.docker.images.remove.assert_not_called()
assert not coresys.addons.get("local_example2", local_only=True)
await coresys.addons.uninstall("local_example")
assert coresys.docker.images.delete.call_count == 2
assert coresys.docker.images.delete.call_args_list[0] == call(latest, force=True)
assert coresys.docker.images.delete.call_args_list[1] == call(image, force=True)
assert coresys.docker.images.remove.call_count == 2
assert coresys.docker.images.remove.call_args_list[0].kwargs == {
"image": latest,
"force": True,
}
assert coresys.docker.images.remove.call_args_list[1].kwargs == {
"image": image,
"force": True,
}
assert not coresys.addons.get("local_example", local_only=True)
@@ -548,17 +554,19 @@ async def test_shared_image_kept_on_update(
assert example_2.version == "1.2.0"
assert install_addon_example_image.version == "1.2.0"
image_new = {"Id": "image_new", "RepoTags": ["image_new:latest"]}
image_old = {"Id": "image_old", "RepoTags": ["image_old:latest"]}
docker.images.inspect.side_effect = [image_new, image_old]
image_new = MagicMock()
image_new.id = "image_new"
image_old = MagicMock()
image_old.id = "image_old"
docker.images.get.side_effect = [image_new, image_old]
docker.images.list.return_value = [image_new, image_old]
with patch.object(DockerAPI, "pull_image", return_value=image_new):
await coresys.addons.update("local_example2")
docker.images.delete.assert_not_called()
docker.images.remove.assert_not_called()
assert example_2.version == "1.3.0"
docker.images.inspect.side_effect = [image_new]
docker.images.get.side_effect = [image_new]
await coresys.addons.update("local_example_image")
docker.images.delete.assert_called_once_with("image_old", force=True)
docker.images.remove.assert_called_once_with("image_old", force=True)
assert install_addon_example_image.version == "1.3.0"

View File

@@ -1 +1,95 @@
"""Test for API calls."""
from unittest.mock import AsyncMock, MagicMock
from aiohttp.test_utils import TestClient
from supervisor.coresys import CoreSys
from supervisor.host.const import LogFormat
DEFAULT_LOG_RANGE = "entries=:-99:100"
DEFAULT_LOG_RANGE_FOLLOW = "entries=:-99:18446744073709551615"
async def common_test_api_advanced_logs(
path_prefix: str,
syslog_identifier: str,
api_client: TestClient,
journald_logs: MagicMock,
coresys: CoreSys,
os_available: None,
):
"""Template for tests of endpoints using advanced logs."""
resp = await api_client.get(f"{path_prefix}/logs")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={"SYSLOG_IDENTIFIER": syslog_identifier},
range_header=DEFAULT_LOG_RANGE,
accept=LogFormat.JOURNAL,
)
journald_logs.reset_mock()
resp = await api_client.get(f"{path_prefix}/logs/follow")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={"SYSLOG_IDENTIFIER": syslog_identifier, "follow": ""},
range_header=DEFAULT_LOG_RANGE_FOLLOW,
accept=LogFormat.JOURNAL,
)
journald_logs.reset_mock()
mock_response = MagicMock()
mock_response.text = AsyncMock(
return_value='{"CONTAINER_LOG_EPOCH": "12345"}\n{"CONTAINER_LOG_EPOCH": "12345"}\n'
)
journald_logs.return_value.__aenter__.return_value = mock_response
resp = await api_client.get(f"{path_prefix}/logs/latest")
assert resp.status == 200
assert journald_logs.call_count == 2
# Check the first call for getting epoch
epoch_call = journald_logs.call_args_list[0]
assert epoch_call[1]["params"] == {"CONTAINER_NAME": syslog_identifier}
assert epoch_call[1]["range_header"] == "entries=:-1:2"
# Check the second call for getting logs with the epoch
logs_call = journald_logs.call_args_list[1]
assert logs_call[1]["params"]["SYSLOG_IDENTIFIER"] == syslog_identifier
assert logs_call[1]["params"]["CONTAINER_LOG_EPOCH"] == "12345"
assert logs_call[1]["range_header"] == "entries=:0:18446744073709551615"
journald_logs.reset_mock()
resp = await api_client.get(f"{path_prefix}/logs/boots/0")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={"SYSLOG_IDENTIFIER": syslog_identifier, "_BOOT_ID": "ccc"},
range_header=DEFAULT_LOG_RANGE,
accept=LogFormat.JOURNAL,
)
journald_logs.reset_mock()
resp = await api_client.get(f"{path_prefix}/logs/boots/0/follow")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={
"SYSLOG_IDENTIFIER": syslog_identifier,
"_BOOT_ID": "ccc",
"follow": "",
},
range_header=DEFAULT_LOG_RANGE_FOLLOW,
accept=LogFormat.JOURNAL,
)

View File

@@ -1,149 +0,0 @@
"""Fixtures for API tests."""
from collections.abc import Awaitable, Callable
from unittest.mock import ANY, AsyncMock, MagicMock
from aiohttp.test_utils import TestClient
import pytest
from supervisor.coresys import CoreSys
from supervisor.host.const import LogFormat, LogFormatter
DEFAULT_LOG_RANGE = "entries=:-99:100"
DEFAULT_LOG_RANGE_FOLLOW = "entries=:-99:18446744073709551615"
async def _common_test_api_advanced_logs(
path_prefix: str,
syslog_identifier: str,
api_client: TestClient,
journald_logs: MagicMock,
coresys: CoreSys,
os_available: None,
journal_logs_reader: MagicMock,
):
"""Template for tests of endpoints using advanced logs."""
resp = await api_client.get(f"{path_prefix}/logs")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={"SYSLOG_IDENTIFIER": syslog_identifier},
range_header=DEFAULT_LOG_RANGE,
accept=LogFormat.JOURNAL,
)
journal_logs_reader.assert_called_with(ANY, LogFormatter.PLAIN, False)
journald_logs.reset_mock()
journal_logs_reader.reset_mock()
resp = await api_client.get(f"{path_prefix}/logs?no_colors")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={"SYSLOG_IDENTIFIER": syslog_identifier},
range_header=DEFAULT_LOG_RANGE,
accept=LogFormat.JOURNAL,
)
journal_logs_reader.assert_called_with(ANY, LogFormatter.PLAIN, True)
journald_logs.reset_mock()
journal_logs_reader.reset_mock()
resp = await api_client.get(f"{path_prefix}/logs/follow")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={"SYSLOG_IDENTIFIER": syslog_identifier, "follow": ""},
range_header=DEFAULT_LOG_RANGE_FOLLOW,
accept=LogFormat.JOURNAL,
)
journal_logs_reader.assert_called_with(ANY, LogFormatter.PLAIN, False)
journald_logs.reset_mock()
journal_logs_reader.reset_mock()
mock_response = MagicMock()
mock_response.text = AsyncMock(
return_value='{"CONTAINER_LOG_EPOCH": "12345"}\n{"CONTAINER_LOG_EPOCH": "12345"}\n'
)
journald_logs.return_value.__aenter__.return_value = mock_response
resp = await api_client.get(f"{path_prefix}/logs/latest")
assert resp.status == 200
assert journald_logs.call_count == 2
# Check the first call for getting epoch
epoch_call = journald_logs.call_args_list[0]
assert epoch_call[1]["params"] == {"CONTAINER_NAME": syslog_identifier}
assert epoch_call[1]["range_header"] == "entries=:-1:2"
# Check the second call for getting logs with the epoch
logs_call = journald_logs.call_args_list[1]
assert logs_call[1]["params"]["SYSLOG_IDENTIFIER"] == syslog_identifier
assert logs_call[1]["params"]["CONTAINER_LOG_EPOCH"] == "12345"
assert logs_call[1]["range_header"] == "entries=:0:18446744073709551615"
journal_logs_reader.assert_called_with(ANY, LogFormatter.PLAIN, True)
journald_logs.reset_mock()
journal_logs_reader.reset_mock()
resp = await api_client.get(f"{path_prefix}/logs/boots/0")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={"SYSLOG_IDENTIFIER": syslog_identifier, "_BOOT_ID": "ccc"},
range_header=DEFAULT_LOG_RANGE,
accept=LogFormat.JOURNAL,
)
journald_logs.reset_mock()
resp = await api_client.get(f"{path_prefix}/logs/boots/0/follow")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={
"SYSLOG_IDENTIFIER": syslog_identifier,
"_BOOT_ID": "ccc",
"follow": "",
},
range_header=DEFAULT_LOG_RANGE_FOLLOW,
accept=LogFormat.JOURNAL,
)
@pytest.fixture
async def advanced_logs_tester(
api_client: TestClient,
journald_logs: MagicMock,
coresys: CoreSys,
os_available,
journal_logs_reader: MagicMock,
) -> Callable[[str, str], Awaitable[None]]:
"""Fixture that returns a function to test advanced logs endpoints.
This allows tests to avoid explicitly passing all the required fixtures.
Usage:
async def test_my_logs(advanced_logs_tester):
await advanced_logs_tester("/path/prefix", "syslog_identifier")
"""
async def test_logs(path_prefix: str, syslog_identifier: str):
await _common_test_api_advanced_logs(
path_prefix,
syslog_identifier,
api_client,
journald_logs,
coresys,
os_available,
journal_logs_reader,
)
return test_logs

View File

@@ -20,6 +20,7 @@ from supervisor.exceptions import HassioError
from supervisor.store.repository import Repository
from ..const import TEST_ADDON_SLUG
from . import common_test_api_advanced_logs
def _create_test_event(name: str, state: ContainerState) -> DockerContainerStateEvent:
@@ -71,11 +72,21 @@ async def test_addons_info_not_installed(
async def test_api_addon_logs(
advanced_logs_tester,
api_client: TestClient,
journald_logs: MagicMock,
coresys: CoreSys,
os_available,
install_addon_ssh: Addon,
):
"""Test addon logs."""
await advanced_logs_tester("/addons/local_ssh", "addon_local_ssh")
await common_test_api_advanced_logs(
"/addons/local_ssh",
"addon_local_ssh",
api_client,
journald_logs,
coresys,
os_available,
)
async def test_api_addon_logs_not_installed(api_client: TestClient):

View File

@@ -1,6 +1,18 @@
"""Test audio api."""
from unittest.mock import MagicMock
async def test_api_audio_logs(advanced_logs_tester) -> None:
from aiohttp.test_utils import TestClient
from supervisor.coresys import CoreSys
from tests.api import common_test_api_advanced_logs
async def test_api_audio_logs(
api_client: TestClient, journald_logs: MagicMock, coresys: CoreSys, os_available
):
"""Test audio logs."""
await advanced_logs_tester("/audio", "hassio_audio")
await common_test_api_advanced_logs(
"/audio", "hassio_audio", api_client, journald_logs, coresys, os_available
)

View File

@@ -1,12 +1,13 @@
"""Test DNS API."""
from unittest.mock import patch
from unittest.mock import MagicMock, patch
from aiohttp.test_utils import TestClient
from supervisor.coresys import CoreSys
from supervisor.dbus.resolved import Resolved
from tests.api import common_test_api_advanced_logs
from tests.dbus_service_mocks.base import DBusServiceMock
from tests.dbus_service_mocks.resolved import Resolved as ResolvedService
@@ -65,6 +66,15 @@ async def test_options(api_client: TestClient, coresys: CoreSys):
restart.assert_called_once()
async def test_api_dns_logs(advanced_logs_tester):
async def test_api_dns_logs(
api_client: TestClient, journald_logs: MagicMock, coresys: CoreSys, os_available
):
"""Test dns logs."""
await advanced_logs_tester("/dns", "hassio_dns")
await common_test_api_advanced_logs(
"/dns",
"hassio_dns",
api_client,
journald_logs,
coresys,
os_available,
)

View File

@@ -4,11 +4,6 @@ from aiohttp.test_utils import TestClient
import pytest
from supervisor.coresys import CoreSys
from supervisor.resolution.const import ContextType, IssueType, SuggestionType
from supervisor.resolution.data import Issue, Suggestion
from tests.dbus_service_mocks.agent_system import System as SystemService
from tests.dbus_service_mocks.base import DBusServiceMock
@pytest.mark.asyncio
@@ -89,79 +84,3 @@ async def test_registry_not_found(api_client: TestClient):
assert resp.status == 404
body = await resp.json()
assert body["message"] == "Hostname bad does not exist in registries"
@pytest.mark.parametrize("os_available", ["17.0.rc1"], indirect=True)
async def test_api_migrate_docker_storage_driver(
api_client: TestClient,
coresys: CoreSys,
os_agent_services: dict[str, DBusServiceMock],
os_available,
):
"""Test Docker storage driver migration."""
system_service: SystemService = os_agent_services["agent_system"]
system_service.MigrateDockerStorageDriver.calls.clear()
resp = await api_client.post(
"/docker/migrate-storage-driver",
json={"storage_driver": "overlayfs"},
)
assert resp.status == 200
assert system_service.MigrateDockerStorageDriver.calls == [("overlayfs",)]
assert (
Issue(IssueType.REBOOT_REQUIRED, ContextType.SYSTEM)
in coresys.resolution.issues
)
assert (
Suggestion(SuggestionType.EXECUTE_REBOOT, ContextType.SYSTEM)
in coresys.resolution.suggestions
)
# Test migration back to overlay2 (graph driver)
system_service.MigrateDockerStorageDriver.calls.clear()
resp = await api_client.post(
"/docker/migrate-storage-driver",
json={"storage_driver": "overlay2"},
)
assert resp.status == 200
assert system_service.MigrateDockerStorageDriver.calls == [("overlay2",)]
@pytest.mark.parametrize("os_available", ["17.0.rc1"], indirect=True)
async def test_api_migrate_docker_storage_driver_invalid_backend(
api_client: TestClient,
os_available,
):
"""Test 400 is returned for invalid storage driver."""
resp = await api_client.post(
"/docker/migrate-storage-driver",
json={"storage_driver": "invalid"},
)
assert resp.status == 400
async def test_api_migrate_docker_storage_driver_not_os(
api_client: TestClient,
coresys: CoreSys,
):
"""Test 404 is returned if not running on HAOS."""
resp = await api_client.post(
"/docker/migrate-storage-driver",
json={"storage_driver": "overlayfs"},
)
assert resp.status == 404
@pytest.mark.parametrize("os_available", ["16.2"], indirect=True)
async def test_api_migrate_docker_storage_driver_old_os(
api_client: TestClient,
coresys: CoreSys,
os_available,
):
"""Test 404 is returned if OS is older than 17.0."""
resp = await api_client.post(
"/docker/migrate-storage-driver",
json={"storage_driver": "overlayfs"},
)
assert resp.status == 404

View File

@@ -18,18 +18,26 @@ from supervisor.homeassistant.const import WSEvent
from supervisor.homeassistant.core import HomeAssistantCore
from supervisor.homeassistant.module import HomeAssistant
from tests.common import AsyncIterator, load_json_fixture
from tests.api import common_test_api_advanced_logs
from tests.common import load_json_fixture
@pytest.mark.parametrize("legacy_route", [True, False])
async def test_api_core_logs(
advanced_logs_tester: AsyncMock,
api_client: TestClient,
journald_logs: MagicMock,
coresys: CoreSys,
os_available,
legacy_route: bool,
):
"""Test core logs."""
await advanced_logs_tester(
await common_test_api_advanced_logs(
f"/{'homeassistant' if legacy_route else 'core'}",
"homeassistant",
api_client,
journald_logs,
coresys,
os_available,
)
@@ -275,9 +283,9 @@ async def test_api_progress_updates_home_assistant_update(
"""Test progress updates sent to Home Assistant for updates."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
coresys.core.set_state(CoreState.RUNNING)
logs = load_json_fixture("docker_pull_image_log.json")
coresys.docker.images.pull.return_value = AsyncIterator(logs)
coresys.docker.docker.api.pull.return_value = load_json_fixture(
"docker_pull_image_log.json"
)
coresys.homeassistant.version = AwesomeVersion("2025.8.0")
with (
@@ -323,29 +331,29 @@ async def test_api_progress_updates_home_assistant_update(
},
{
"stage": None,
"progress": 1.7,
"progress": 1.2,
"done": False,
},
{
"stage": None,
"progress": 4.0,
"progress": 2.8,
"done": False,
},
]
assert events[-5:] == [
{
"stage": None,
"progress": 98.2,
"progress": 97.2,
"done": False,
},
{
"stage": None,
"progress": 98.3,
"progress": 98.4,
"done": False,
},
{
"stage": None,
"progress": 99.3,
"progress": 99.4,
"done": False,
},
{

View File

@@ -272,7 +272,7 @@ async def test_advaced_logs_query_parameters(
range_header=DEFAULT_RANGE,
accept=LogFormat.JOURNAL,
)
journal_logs_reader.assert_called_with(ANY, LogFormatter.VERBOSE, False)
journal_logs_reader.assert_called_with(ANY, LogFormatter.VERBOSE)
journal_logs_reader.reset_mock()
journald_logs.reset_mock()
@@ -290,19 +290,7 @@ async def test_advaced_logs_query_parameters(
range_header="entries=:-52:53",
accept=LogFormat.JOURNAL,
)
journal_logs_reader.assert_called_with(ANY, LogFormatter.VERBOSE, False)
journal_logs_reader.reset_mock()
journald_logs.reset_mock()
# Check no_colors query parameter
await api_client.get("/host/logs?no_colors")
journald_logs.assert_called_once_with(
params={"SYSLOG_IDENTIFIER": coresys.host.logs.default_identifiers},
range_header=DEFAULT_RANGE,
accept=LogFormat.JOURNAL,
)
journal_logs_reader.assert_called_with(ANY, LogFormatter.VERBOSE, True)
journal_logs_reader.assert_called_with(ANY, LogFormatter.VERBOSE)
async def test_advanced_logs_boot_id_offset(
@@ -355,24 +343,24 @@ async def test_advanced_logs_formatters(
"""Test advanced logs formatters varying on Accept header."""
await api_client.get("/host/logs")
journal_logs_reader.assert_called_once_with(ANY, LogFormatter.VERBOSE, False)
journal_logs_reader.assert_called_once_with(ANY, LogFormatter.VERBOSE)
journal_logs_reader.reset_mock()
headers = {"Accept": "text/x-log"}
await api_client.get("/host/logs", headers=headers)
journal_logs_reader.assert_called_once_with(ANY, LogFormatter.VERBOSE, False)
journal_logs_reader.assert_called_once_with(ANY, LogFormatter.VERBOSE)
journal_logs_reader.reset_mock()
await api_client.get("/host/logs/identifiers/test")
journal_logs_reader.assert_called_once_with(ANY, LogFormatter.PLAIN, False)
journal_logs_reader.assert_called_once_with(ANY, LogFormatter.PLAIN)
journal_logs_reader.reset_mock()
headers = {"Accept": "text/x-log"}
await api_client.get("/host/logs/identifiers/test", headers=headers)
journal_logs_reader.assert_called_once_with(ANY, LogFormatter.VERBOSE, False)
journal_logs_reader.assert_called_once_with(ANY, LogFormatter.VERBOSE)
async def test_advanced_logs_errors(coresys: CoreSys, api_client: TestClient):

View File

@@ -1,28 +1,12 @@
"""Test ingress API."""
from collections.abc import AsyncGenerator
from unittest.mock import AsyncMock, MagicMock, patch
from unittest.mock import AsyncMock, patch
import aiohttp
from aiohttp import hdrs, web
from aiohttp.test_utils import TestClient, TestServer
import pytest
from aiohttp.test_utils import TestClient
from supervisor.addons.addon import Addon
from supervisor.coresys import CoreSys
@pytest.fixture(name="real_websession")
async def fixture_real_websession(
coresys: CoreSys,
) -> AsyncGenerator[aiohttp.ClientSession]:
"""Fixture for real aiohttp ClientSession for ingress proxy tests."""
session = aiohttp.ClientSession()
coresys._websession = session # pylint: disable=W0212
yield session
await session.close()
async def test_validate_session(api_client: TestClient, coresys: CoreSys):
"""Test validating ingress session."""
with patch("aiohttp.web_request.BaseRequest.__getitem__", return_value=None):
@@ -102,126 +86,3 @@ async def test_validate_session_with_user_id(
assert (
coresys.ingress.get_session_data(session).user.display_name == "Some Name"
)
async def test_ingress_proxy_no_content_type_for_empty_body_responses(
api_client: TestClient, coresys: CoreSys, real_websession: aiohttp.ClientSession
):
"""Test that empty body responses don't get Content-Type header."""
# Create a mock add-on backend server that returns various status codes
async def mock_addon_handler(request: web.Request) -> web.Response:
"""Mock add-on handler that returns different status codes based on path."""
path = request.path
if path == "/204":
# 204 No Content - should not have Content-Type
return web.Response(status=204)
elif path == "/304":
# 304 Not Modified - should not have Content-Type
return web.Response(status=304)
elif path == "/100":
# 100 Continue - should not have Content-Type
return web.Response(status=100)
elif path == "/head":
# HEAD request - should have Content-Type (same as GET would)
return web.Response(body=b"test", content_type="text/html")
elif path == "/200":
# 200 OK with body - should have Content-Type
return web.Response(body=b"test content", content_type="text/plain")
elif path == "/200-no-content-type":
# 200 OK without explicit Content-Type - should get default
return web.Response(body=b"test content")
elif path == "/200-json":
# 200 OK with JSON - should preserve Content-Type
return web.Response(
body=b'{"key": "value"}', content_type="application/json"
)
else:
return web.Response(body=b"default", content_type="text/html")
# Create test server for mock add-on
app = web.Application()
app.router.add_route("*", "/{tail:.*}", mock_addon_handler)
addon_server = TestServer(app)
await addon_server.start_server()
try:
# Create ingress session
resp = await api_client.post("/ingress/session")
result = await resp.json()
session = result["data"]["session"]
# Create a mock add-on
mock_addon = MagicMock(spec=Addon)
mock_addon.slug = "test_addon"
mock_addon.ip_address = addon_server.host
mock_addon.ingress_port = addon_server.port
mock_addon.ingress_stream = False
# Generate an ingress token and register the add-on
ingress_token = coresys.ingress.create_session()
with patch.object(coresys.ingress, "get", return_value=mock_addon):
# Test 204 No Content - should NOT have Content-Type
resp = await api_client.get(
f"/ingress/{ingress_token}/204",
cookies={"ingress_session": session},
)
assert resp.status == 204
assert hdrs.CONTENT_TYPE not in resp.headers
# Test 304 Not Modified - should NOT have Content-Type
resp = await api_client.get(
f"/ingress/{ingress_token}/304",
cookies={"ingress_session": session},
)
assert resp.status == 304
assert hdrs.CONTENT_TYPE not in resp.headers
# Test HEAD request - SHOULD have Content-Type (same as GET)
# per RFC 9110: HEAD should return same headers as GET
resp = await api_client.head(
f"/ingress/{ingress_token}/head",
cookies={"ingress_session": session},
)
assert resp.status == 200
assert hdrs.CONTENT_TYPE in resp.headers
assert "text/html" in resp.headers[hdrs.CONTENT_TYPE]
# Body should be empty for HEAD
body = await resp.read()
assert body == b""
# Test 200 OK with body - SHOULD have Content-Type
resp = await api_client.get(
f"/ingress/{ingress_token}/200",
cookies={"ingress_session": session},
)
assert resp.status == 200
assert hdrs.CONTENT_TYPE in resp.headers
assert resp.headers[hdrs.CONTENT_TYPE] == "text/plain"
body = await resp.read()
assert body == b"test content"
# Test 200 OK without explicit Content-Type - SHOULD get default
resp = await api_client.get(
f"/ingress/{ingress_token}/200-no-content-type",
cookies={"ingress_session": session},
)
assert resp.status == 200
assert hdrs.CONTENT_TYPE in resp.headers
# Should get application/octet-stream as default from aiohttp ClientResponse
assert "application/octet-stream" in resp.headers[hdrs.CONTENT_TYPE]
# Test 200 OK with JSON - SHOULD preserve Content-Type
resp = await api_client.get(
f"/ingress/{ingress_token}/200-json",
cookies={"ingress_session": session},
)
assert resp.status == 200
assert hdrs.CONTENT_TYPE in resp.headers
assert "application/json" in resp.headers[hdrs.CONTENT_TYPE]
body = await resp.read()
assert body == b'{"key": "value"}'
finally:
await addon_server.close()
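
A minimal sketch of the header rule this test verifies (illustrative helper, not the actual ingress proxy code): status codes that cannot carry a body must not be given a Content-Type, while HEAD responses keep the headers the matching GET would return.

```python
from aiohttp import hdrs


def should_strip_content_type(status: int) -> bool:
    """True for responses that have no body per RFC 9110 (1xx, 204, 304)."""
    return status in (204, 304) or 100 <= status < 200


def filtered_headers(status: int, headers: dict[str, str]) -> dict[str, str]:
    """Drop Content-Type only when the response cannot have a body."""
    if should_strip_content_type(status):
        return {
            key: value
            for key, value in headers.items()
            if key.lower() != hdrs.CONTENT_TYPE.lower()
        }
    return headers
```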

View File

@@ -1,6 +1,23 @@
"""Test multicast api."""
from unittest.mock import MagicMock
async def test_api_multicast_logs(advanced_logs_tester):
from aiohttp.test_utils import TestClient
from supervisor.coresys import CoreSys
from tests.api import common_test_api_advanced_logs
async def test_api_multicast_logs(
api_client: TestClient, journald_logs: MagicMock, coresys: CoreSys, os_available
):
"""Test multicast logs."""
await advanced_logs_tester("/multicast", "hassio_multicast")
await common_test_api_advanced_logs(
"/multicast",
"hassio_multicast",
api_client,
journald_logs,
coresys,
os_available,
)

View File

@@ -24,7 +24,7 @@ from supervisor.homeassistant.module import HomeAssistant
from supervisor.store.addon import AddonStore
from supervisor.store.repository import Repository
from tests.common import AsyncIterator, load_json_fixture
from tests.common import load_json_fixture
from tests.const import TEST_ADDON_SLUG
REPO_URL = "https://github.com/awesome-developer/awesome-repo"
@@ -732,10 +732,9 @@ async def test_api_progress_updates_addon_install_update(
"""Test progress updates sent to Home Assistant for installs/updates."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
coresys.core.set_state(CoreState.RUNNING)
logs = load_json_fixture("docker_pull_image_log.json")
coresys.docker.images.pull.return_value = AsyncIterator(logs)
coresys.docker.docker.api.pull.return_value = load_json_fixture(
"docker_pull_image_log.json"
)
coresys.arch._supported_arch = ["amd64"] # pylint: disable=protected-access
install_addon_example.data_store["version"] = AwesomeVersion("2.0.0")
@@ -773,29 +772,29 @@ async def test_api_progress_updates_addon_install_update(
},
{
"stage": None,
"progress": 1.7,
"progress": 1.2,
"done": False,
},
{
"stage": None,
"progress": 4.0,
"progress": 2.8,
"done": False,
},
]
assert events[-5:] == [
{
"stage": None,
"progress": 98.2,
"progress": 97.2,
"done": False,
},
{
"stage": None,
"progress": 98.3,
"progress": 98.4,
"done": False,
},
{
"stage": None,
"progress": 99.3,
"progress": 99.4,
"done": False,
},
{

View File

@@ -18,7 +18,8 @@ from supervisor.store.repository import Repository
from supervisor.supervisor import Supervisor
from supervisor.updater import Updater
from tests.common import AsyncIterator, load_json_fixture
from tests.api import common_test_api_advanced_logs
from tests.common import load_json_fixture
from tests.dbus_service_mocks.base import DBusServiceMock
from tests.dbus_service_mocks.os_agent import OSAgent as OSAgentService
@@ -154,9 +155,18 @@ async def test_api_supervisor_options_diagnostics(
assert coresys.dbus.agent.diagnostics is False
async def test_api_supervisor_logs(advanced_logs_tester):
async def test_api_supervisor_logs(
api_client: TestClient, journald_logs: MagicMock, coresys: CoreSys, os_available
):
"""Test supervisor logs."""
await advanced_logs_tester("/supervisor", "hassio_supervisor")
await common_test_api_advanced_logs(
"/supervisor",
"hassio_supervisor",
api_client,
journald_logs,
coresys,
os_available,
)
async def test_api_supervisor_fallback(
@@ -322,9 +332,9 @@ async def test_api_progress_updates_supervisor_update(
"""Test progress updates sent to Home Assistant for updates."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
coresys.core.set_state(CoreState.RUNNING)
logs = load_json_fixture("docker_pull_image_log.json")
coresys.docker.images.pull.return_value = AsyncIterator(logs)
coresys.docker.docker.api.pull.return_value = load_json_fixture(
"docker_pull_image_log.json"
)
with (
patch.object(
@@ -371,29 +381,29 @@ async def test_api_progress_updates_supervisor_update(
},
{
"stage": None,
"progress": 1.7,
"progress": 1.2,
"done": False,
},
{
"stage": None,
"progress": 4.0,
"progress": 2.8,
"done": False,
},
]
assert events[-5:] == [
{
"stage": None,
"progress": 98.2,
"progress": 97.2,
"done": False,
},
{
"stage": None,
"progress": 98.3,
"progress": 98.4,
"done": False,
},
{
"stage": None,
"progress": 99.3,
"progress": 99.4,
"done": False,
},
{

View File

@@ -1,14 +1,13 @@
"""Common test functions."""
import asyncio
from collections.abc import Sequence
from datetime import datetime
from functools import partial
from importlib import import_module
from inspect import getclosurevars
import json
from pathlib import Path
from typing import Any, Self
from typing import Any
from dbus_fast.aio.message_bus import MessageBus
@@ -146,22 +145,3 @@ class MockResponse:
async def __aexit__(self, exc_type, exc, tb):
"""Exit the context manager."""
class AsyncIterator:
"""Make list/fixture into async iterator for test mocks."""
def __init__(self, seq: Sequence[Any]) -> None:
"""Initialize with sequence."""
self.iter = iter(seq)
def __aiter__(self) -> Self:
"""Implement aiter."""
return self
async def __anext__(self) -> Any:
"""Return next in sequence."""
try:
return next(self.iter)
except StopIteration:
raise StopAsyncIteration() from None
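
Illustrative usage of this helper, as the tests elsewhere in this diff do when feeding a list of pull-log dicts to a mocked pull: it simply lets `async for` consume a plain sequence.

```python
import asyncio


async def demo() -> None:
    # AsyncIterator as defined in the class above
    logs = [{"status": "Pulling fs layer"}, {"status": "Download complete"}]
    async for entry in AsyncIterator(logs):
        print(entry["status"])


asyncio.run(demo())
```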

View File

@@ -9,7 +9,6 @@ import subprocess
from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch
from uuid import uuid4
from aiodocker.docker import DockerImages
from aiohttp import ClientSession, web
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
@@ -56,7 +55,6 @@ from supervisor.store.repository import Repository
from supervisor.utils.dt import utcnow
from .common import (
AsyncIterator,
MockResponse,
load_binary_fixture,
load_fixture,
@@ -114,46 +112,40 @@ async def supervisor_name() -> None:
@pytest.fixture
async def docker() -> DockerAPI:
"""Mock DockerAPI."""
image_inspect = {
"Os": "linux",
"Architecture": "amd64",
"Id": "test123",
"RepoTags": ["ghcr.io/home-assistant/amd64-hassio-supervisor:latest"],
}
images = [MagicMock(tags=["ghcr.io/home-assistant/amd64-hassio-supervisor:latest"])]
image = MagicMock()
image.attrs = {"Os": "linux", "Architecture": "amd64"}
with (
patch("supervisor.docker.manager.DockerClient", return_value=MagicMock()),
patch("supervisor.docker.manager.DockerAPI.images", return_value=MagicMock()),
patch(
"supervisor.docker.manager.DockerAPI.containers", return_value=MagicMock()
),
patch("supervisor.docker.manager.DockerAPI.api", return_value=MagicMock()),
patch("supervisor.docker.manager.DockerAPI.info", return_value=MagicMock()),
patch("supervisor.docker.manager.DockerAPI.unload"),
patch("supervisor.docker.manager.aiodocker.Docker", return_value=MagicMock()),
patch(
"supervisor.docker.manager.DockerAPI.images",
new=PropertyMock(
return_value=(docker_images := MagicMock(spec=DockerImages))
),
"supervisor.docker.manager.DockerAPI.api",
return_value=(api_mock := MagicMock()),
),
patch("supervisor.docker.manager.DockerAPI.images.get", return_value=image),
patch("supervisor.docker.manager.DockerAPI.images.list", return_value=images),
patch(
"supervisor.docker.manager.DockerAPI.info",
return_value=MagicMock(),
),
patch("supervisor.docker.manager.DockerAPI.unload"),
):
docker_obj = await DockerAPI(MagicMock()).post_init()
docker_obj.config._data = {"registries": {}}
with patch("supervisor.docker.monitor.DockerMonitor.load"):
await docker_obj.load()
docker_images.inspect.return_value = image_inspect
docker_images.list.return_value = [image_inspect]
docker_images.import_image.return_value = [
{"stream": "Loaded image: test:latest\n"}
]
docker_images.pull.return_value = AsyncIterator([{}])
docker_obj.info.logging = "journald"
docker_obj.info.storage = "overlay2"
docker_obj.info.version = AwesomeVersion("1.0.0")
# Need an iterable for logs
api_mock.pull.return_value = []
yield docker_obj
@@ -846,9 +838,11 @@ async def container(docker: DockerAPI) -> MagicMock:
"""Mock attrs and status for container on attach."""
docker.containers.get.return_value = addon = MagicMock()
docker.containers.create.return_value = addon
docker.images.build.return_value = (addon, "")
addon.status = "stopped"
addon.attrs = {"State": {"ExitCode": 0}}
yield addon
with patch.object(DockerAPI, "pull_image", return_value=addon):
yield addon
@pytest.fixture

View File

@@ -184,20 +184,3 @@ async def test_interface_becomes_unmanaged(
assert wireless.is_connected is False
assert eth0.connection is None
assert connection.is_connected is False
async def test_unknown_device_type(
device_eth0_service: DeviceService, dbus_session_bus: MessageBus
):
"""Test unknown device types are handled gracefully."""
interface = NetworkInterface("/org/freedesktop/NetworkManager/Devices/1")
await interface.connect(dbus_session_bus)
# Emit an unknown device type (e.g., 1000 which doesn't exist in the enum)
device_eth0_service.emit_properties_changed({"DeviceType": 1000})
await device_eth0_service.ping()
# Should return UNKNOWN instead of crashing
assert interface.type == DeviceType.UNKNOWN
# Wireless should be None since it's not a wireless device
assert interface.wireless is None
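
A small, hypothetical sketch of the fallback this test relies on: unrecognised NetworkManager device-type integers map to UNKNOWN via the enum's `_missing_` hook instead of raising. The members below are placeholders, not Supervisor's full DeviceType.

```python
from enum import IntEnum


class DeviceType(IntEnum):
    """Placeholder subset of device types, for illustration only."""

    UNKNOWN = 0
    ETHERNET = 1
    WIFI = 2

    @classmethod
    def _missing_(cls, value: object) -> "DeviceType":
        # Any unrecognised integer (e.g. 1000) degrades to UNKNOWN
        return cls.UNKNOWN


assert DeviceType(1000) is DeviceType.UNKNOWN
```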

View File

@@ -41,51 +41,51 @@ async def test_dbus_resolved_info(
assert resolved.dns_over_tls == DNSOverTLSEnabled.NO
assert len(resolved.dns) == 2
assert resolved.dns[0] == (0, 2, inet_aton("127.0.0.1"))
assert resolved.dns[1] == (0, 10, inet_pton(AF_INET6, "::1"))
assert resolved.dns[0] == [0, 2, inet_aton("127.0.0.1")]
assert resolved.dns[1] == [0, 10, inet_pton(AF_INET6, "::1")]
assert len(resolved.dns_ex) == 2
assert resolved.dns_ex[0] == (0, 2, inet_aton("127.0.0.1"), 0, "")
assert resolved.dns_ex[1] == (0, 10, inet_pton(AF_INET6, "::1"), 0, "")
assert resolved.dns_ex[0] == [0, 2, inet_aton("127.0.0.1"), 0, ""]
assert resolved.dns_ex[1] == [0, 10, inet_pton(AF_INET6, "::1"), 0, ""]
assert len(resolved.fallback_dns) == 2
assert resolved.fallback_dns[0] == (0, 2, inet_aton("1.1.1.1"))
assert resolved.fallback_dns[1] == (
assert resolved.fallback_dns[0] == [0, 2, inet_aton("1.1.1.1")]
assert resolved.fallback_dns[1] == [
0,
10,
inet_pton(AF_INET6, "2606:4700:4700::1111"),
)
]
assert len(resolved.fallback_dns_ex) == 2
assert resolved.fallback_dns_ex[0] == (
assert resolved.fallback_dns_ex[0] == [
0,
2,
inet_aton("1.1.1.1"),
0,
"cloudflare-dns.com",
)
assert resolved.fallback_dns_ex[1] == (
]
assert resolved.fallback_dns_ex[1] == [
0,
10,
inet_pton(AF_INET6, "2606:4700:4700::1111"),
0,
"cloudflare-dns.com",
)
]
assert resolved.current_dns_server == (0, 2, inet_aton("127.0.0.1"))
assert resolved.current_dns_server_ex == (
assert resolved.current_dns_server == [0, 2, inet_aton("127.0.0.1")]
assert resolved.current_dns_server_ex == [
0,
2,
inet_aton("127.0.0.1"),
0,
"",
)
]
assert len(resolved.domains) == 1
assert resolved.domains[0] == (0, "local.hass.io", False)
assert resolved.domains[0] == [0, "local.hass.io", False]
assert resolved.transaction_statistics == (0, 100000)
assert resolved.cache_statistics == (10, 50000, 10000)
assert resolved.transaction_statistics == [0, 100000]
assert resolved.cache_statistics == [10, 50000, 10000]
assert resolved.dnssec == DNSSECValidation.NO
assert resolved.dnssec_statistics == (0, 0, 0, 0)
assert resolved.dnssec_statistics == [0, 0, 0, 0]
assert resolved.dnssec_supported is False
assert resolved.dnssec_negative_trust_anchors == [
"168.192.in-addr.arpa",

View File

@@ -185,10 +185,10 @@ async def test_start_transient_unit(
"tmp-test.mount",
"fail",
[
("Description", Variant("s", "Test")),
("What", Variant("s", "//homeassistant/config")),
("Type", Variant("s", "cifs")),
("Options", Variant("s", "username=homeassistant,password=password")),
["Description", Variant("s", "Test")],
["What", Variant("s", "//homeassistant/config")],
["Type", Variant("s", "cifs")],
["Options", Variant("s", "username=homeassistant,password=password")],
],
[],
)

View File

@@ -1,6 +1,6 @@
"""Mock of OS Agent System dbus service."""
from dbus_fast import DBusError, ErrorType
from dbus_fast import DBusError
from .base import DBusServiceMock, dbus_method
@@ -21,7 +21,6 @@ class System(DBusServiceMock):
object_path = "/io/hass/os/System"
interface = "io.hass.os.System"
response_schedule_wipe_device: bool | DBusError = True
response_migrate_docker_storage_driver: None | DBusError = None
@dbus_method()
def ScheduleWipeDevice(self) -> "b":
@@ -29,14 +28,3 @@ class System(DBusServiceMock):
if isinstance(self.response_schedule_wipe_device, DBusError):
raise self.response_schedule_wipe_device # pylint: disable=raising-bad-type
return self.response_schedule_wipe_device
@dbus_method()
def MigrateDockerStorageDriver(self, backend: "s") -> None:
"""Migrate Docker storage driver."""
if isinstance(self.response_migrate_docker_storage_driver, DBusError):
raise self.response_migrate_docker_storage_driver # pylint: disable=raising-bad-type
if backend not in ("overlayfs", "overlay2"):
raise DBusError(
ErrorType.FAILED,
f"unsupported driver: {backend} (only 'overlayfs' and 'overlay2' are supported)",
)

View File

@@ -45,8 +45,8 @@ class Resolved(DBusServiceMock):
def DNS(self) -> "a(iiay)":
"""Get DNS."""
return [
(0, 2, bytes([127, 0, 0, 1])),
(
[0, 2, bytes([127, 0, 0, 1])],
[
0,
10,
bytes(
@@ -69,15 +69,15 @@ class Resolved(DBusServiceMock):
0x1,
]
),
),
],
]
@dbus_property(access=PropertyAccess.READ)
def DNSEx(self) -> "a(iiayqs)":
"""Get DNSEx."""
return [
(0, 2, bytes([127, 0, 0, 1]), 0, ""),
(
[0, 2, bytes([127, 0, 0, 1]), 0, ""],
[
0,
10,
bytes(
@@ -102,15 +102,15 @@ class Resolved(DBusServiceMock):
),
0,
"",
),
],
]
@dbus_property(access=PropertyAccess.READ)
def FallbackDNS(self) -> "a(iiay)":
"""Get FallbackDNS."""
return [
(0, 2, bytes([1, 1, 1, 1])),
(
[0, 2, bytes([1, 1, 1, 1])],
[
0,
10,
bytes(
@@ -133,15 +133,15 @@ class Resolved(DBusServiceMock):
0x11,
]
),
),
],
]
@dbus_property(access=PropertyAccess.READ)
def FallbackDNSEx(self) -> "a(iiayqs)":
"""Get FallbackDNSEx."""
return [
(0, 2, bytes([1, 1, 1, 1]), 0, "cloudflare-dns.com"),
(
[0, 2, bytes([1, 1, 1, 1]), 0, "cloudflare-dns.com"],
[
0,
10,
bytes(
@@ -166,33 +166,33 @@ class Resolved(DBusServiceMock):
),
0,
"cloudflare-dns.com",
),
],
]
@dbus_property(access=PropertyAccess.READ)
def CurrentDNSServer(self) -> "(iiay)":
"""Get CurrentDNSServer."""
return (0, 2, bytes([127, 0, 0, 1]))
return [0, 2, bytes([127, 0, 0, 1])]
@dbus_property(access=PropertyAccess.READ)
def CurrentDNSServerEx(self) -> "(iiayqs)":
"""Get CurrentDNSServerEx."""
return (0, 2, bytes([127, 0, 0, 1]), 0, "")
return [0, 2, bytes([127, 0, 0, 1]), 0, ""]
@dbus_property(access=PropertyAccess.READ)
def Domains(self) -> "a(isb)":
"""Get Domains."""
return [(0, "local.hass.io", False)]
return [[0, "local.hass.io", False]]
@dbus_property(access=PropertyAccess.READ)
def TransactionStatistics(self) -> "(tt)":
"""Get TransactionStatistics."""
return (0, 100000)
return [0, 100000]
@dbus_property(access=PropertyAccess.READ)
def CacheStatistics(self) -> "(ttt)":
"""Get CacheStatistics."""
return (10, 50000, 10000)
return [10, 50000, 10000]
@dbus_property(access=PropertyAccess.READ)
def DNSSEC(self) -> "s":
@@ -202,7 +202,7 @@ class Resolved(DBusServiceMock):
@dbus_property(access=PropertyAccess.READ)
def DNSSECStatistics(self) -> "(tttt)":
"""Get DNSSECStatistics."""
return (0, 0, 0, 0)
return [0, 0, 0, 0]
@dbus_property(access=PropertyAccess.READ)
def DNSSECSupported(self) -> "b":

View File

@@ -2,8 +2,7 @@
# pylint: disable=protected-access
from supervisor.coresys import CoreSys
from supervisor.docker.const import DOCKER_HUB
from supervisor.docker.interface import DockerInterface
from supervisor.docker.interface import DOCKER_HUB, DockerInterface
def test_no_credentials(coresys: CoreSys, test_docker_interface: DockerInterface):

View File

@@ -5,10 +5,10 @@ from pathlib import Path
from typing import Any
from unittest.mock import ANY, AsyncMock, MagicMock, Mock, PropertyMock, call, patch
import aiodocker
from awesomeversion import AwesomeVersion
from docker.errors import DockerException, NotFound
from docker.models.containers import Container
from docker.models.images import Image
import pytest
from requests import RequestException
@@ -16,7 +16,7 @@ from supervisor.addons.manager import Addon
from supervisor.const import BusEvent, CoreState, CpuArch
from supervisor.coresys import CoreSys
from supervisor.docker.const import ContainerState
from supervisor.docker.interface import DOCKER_HUB, DockerInterface
from supervisor.docker.interface import DockerInterface
from supervisor.docker.manager import PullLogEntry, PullProgressDetail
from supervisor.docker.monitor import DockerContainerStateEvent
from supervisor.exceptions import (
@@ -26,12 +26,9 @@ from supervisor.exceptions import (
DockerNotFound,
DockerRequestError,
)
from supervisor.homeassistant.const import WSEvent, WSType
from supervisor.jobs import ChildJobSyncFilter, JobSchedulerOptions, SupervisorJob
from supervisor.jobs.decorator import Job
from supervisor.supervisor import Supervisor
from supervisor.jobs import JobSchedulerOptions, SupervisorJob
from tests.common import AsyncIterator, load_json_fixture
from tests.common import load_json_fixture
@pytest.mark.parametrize(
@@ -51,68 +48,35 @@ async def test_docker_image_platform(
platform: str,
):
"""Test platform set correctly from arch."""
coresys.docker.images.inspect.return_value = {"Id": "test:1.2.3"}
await test_docker_interface.install(AwesomeVersion("1.2.3"), "test", arch=cpu_arch)
coresys.docker.images.pull.assert_called_once_with(
"test", tag="1.2.3", platform=platform, auth=None, stream=True
)
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
with patch.object(
coresys.docker.images, "get", return_value=Mock(id="test:1.2.3")
) as get:
await test_docker_interface.install(
AwesomeVersion("1.2.3"), "test", arch=cpu_arch
)
coresys.docker.docker.api.pull.assert_called_once_with(
"test", tag="1.2.3", platform=platform, stream=True, decode=True
)
get.assert_called_once_with("test:1.2.3")
async def test_docker_image_default_platform(
coresys: CoreSys, test_docker_interface: DockerInterface
):
"""Test platform set using supervisor arch when omitted."""
coresys.docker.images.inspect.return_value = {"Id": "test:1.2.3"}
with (
patch.object(
type(coresys.supervisor), "arch", PropertyMock(return_value="i386")
),
patch.object(
coresys.docker.images, "get", return_value=Mock(id="test:1.2.3")
) as get,
):
await test_docker_interface.install(AwesomeVersion("1.2.3"), "test")
coresys.docker.images.pull.assert_called_once_with(
"test", tag="1.2.3", platform="linux/386", auth=None, stream=True
coresys.docker.docker.api.pull.assert_called_once_with(
"test", tag="1.2.3", platform="linux/386", stream=True, decode=True
)
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
@pytest.mark.parametrize(
"image,registry_key",
[
("homeassistant/amd64-supervisor", DOCKER_HUB),
("ghcr.io/home-assistant/amd64-supervisor", "ghcr.io"),
],
)
async def test_private_registry_credentials_passed_to_pull(
coresys: CoreSys,
test_docker_interface: DockerInterface,
image: str,
registry_key: str,
):
"""Test credentials for private registries are passed to aiodocker pull."""
coresys.docker.images.inspect.return_value = {"Id": f"{image}:1.2.3"}
# Configure registry credentials
coresys.docker.config._data["registries"] = { # pylint: disable=protected-access
registry_key: {"username": "testuser", "password": "testpass"}
}
with patch.object(
type(coresys.supervisor), "arch", PropertyMock(return_value="amd64")
):
await test_docker_interface.install(
AwesomeVersion("1.2.3"), image, arch=CpuArch.AMD64
)
# Verify credentials were passed to aiodocker
expected_auth = {"username": "testuser", "password": "testpass"}
if registry_key != DOCKER_HUB:
expected_auth["registry"] = registry_key
coresys.docker.images.pull.assert_called_once_with(
image, tag="1.2.3", platform="linux/amd64", auth=expected_auth, stream=True
)
get.assert_called_once_with("test:1.2.3")
@pytest.mark.parametrize(
@@ -243,40 +207,57 @@ async def test_attach_existing_container(
async def test_attach_container_failure(coresys: CoreSys):
"""Test attach fails to find container but finds image."""
coresys.docker.containers.get.side_effect = DockerException()
coresys.docker.images.inspect.return_value.setdefault("Config", {})["Image"] = (
"sha256:abc123"
)
with patch.object(type(coresys.bus), "fire_event") as fire_event:
container_collection = MagicMock()
container_collection.get.side_effect = DockerException()
image_collection = MagicMock()
image_config = {"Image": "sha256:abc123"}
image_collection.get.return_value = Image({"Config": image_config})
with (
patch(
"supervisor.docker.manager.DockerAPI.containers",
new=PropertyMock(return_value=container_collection),
),
patch(
"supervisor.docker.manager.DockerAPI.images",
new=PropertyMock(return_value=image_collection),
),
patch.object(type(coresys.bus), "fire_event") as fire_event,
):
await coresys.homeassistant.core.instance.attach(AwesomeVersion("2022.7.3"))
assert not [
event
for event in fire_event.call_args_list
if event.args[0] == BusEvent.DOCKER_CONTAINER_STATE_CHANGE
]
assert (
coresys.homeassistant.core.instance.meta_config["Image"] == "sha256:abc123"
)
assert coresys.homeassistant.core.instance.meta_config == image_config
async def test_attach_total_failure(coresys: CoreSys):
"""Test attach fails to find container or image."""
coresys.docker.containers.get.side_effect = DockerException
coresys.docker.images.inspect.side_effect = aiodocker.DockerError(
400, {"message": ""}
)
with pytest.raises(DockerError):
container_collection = MagicMock()
container_collection.get.side_effect = DockerException()
image_collection = MagicMock()
image_collection.get.side_effect = DockerException()
with (
patch(
"supervisor.docker.manager.DockerAPI.containers",
new=PropertyMock(return_value=container_collection),
),
patch(
"supervisor.docker.manager.DockerAPI.images",
new=PropertyMock(return_value=image_collection),
),
pytest.raises(DockerError),
):
await coresys.homeassistant.core.instance.attach(AwesomeVersion("2022.7.3"))
@pytest.mark.parametrize(
"err", [aiodocker.DockerError(400, {"message": ""}), RequestException()]
)
@pytest.mark.parametrize("err", [DockerException(), RequestException()])
async def test_image_pull_fail(
coresys: CoreSys, capture_exception: Mock, err: Exception
):
"""Test failure to pull image."""
coresys.docker.images.inspect.side_effect = err
coresys.docker.images.get.side_effect = err
with pytest.raises(DockerError):
await coresys.homeassistant.core.instance.install(
AwesomeVersion("2022.7.3"), arch=CpuArch.AMD64
@@ -308,16 +289,15 @@ async def test_install_fires_progress_events(
coresys: CoreSys, test_docker_interface: DockerInterface
):
"""Test progress events are fired during an install for listeners."""
# This is from a sample pull; the log was filtered to one entry per unique status for the test
logs = [
coresys.docker.docker.api.pull.return_value = [
{
"status": "Pulling from home-assistant/odroid-n2-homeassistant",
"id": "2025.7.2",
},
{"status": "Already exists", "progressDetail": {}, "id": "6e771e15690e"},
{"status": "Pulling fs layer", "progressDetail": {}, "id": "1578b14a573c"},
{"status": "Waiting", "progressDetail": {}, "id": "1578b14a573c"},
{"status": "Waiting", "progressDetail": {}, "id": "2488d0e401e1"},
{
"status": "Downloading",
"progressDetail": {"current": 1378, "total": 1486},
@@ -332,11 +312,7 @@ async def test_install_fires_progress_events(
"id": "1578b14a573c",
},
{"status": "Pull complete", "progressDetail": {}, "id": "1578b14a573c"},
{
"status": "Verifying Checksum",
"progressDetail": {},
"id": "6a1e931d8f88",
},
{"status": "Verifying Checksum", "progressDetail": {}, "id": "6a1e931d8f88"},
{
"status": "Digest: sha256:490080d7da0f385928022927990e04f604615f7b8c622ef3e58253d0f089881d"
},
@@ -344,7 +320,6 @@ async def test_install_fires_progress_events(
"status": "Status: Downloaded newer image for ghcr.io/home-assistant/odroid-n2-homeassistant:2025.7.2"
},
]
coresys.docker.images.pull.return_value = AsyncIterator(logs)
events: list[PullLogEntry] = []
@@ -359,10 +334,10 @@ async def test_install_fires_progress_events(
),
):
await test_docker_interface.install(AwesomeVersion("1.2.3"), "test")
coresys.docker.images.pull.assert_called_once_with(
"test", tag="1.2.3", platform="linux/386", auth=None, stream=True
coresys.docker.docker.api.pull.assert_called_once_with(
"test", tag="1.2.3", platform="linux/386", stream=True, decode=True
)
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
coresys.docker.images.get.assert_called_once_with("test:1.2.3")
await asyncio.sleep(1)
assert events == [
@@ -387,7 +362,7 @@ async def test_install_fires_progress_events(
job_id=ANY,
status="Waiting",
progress_detail=PullProgressDetail(),
id="1578b14a573c",
id="2488d0e401e1",
),
PullLogEntry(
job_id=ANY,
@@ -440,11 +415,10 @@ async def test_install_progress_rounding_does_not_cause_misses(
):
"""Test extremely close progress events do not create rounding issues."""
coresys.core.set_state(CoreState.RUNNING)
# Current numbers chosen to create a rounding issue with the original code,
# where a progress update came in with a value between the actual previous
# value and what it was rounded to. It should not raise an out-of-order exception
logs = [
coresys.docker.docker.api.pull.return_value = [
{
"status": "Pulling from home-assistant/odroid-n2-homeassistant",
"id": "2025.7.1",
@@ -484,25 +458,29 @@ async def test_install_progress_rounding_does_not_cause_misses(
"status": "Status: Downloaded newer image for ghcr.io/home-assistant/odroid-n2-homeassistant:2025.7.1"
},
]
coresys.docker.images.pull.return_value = AsyncIterator(logs)
# Schedule job so we can listen for the end. Then we can assert against the WS mock
event = asyncio.Event()
job, install_task = coresys.jobs.schedule_job(
test_docker_interface.install,
JobSchedulerOptions(),
AwesomeVersion("1.2.3"),
"test",
)
with (
patch.object(
type(coresys.supervisor), "arch", PropertyMock(return_value="i386")
),
):
# Schedule job so we can listen for the end. Then we can assert against the WS mock
event = asyncio.Event()
job, install_task = coresys.jobs.schedule_job(
test_docker_interface.install,
JobSchedulerOptions(),
AwesomeVersion("1.2.3"),
"test",
)
async def listen_for_job_end(reference: SupervisorJob):
if reference.uuid != job.uuid:
return
event.set()
async def listen_for_job_end(reference: SupervisorJob):
if reference.uuid != job.uuid:
return
event.set()
coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, listen_for_job_end)
await install_task
await event.wait()
coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, listen_for_job_end)
await install_task
await event.wait()
capture_exception.assert_not_called()
@@ -535,13 +513,11 @@ async def test_install_raises_on_pull_error(
exc_msg: str,
):
"""Test exceptions raised from errors in pull log."""
logs = [
coresys.docker.docker.api.pull.return_value = [
{
"status": "Pulling from home-assistant/odroid-n2-homeassistant",
"id": "2025.7.2",
},
{"status": "Pulling fs layer", "progressDetail": {}, "id": "1578b14a573c"},
{
"status": "Downloading",
"progressDetail": {"current": 1378, "total": 1486},
@@ -550,7 +526,6 @@ async def test_install_raises_on_pull_error(
},
error_log,
]
coresys.docker.images.pull.return_value = AsyncIterator(logs)
with pytest.raises(exc_type, match=exc_msg):
await test_docker_interface.install(AwesomeVersion("1.2.3"), "test")
@@ -564,11 +539,11 @@ async def test_install_progress_handles_download_restart(
):
"""Test install handles docker progress events that include a download restart."""
coresys.core.set_state(CoreState.RUNNING)
# Fixture emulates a download restart as docker logs it
# A log out of order exception should not be raised
logs = load_json_fixture("docker_pull_image_log_restart.json")
coresys.docker.images.pull.return_value = AsyncIterator(logs)
coresys.docker.docker.api.pull.return_value = load_json_fixture(
"docker_pull_image_log_restart.json"
)
with (
patch.object(
@@ -596,102 +571,59 @@ async def test_install_progress_handles_download_restart(
capture_exception.assert_not_called()
@pytest.mark.parametrize(
"extract_log",
[
{
"status": "Extracting",
"progressDetail": {"current": 96, "total": 96},
"progress": "[==================================================>] 96B/96B",
"id": "02a6e69d8d00",
},
{
"status": "Extracting",
"progressDetail": {"current": 1, "units": "s"},
"progress": "1 s",
"id": "02a6e69d8d00",
},
],
ids=["normal_extract_log", "containerd_snapshot_extract_log"],
)
async def test_install_progress_handles_layers_skipping_download(
coresys: CoreSys,
test_docker_interface: DockerInterface,
capture_exception: Mock,
extract_log: dict[str, Any],
):
"""Test install handles small layers that skip downloading phase and go directly to download complete.
Reproduces the real-world scenario from Supervisor issue #6286:
- Small layer (02a6e69d8d00) reaches Download complete at 10:14:08 without ever entering Downloading
- Normal layer (3f4a84073184) starts Downloading at 10:14:09 with progress updates
Under containerd snapshotter this presumably can still occur and Supervisor will have even less info
since extract logs don't have a total. Supervisor should generally just ignore these and set progress
from the larger layers that take all the time.
"""
"""Test install handles small layers that skip downloading phase and go directly to download complete."""
coresys.core.set_state(CoreState.RUNNING)
# Reproduce EXACT sequence from SupervisorNoUpdateProgressLogs.txt:
# Small layer (02a6e69d8d00) completes BEFORE normal layer (3f4a84073184) starts downloading
logs = [
# Simulate multiple layers where one small layer (96 bytes) skips the downloading phase
# This layer should not block progress reporting for the parent job
coresys.docker.docker.api.pull.return_value = [
{"status": "Pulling from test/image", "id": "latest"},
# Small layer that skips downloading (02a6e69d8d00 in logs, 96 bytes)
{"status": "Pulling fs layer", "progressDetail": {}, "id": "02a6e69d8d00"},
{"status": "Pulling fs layer", "progressDetail": {}, "id": "3f4a84073184"},
{"status": "Waiting", "progressDetail": {}, "id": "02a6e69d8d00"},
{"status": "Waiting", "progressDetail": {}, "id": "3f4a84073184"},
# Goes straight to Download complete (10:14:08 in logs) - THIS IS THE KEY MOMENT
{"status": "Download complete", "progressDetail": {}, "id": "02a6e69d8d00"},
# Normal layer that downloads (3f4a84073184 in logs, 25MB)
# Downloading starts (10:14:09 in logs) - progress updates should happen NOW!
# Layer 1: Normal layer with downloading phase
{"status": "Pulling fs layer", "progressDetail": {}, "id": "layer1"},
{
"status": "Downloading",
"progressDetail": {"current": 260937, "total": 25371463},
"progress": "[> ] 260.9kB/25.37MB",
"id": "3f4a84073184",
"progressDetail": {"current": 100, "total": 1000},
"progress": "[=====> ] 100B/1000B",
"id": "layer1",
},
{
"status": "Downloading",
"progressDetail": {"current": 5505024, "total": 25371463},
"progress": "[==========> ] 5.505MB/25.37MB",
"id": "3f4a84073184",
"progressDetail": {"current": 1000, "total": 1000},
"progress": "[==================================================>] 1000B/1000B",
"id": "layer1",
},
{
"status": "Downloading",
"progressDetail": {"current": 11272192, "total": 25371463},
"progress": "[======================> ] 11.27MB/25.37MB",
"id": "3f4a84073184",
},
{"status": "Download complete", "progressDetail": {}, "id": "3f4a84073184"},
{"status": "Download complete", "progressDetail": {}, "id": "layer1"},
{
"status": "Extracting",
"progressDetail": {"current": 25371463, "total": 25371463},
"progress": "[==================================================>] 25.37MB/25.37MB",
"id": "3f4a84073184",
"progressDetail": {"current": 1000, "total": 1000},
"progress": "[==================================================>] 1000B/1000B",
"id": "layer1",
},
{"status": "Pull complete", "progressDetail": {}, "id": "3f4a84073184"},
# Small layer finally extracts (10:14:58 in logs)
extract_log,
{"status": "Pull complete", "progressDetail": {}, "id": "02a6e69d8d00"},
{"status": "Pull complete", "progressDetail": {}, "id": "layer1"},
# Layer 2: Small layer that skips downloading (like 02a6e69d8d00 from the logs)
{"status": "Pulling fs layer", "progressDetail": {}, "id": "layer2"},
{"status": "Waiting", "progressDetail": {}, "id": "layer2"},
# Goes straight to Download complete without Downloading phase
{"status": "Download complete", "progressDetail": {}, "id": "layer2"},
{
"status": "Extracting",
"progressDetail": {"current": 96, "total": 96},
"progress": "[==================================================>] 96B/96B",
"id": "layer2",
},
{"status": "Pull complete", "progressDetail": {}, "id": "layer2"},
{"status": "Digest: sha256:test"},
{"status": "Status: Downloaded newer image for test/image:latest"},
]
coresys.docker.images.pull.return_value = AsyncIterator(logs)
# Capture immutable snapshots of install job progress using job.as_dict()
# This solves the mutable object problem - we snapshot state at call time
install_job_snapshots = []
original_on_job_change = coresys.jobs._on_job_change # pylint: disable=W0212
def capture_and_forward(job_obj, attribute, value):
# Capture immutable snapshot if this is the install job with progress
if job_obj.name == "docker_interface_install" and job_obj.progress > 0:
install_job_snapshots.append(job_obj.as_dict())
# Forward to original to maintain functionality
return original_on_job_change(job_obj, attribute, value)
with patch.object(coresys.jobs, "_on_job_change", side_effect=capture_and_forward):
with patch.object(
type(coresys.supervisor), "arch", PropertyMock(return_value="amd64")
):
# Schedule job so we can observe that it completes successfully
event = asyncio.Event()
job, install_task = coresys.jobs.schedule_job(
test_docker_interface.install,
@@ -709,243 +641,100 @@ async def test_install_progress_handles_layers_skipping_download(
await install_task
await event.wait()
# First update from layer download should have rather low progress ((260937/25371463) ~= 1%)
assert install_job_snapshots[0]["progress"] < 2
# Total 7 events should lead to a progress update on the install job:
# 3 Downloading events + Download complete (70%) + Extracting + Pull complete (100%) + stage change
# Note: The small placeholder layer ({1,1}) is excluded from progress calculation
assert len(install_job_snapshots) == 7
# Job should complete successfully
assert job.done is True
assert job.progress == 100
capture_exception.assert_not_called()
async def test_missing_total_handled_gracefully(
coresys: CoreSys,
test_docker_interface: DockerInterface,
ha_ws_client: AsyncMock,
capture_exception: Mock,
):
"""Test missing 'total' fields in progress details handled gracefully."""
coresys.core.set_state(CoreState.RUNNING)
# Progress details with missing 'total' fields observed in real-world pulls
logs = [
{
"status": "Pulling from home-assistant/odroid-n2-homeassistant",
"id": "2025.7.1",
},
{"status": "Pulling fs layer", "progressDetail": {}, "id": "1e214cd6d7d0"},
{
"status": "Downloading",
"progressDetail": {"current": 436480882},
"progress": "[===================================================] 436.5MB/436.5MB",
"id": "1e214cd6d7d0",
},
{"status": "Verifying Checksum", "progressDetail": {}, "id": "1e214cd6d7d0"},
{"status": "Download complete", "progressDetail": {}, "id": "1e214cd6d7d0"},
{
"status": "Extracting",
"progressDetail": {"current": 436480882},
"progress": "[===================================================] 436.5MB/436.5MB",
"id": "1e214cd6d7d0",
},
{"status": "Pull complete", "progressDetail": {}, "id": "1e214cd6d7d0"},
{
"status": "Digest: sha256:7d97da645f232f82a768d0a537e452536719d56d484d419836e53dbe3e4ec736"
},
{
"status": "Status: Downloaded newer image for ghcr.io/home-assistant/odroid-n2-homeassistant:2025.7.1"
},
]
coresys.docker.images.pull.return_value = AsyncIterator(logs)
# Schedule job so we can listen for the end. Then we can assert against the WS mock
event = asyncio.Event()
job, install_task = coresys.jobs.schedule_job(
test_docker_interface.install,
JobSchedulerOptions(),
AwesomeVersion("1.2.3"),
"test",
)
async def listen_for_job_end(reference: SupervisorJob):
if reference.uuid != job.uuid:
return
event.set()
coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, listen_for_job_end)
await install_task
await event.wait()
# The key assertion: Job should complete successfully without errors
# Without the fix, layer2 would block all progress reporting until it reached Extracting,
# preventing the aggregate progress calculation from running
assert job.done is True
assert job.progress == 100
capture_exception.assert_not_called()
async def test_install_progress_containerd_snapshot(
coresys: CoreSys, ha_ws_client: AsyncMock
async def test_install_progress_handles_containerd_snapshotter(
coresys: CoreSys,
test_docker_interface: DockerInterface,
capture_exception: Mock,
):
"""Test install handles docker progress events using containerd snapshotter."""
"""Test install handles containerd snapshotter time-based progress (total=None)."""
coresys.core.set_state(CoreState.RUNNING)
class TestDockerInterface(DockerInterface):
"""Test interface for events."""
@property
def name(self) -> str:
"""Name of test interface."""
return "test_interface"
@Job(
name="mock_docker_interface_install",
child_job_syncs=[
ChildJobSyncFilter("docker_interface_install", progress_allocation=1.0)
],
)
async def mock_install(self) -> None:
"""Mock install."""
await super().install(
AwesomeVersion("1.2.3"), image="test", arch=CpuArch.I386
)
# Fixture emulates log as received when using containerd snapshotter
# Should not error but progress gets choppier once extraction starts
logs = load_json_fixture("docker_pull_image_log_containerd_snapshot.json")
coresys.docker.images.pull.return_value = AsyncIterator(logs)
test_docker_interface = TestDockerInterface(coresys)
with patch.object(Supervisor, "arch", PropertyMock(return_value="i386")):
await test_docker_interface.mock_install()
coresys.docker.images.pull.assert_called_once_with(
"test", tag="1.2.3", platform="linux/386", auth=None, stream=True
)
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
await asyncio.sleep(1)
def job_event(progress: float, done: bool = False):
return {
"type": WSType.SUPERVISOR_EVENT,
"data": {
"event": WSEvent.JOB,
"data": {
"name": "mock_docker_interface_install",
"reference": "test_interface",
"uuid": ANY,
"progress": progress,
"stage": None,
"done": done,
"parent_id": None,
"errors": [],
"created": ANY,
"extra": None,
},
},
}
# Get progress values from the events
job_events = [
c.args[0]
for c in ha_ws_client.async_send_command.call_args_list
if c.args[0].get("data", {}).get("event") == WSEvent.JOB
and c.args[0].get("data", {}).get("data", {}).get("name")
== "mock_docker_interface_install"
# Containerd snapshotter reports extraction progress as time elapsed (e.g., "7 s")
# with current=7, total=None instead of byte-based progress
coresys.docker.docker.api.pull.return_value = [
{"status": "Pulling from test/image", "id": "latest"},
{"status": "Pulling fs layer", "progressDetail": {}, "id": "layer1"},
{
"status": "Downloading",
"progressDetail": {"current": 100, "total": 1000},
"progress": "[=====> ] 100B/1000B",
"id": "layer1",
},
{
"status": "Downloading",
"progressDetail": {"current": 1000, "total": 1000},
"progress": "[==================================================>] 1000B/1000B",
"id": "layer1",
},
{"status": "Download complete", "progressDetail": {}, "id": "layer1"},
{
"status": "Extracting",
"progressDetail": {"current": 1000, "total": 1000},
"progress": "[==================================================>] 1000B/1000B",
"id": "layer1",
},
{"status": "Pull complete", "progressDetail": {}, "id": "layer1"},
# Layer 2: Containerd snapshotter with time-based extraction
{"status": "Pulling fs layer", "progressDetail": {}, "id": "layer2"},
{
"status": "Downloading",
"progressDetail": {"current": 50, "total": 500},
"progress": "[=====> ] 50B/500B",
"id": "layer2",
},
{
"status": "Downloading",
"progressDetail": {"current": 500, "total": 500},
"progress": "[==================================================>] 500B/500B",
"id": "layer2",
},
{"status": "Download complete", "progressDetail": {}, "id": "layer2"},
# Time-based extraction progress (containerd snapshotter)
{
"status": "Extracting",
"progressDetail": {"current": 3, "total": None},
"progress": "3 s",
"id": "layer2",
},
{
"status": "Extracting",
"progressDetail": {"current": 7, "total": None},
"progress": "7 s",
"id": "layer2",
},
{"status": "Pull complete", "progressDetail": {}, "id": "layer2"},
{"status": "Digest: sha256:test"},
{"status": "Status: Downloaded newer image for test/image:latest"},
]
progress_values = [e["data"]["data"]["progress"] for e in job_events]
# Should have multiple progress updates (not just 0 and 100)
assert len(progress_values) >= 10, (
f"Expected >=10 progress updates, got {len(progress_values)}"
)
# Progress should be monotonically increasing
for i in range(1, len(progress_values)):
assert progress_values[i] >= progress_values[i - 1], (
f"Progress decreased at index {i}: {progress_values[i - 1]} -> {progress_values[i]}"
with patch.object(
type(coresys.supervisor), "arch", PropertyMock(return_value="amd64")
):
event = asyncio.Event()
job, install_task = coresys.jobs.schedule_job(
test_docker_interface.install,
JobSchedulerOptions(),
AwesomeVersion("1.2.3"),
"test",
)
# Should start at 0 and end at 100
assert progress_values[0] == 0
assert progress_values[-1] == 100
async def listen_for_job_end(reference: SupervisorJob):
if reference.uuid != job.uuid:
return
event.set()
# Should have progress values in the downloading phase (< 70%)
# Note: with layer scaling, early progress may be lower than before
downloading_progress = [p for p in progress_values if 0 < p < 70]
assert len(downloading_progress) > 0, (
"Expected progress updates during downloading phase"
)
coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, listen_for_job_end)
await install_task
await event.wait()
async def test_install_progress_containerd_snapshotter_real_world(
coresys: CoreSys, ha_ws_client: AsyncMock
):
"""Test install handles real-world containerd snapshotter events.
This test uses real pull events captured from a Home Assistant Core update
where some layers skip the Downloading phase entirely (going directly from
"Pulling fs layer" to "Download complete"). This causes the bug where progress
jumps from 0 to 100 without intermediate updates.
Root cause: _update_install_job_status() returns early if ANY layer has
extra=None. Layers that skip Downloading don't get extra until Download complete,
so progress cannot be calculated until ALL layers reach Download complete.
"""
coresys.core.set_state(CoreState.RUNNING)
class TestDockerInterface(DockerInterface):
"""Test interface for events."""
@property
def name(self) -> str:
"""Name of test interface."""
return "test_interface"
@Job(
name="mock_docker_interface_install_realworld",
child_job_syncs=[
ChildJobSyncFilter("docker_interface_install", progress_allocation=1.0)
],
)
async def mock_install(self) -> None:
"""Mock install."""
await super().install(
AwesomeVersion("1.2.3"), image="test", arch=CpuArch.I386
)
# Real-world fixture: 12 layers, 262 Downloading events
# Some layers skip Downloading entirely (small layers with containerd snapshotter)
logs = load_json_fixture("docker_pull_image_log_containerd_snapshotter_real.json")
coresys.docker.images.pull.return_value = AsyncIterator(logs)
test_docker_interface = TestDockerInterface(coresys)
with patch.object(Supervisor, "arch", PropertyMock(return_value="i386")):
await test_docker_interface.mock_install()
await asyncio.sleep(1)
# Get progress events for the parent job (what UI sees)
job_events = [
c.args[0]
for c in ha_ws_client.async_send_command.call_args_list
if c.args[0].get("data", {}).get("event") == WSEvent.JOB
and c.args[0].get("data", {}).get("data", {}).get("name")
== "mock_docker_interface_install_realworld"
]
progress_values = [e["data"]["data"]["progress"] for e in job_events]
# We should have intermediate progress updates, not just 0 and 100
assert len(progress_values) > 3, (
f"BUG: Progress jumped 0->100 without intermediate updates. "
f"Got {len(progress_values)} updates: {progress_values}. "
f"Expected intermediate progress during the 262 Downloading events."
)
# Progress should be monotonically increasing
for i in range(1, len(progress_values)):
assert progress_values[i] >= progress_values[i - 1]
# Should see progress in downloading phase (0-70%)
downloading_progress = [p for p in progress_values if 0 < p < 70]
assert len(downloading_progress) > 0
# The key assertion: Job should complete without crashing on None total
assert job.done is True
assert job.progress == 100
capture_exception.assert_not_called()
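
Across these pull-progress tests, the underlying idea is a size-weighted aggregate over per-layer byte counts: downloading accounts for roughly 70% of a layer and extraction for the rest, and layers that never report usable byte counts (tiny layers, or containerd time-based extraction with total=None) are skipped rather than allowed to stall the parent job. A rough sketch of that aggregation — illustrative only, not the real `_update_install_job_status()` — under those assumptions:

```python
DOWNLOAD_WEIGHT = 0.7  # remaining 0.3 assumed for extraction


def layer_progress(stage: str, current: int, total: int) -> float:
    """Per-layer progress in [0, 1], with downloading weighted at 70%."""
    frac = min(current / total, 1.0) if total else 0.0
    if stage == "Downloading":
        return DOWNLOAD_WEIGHT * frac
    if stage == "Extracting":
        return DOWNLOAD_WEIGHT + (1 - DOWNLOAD_WEIGHT) * frac
    return 1.0 if stage == "Pull complete" else 0.0


def aggregate_pull_progress(layers: list[dict]) -> float:
    """Size-weighted progress (0-100) across layer jobs.

    Layers whose `extra` carries no usable total are skipped so they
    cannot block progress reporting for the parent job.
    """
    weighted = total_bytes = 0.0
    for layer in layers:
        extra = layer.get("extra") or {}
        if not extra.get("total"):
            continue
        total_bytes += extra["total"]
        weighted += extra["total"] * layer_progress(
            layer["stage"], extra.get("current", 0), extra["total"]
        )
    return round(100 * weighted / total_bytes, 1) if total_bytes else 0.0
```

With that shape, a 96-byte layer that never reports a download total simply contributes nothing until it has byte counts, so overall progress keeps moving on the larger layers — the behaviour the snapshots and monotonicity checks above assert.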

View File

@@ -1,10 +1,9 @@
"""Test Docker manager."""
import asyncio
from pathlib import Path
from unittest.mock import MagicMock, patch
from docker.errors import APIError, DockerException, NotFound
from docker.errors import DockerException
import pytest
from requests import RequestException
@@ -21,7 +20,7 @@ async def test_run_command_success(docker: DockerAPI):
mock_container.logs.return_value = b"command output"
# Mock docker containers.run to return our mock container
docker.dockerpy.containers.run.return_value = mock_container
docker.docker.containers.run.return_value = mock_container
# Execute the command
result = docker.run_command(
@@ -34,7 +33,7 @@ async def test_run_command_success(docker: DockerAPI):
assert result.output == b"command output"
# Verify docker.containers.run was called correctly
docker.dockerpy.containers.run.assert_called_once_with(
docker.docker.containers.run.assert_called_once_with(
"alpine:3.18",
command="echo hello",
detach=True,
@@ -56,7 +55,7 @@ async def test_run_command_with_defaults(docker: DockerAPI):
mock_container.logs.return_value = b"error output"
# Mock docker containers.run to return our mock container
docker.dockerpy.containers.run.return_value = mock_container
docker.docker.containers.run.return_value = mock_container
# Execute the command with minimal parameters
result = docker.run_command(image="ubuntu")
@@ -67,7 +66,7 @@ async def test_run_command_with_defaults(docker: DockerAPI):
assert result.output == b"error output"
# Verify docker.containers.run was called with defaults
docker.dockerpy.containers.run.assert_called_once_with(
docker.docker.containers.run.assert_called_once_with(
"ubuntu:latest", # default tag
command=None, # default command
detach=True,
@@ -82,7 +81,7 @@ async def test_run_command_with_defaults(docker: DockerAPI):
async def test_run_command_docker_exception(docker: DockerAPI):
"""Test command execution when Docker raises an exception."""
# Mock docker containers.run to raise DockerException
docker.dockerpy.containers.run.side_effect = DockerException("Docker error")
docker.docker.containers.run.side_effect = DockerException("Docker error")
# Execute the command and expect DockerError
with pytest.raises(DockerError, match="Can't execute command: Docker error"):
@@ -92,7 +91,7 @@ async def test_run_command_docker_exception(docker: DockerAPI):
async def test_run_command_request_exception(docker: DockerAPI):
"""Test command execution when requests raises an exception."""
# Mock docker containers.run to raise RequestException
docker.dockerpy.containers.run.side_effect = RequestException("Connection error")
docker.docker.containers.run.side_effect = RequestException("Connection error")
# Execute the command and expect DockerError
with pytest.raises(DockerError, match="Can't execute command: Connection error"):
@@ -105,7 +104,7 @@ async def test_run_command_cleanup_on_exception(docker: DockerAPI):
mock_container = MagicMock()
# Mock docker.containers.run to return container, but container.wait to raise exception
docker.dockerpy.containers.run.return_value = mock_container
docker.docker.containers.run.return_value = mock_container
mock_container.wait.side_effect = DockerException("Wait failed")
# Execute the command and expect DockerError
@@ -124,7 +123,7 @@ async def test_run_command_custom_stdout_stderr(docker: DockerAPI):
mock_container.logs.return_value = b"output"
# Mock docker containers.run to return our mock container
docker.dockerpy.containers.run.return_value = mock_container
docker.docker.containers.run.return_value = mock_container
# Execute the command with custom stdout/stderr
result = docker.run_command(
@@ -151,7 +150,7 @@ async def test_run_container_with_cidfile(
cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"
extern_cidfile_path = coresys.config.path_extern_cid_files / f"{container_name}.cid"
docker.dockerpy.containers.run.return_value = mock_container
docker.docker.containers.run.return_value = mock_container
# Mock container creation
with patch.object(
@@ -352,101 +351,3 @@ async def test_run_container_with_leftover_cidfile_directory(
assert cidfile_path.read_text() == mock_container.id
assert result == mock_container
async def test_repair(coresys: CoreSys, caplog: pytest.LogCaptureFixture):
"""Test repair API."""
coresys.docker.dockerpy.networks.get.side_effect = [
hassio := MagicMock(
attrs={
"Containers": {
"good": {"Name": "good"},
"corrupt": {"Name": "corrupt"},
"fail": {"Name": "fail"},
}
}
),
host := MagicMock(attrs={"Containers": {}}),
]
coresys.docker.dockerpy.containers.get.side_effect = [
MagicMock(),
NotFound("corrupt"),
DockerException("fail"),
]
await coresys.run_in_executor(coresys.docker.repair)
coresys.docker.dockerpy.api.prune_containers.assert_called_once()
coresys.docker.dockerpy.api.prune_images.assert_called_once_with(
filters={"dangling": False}
)
coresys.docker.dockerpy.api.prune_builds.assert_called_once()
coresys.docker.dockerpy.api.prune_volumes.assert_called_once()
coresys.docker.dockerpy.api.prune_networks.assert_called_once()
hassio.disconnect.assert_called_once_with("corrupt", force=True)
host.disconnect.assert_not_called()
assert "Docker fatal error on container fail on hassio" in caplog.text
async def test_repair_failures(coresys: CoreSys, caplog: pytest.LogCaptureFixture):
"""Test repair proceeds best it can through failures."""
coresys.docker.dockerpy.api.prune_containers.side_effect = APIError("fail")
coresys.docker.dockerpy.api.prune_images.side_effect = APIError("fail")
coresys.docker.dockerpy.api.prune_builds.side_effect = APIError("fail")
coresys.docker.dockerpy.api.prune_volumes.side_effect = APIError("fail")
coresys.docker.dockerpy.api.prune_networks.side_effect = APIError("fail")
coresys.docker.dockerpy.networks.get.side_effect = NotFound("missing")
await coresys.run_in_executor(coresys.docker.repair)
assert "Error for containers prune: fail" in caplog.text
assert "Error for images prune: fail" in caplog.text
assert "Error for builds prune: fail" in caplog.text
assert "Error for volumes prune: fail" in caplog.text
assert "Error for networks prune: fail" in caplog.text
assert "Error for networks hassio prune: missing" in caplog.text
assert "Error for networks host prune: missing" in caplog.text
@pytest.mark.parametrize("log_starter", [("Loaded image ID"), ("Loaded image")])
async def test_import_image(coresys: CoreSys, tmp_path: Path, log_starter: str):
"""Test importing an image into docker."""
(test_tar := tmp_path / "test.tar").touch()
coresys.docker.images.import_image.return_value = [
{"stream": f"{log_starter}: imported"}
]
coresys.docker.images.inspect.return_value = {"Id": "imported"}
image = await coresys.docker.import_image(test_tar)
assert image["Id"] == "imported"
coresys.docker.images.inspect.assert_called_once_with("imported")
async def test_import_image_error(coresys: CoreSys, tmp_path: Path):
"""Test failure importing an image into docker."""
(test_tar := tmp_path / "test.tar").touch()
coresys.docker.images.import_image.return_value = [
{"errorDetail": {"message": "fail"}}
]
with pytest.raises(DockerError, match="Can't import image from tar: fail"):
await coresys.docker.import_image(test_tar)
coresys.docker.images.inspect.assert_not_called()
async def test_import_multiple_images_in_tar(
coresys: CoreSys, tmp_path: Path, caplog: pytest.LogCaptureFixture
):
"""Test importing an image into docker."""
(test_tar := tmp_path / "test.tar").touch()
coresys.docker.images.import_image.return_value = [
{"stream": "Loaded image: imported-1"},
{"stream": "Loaded image: imported-2"},
]
assert await coresys.docker.import_image(test_tar) is None
assert "Unexpected image count 2 while importing image from tar" in caplog.text
coresys.docker.images.inspect.assert_not_called()
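
For context: the removed import tests above check the stream returned by a tar import for lines starting with "Loaded image" / "Loaded image ID". A minimal sketch of that parsing idea, under the assumption that the stream is a list of dicts as mocked above; parse_import_stream is a hypothetical helper, not part of the Supervisor API.

    # Sketch: pull the single imported image reference out of a docker
    # tar-import stream. parse_import_stream is a hypothetical helper.
    def parse_import_stream(stream: list[dict]) -> str | None:
        """Return the imported image reference, or None if not exactly one."""
        loaded: list[str] = []
        for entry in stream:
            if "errorDetail" in entry:
                raise RuntimeError(entry["errorDetail"]["message"])
            line = entry.get("stream", "")
            if line.startswith("Loaded image") and ":" in line:
                # Covers both "Loaded image: <ref>" and "Loaded image ID: <id>"
                loaded.append(line.split(":", 1)[1].strip())
        return loaded[0] if len(loaded) == 1 else None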

View File

@@ -88,7 +88,7 @@ async def test_events(
):
"""Test events created from docker events."""
event["Actor"]["Attributes"]["name"] = "some_container"
event["Actor"]["ID"] = "abc123"
event["id"] = "abc123"
event["time"] = 123
with (
patch(
@@ -131,12 +131,12 @@ async def test_unlabeled_container(coresys: CoreSys):
new=PropertyMock(
return_value=[
{
"id": "abc123",
"time": 123,
"Type": "container",
"Action": "die",
"Actor": {
"ID": "abc123",
"Attributes": {"name": "homeassistant", "exitCode": "137"},
"Attributes": {"name": "homeassistant", "exitCode": "137"}
},
}
]

View File

@@ -1,196 +0,0 @@
[
{
"status": "Pulling from home-assistant/home-assistant",
"id": "2025.12.0.dev202511080235"
},
{ "status": "Pulling fs layer", "progressDetail": {}, "id": "eafecc6b43cc" },
{ "status": "Pulling fs layer", "progressDetail": {}, "id": "333270549f95" },
{
"status": "Downloading",
"progressDetail": { "current": 1048576, "total": 21863319 },
"progress": "[==\u003e ] 1.049MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 1048576, "total": 21179924 },
"progress": "[==\u003e ] 1.049MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 4194304, "total": 21863319 },
"progress": "[=========\u003e ] 4.194MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 2097152, "total": 21179924 },
"progress": "[====\u003e ] 2.097MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 7340032, "total": 21863319 },
"progress": "[================\u003e ] 7.34MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 4194304, "total": 21179924 },
"progress": "[=========\u003e ] 4.194MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 13631488, "total": 21863319 },
"progress": "[===============================\u003e ] 13.63MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 8388608, "total": 21179924 },
"progress": "[===================\u003e ] 8.389MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 17825792, "total": 21863319 },
"progress": "[========================================\u003e ] 17.83MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 12582912, "total": 21179924 },
"progress": "[=============================\u003e ] 12.58MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 21863319, "total": 21863319 },
"progress": "[==================================================\u003e] 21.86MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 16777216, "total": 21179924 },
"progress": "[=======================================\u003e ] 16.78MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 21179924, "total": 21179924 },
"progress": "[==================================================\u003e] 21.18MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Download complete",
"progressDetail": { "hidecounts": true },
"id": "eafecc6b43cc"
},
{
"status": "Download complete",
"progressDetail": { "hidecounts": true },
"id": "333270549f95"
},
{
"status": "Extracting",
"progressDetail": { "current": 1, "units": "s" },
"progress": "1 s",
"id": "333270549f95"
},
{
"status": "Extracting",
"progressDetail": { "current": 1, "units": "s" },
"progress": "1 s",
"id": "333270549f95"
},
{
"status": "Pull complete",
"progressDetail": { "hidecounts": true },
"id": "333270549f95"
},
{
"status": "Extracting",
"progressDetail": { "current": 1, "units": "s" },
"progress": "1 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 1, "units": "s" },
"progress": "1 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 2, "units": "s" },
"progress": "2 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 2, "units": "s" },
"progress": "2 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 3, "units": "s" },
"progress": "3 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 3, "units": "s" },
"progress": "3 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 4, "units": "s" },
"progress": "4 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 4, "units": "s" },
"progress": "4 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 5, "units": "s" },
"progress": "5 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 5, "units": "s" },
"progress": "5 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 6, "units": "s" },
"progress": "6 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 6, "units": "s" },
"progress": "6 s",
"id": "eafecc6b43cc"
},
{
"status": "Pull complete",
"progressDetail": { "hidecounts": true },
"id": "eafecc6b43cc"
},
{
"status": "Digest: sha256:bfc9efc13552c0c228f3d9d35987331cce68b43c9bc79c80a57eeadadd44cccf"
},
{
"status": "Status: Downloaded newer image for ghcr.io/home-assistant/home-assistant:2025.12.0.dev202511080235"
}
]
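
The events deleted above follow Docker's pull-progress shape: per-layer "Downloading" / "Extracting" statuses with progressDetail counters. A rough sketch of folding such events into a single download percentage, purely illustrative and not the Supervisor's progress code:

    # Sketch: naive aggregate download progress from docker pull events of
    # the shape shown in the deleted fixture. Not the Supervisor algorithm.
    def aggregate_download_progress(events: list[dict]) -> float:
        """Return overall download progress in percent (0-100)."""
        layers: dict[str, tuple[int, int]] = {}
        for event in events:
            layer_id = event.get("id")
            detail = event.get("progressDetail") or {}
            if not layer_id:
                continue
            if event.get("status") == "Downloading" and detail.get("total"):
                layers[layer_id] = (detail["current"], detail["total"])
            elif event.get("status") == "Download complete" and layer_id in layers:
                total = layers[layer_id][1]
                layers[layer_id] = (total, total)
        if not layers:
            return 0.0
        done = sum(current for current, _ in layers.values())
        total = sum(total for _, total in layers.values())
        return 100 * done / total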

File diff suppressed because it is too large

View File

@@ -1,14 +1,11 @@
"""Test Home Assistant core."""
from datetime import datetime, timedelta
from http import HTTPStatus
from unittest.mock import ANY, MagicMock, Mock, PropertyMock, call, patch
from unittest.mock import ANY, MagicMock, Mock, PropertyMock, patch
import aiodocker
from awesomeversion import AwesomeVersion
from docker.errors import APIError, DockerException, NotFound
from docker.errors import APIError, DockerException, ImageNotFound, NotFound
import pytest
from requests import RequestException
from time_machine import travel
from supervisor.const import CpuArch
@@ -26,12 +23,8 @@ from supervisor.exceptions import (
from supervisor.homeassistant.api import APIState
from supervisor.homeassistant.core import HomeAssistantCore
from supervisor.homeassistant.module import HomeAssistant
from supervisor.resolution.const import ContextType, IssueType
from supervisor.resolution.data import Issue
from supervisor.updater import Updater
from tests.common import AsyncIterator
async def test_update_fails_if_out_of_date(coresys: CoreSys):
"""Test update of Home Assistant fails when supervisor or plugin is out of date."""
@@ -59,23 +52,11 @@ async def test_update_fails_if_out_of_date(coresys: CoreSys):
await coresys.homeassistant.core.update()
@pytest.mark.parametrize(
"err",
[
aiodocker.DockerError(HTTPStatus.TOO_MANY_REQUESTS, {"message": "ratelimit"}),
APIError("ratelimit", MagicMock(status_code=HTTPStatus.TOO_MANY_REQUESTS)),
],
)
async def test_install_landingpage_docker_ratelimit_error(
coresys: CoreSys,
capture_exception: Mock,
caplog: pytest.LogCaptureFixture,
err: Exception,
async def test_install_landingpage_docker_error(
coresys: CoreSys, capture_exception: Mock, caplog: pytest.LogCaptureFixture
):
"""Test install landing page fails due to docker ratelimit error."""
"""Test install landing page fails due to docker error."""
coresys.security.force = True
coresys.docker.images.pull.side_effect = [err, AsyncIterator([{}])]
with (
patch.object(DockerHomeAssistant, "attach", side_effect=DockerError),
patch.object(
@@ -88,35 +69,19 @@ async def test_install_landingpage_docker_ratelimit_error(
),
patch("supervisor.homeassistant.core.asyncio.sleep") as sleep,
):
coresys.docker.images.get.side_effect = [APIError("fail"), MagicMock()]
await coresys.homeassistant.core.install_landingpage()
sleep.assert_awaited_once_with(30)
assert "Failed to install landingpage, retrying after 30sec" in caplog.text
capture_exception.assert_not_called()
assert (
Issue(IssueType.DOCKER_RATELIMIT, ContextType.SYSTEM)
in coresys.resolution.issues
)
@pytest.mark.parametrize(
"err",
[
aiodocker.DockerError(HTTPStatus.INTERNAL_SERVER_ERROR, {"message": "fail"}),
APIError("fail"),
DockerException(),
RequestException(),
OSError(),
],
)
async def test_install_landingpage_other_error(
coresys: CoreSys,
capture_exception: Mock,
caplog: pytest.LogCaptureFixture,
err: Exception,
coresys: CoreSys, capture_exception: Mock, caplog: pytest.LogCaptureFixture
):
"""Test install landing page fails due to other error."""
coresys.docker.images.inspect.side_effect = [err, MagicMock()]
coresys.docker.images.get.side_effect = [(err := OSError()), MagicMock()]
with (
patch.object(DockerHomeAssistant, "attach", side_effect=DockerError),
@@ -137,23 +102,11 @@ async def test_install_landingpage_other_error(
capture_exception.assert_called_once_with(err)
@pytest.mark.parametrize(
"err",
[
aiodocker.DockerError(HTTPStatus.TOO_MANY_REQUESTS, {"message": "ratelimit"}),
APIError("ratelimit", MagicMock(status_code=HTTPStatus.TOO_MANY_REQUESTS)),
],
)
async def test_install_docker_ratelimit_error(
coresys: CoreSys,
capture_exception: Mock,
caplog: pytest.LogCaptureFixture,
err: Exception,
async def test_install_docker_error(
coresys: CoreSys, capture_exception: Mock, caplog: pytest.LogCaptureFixture
):
"""Test install fails due to docker ratelimit error."""
"""Test install fails due to docker error."""
coresys.security.force = True
coresys.docker.images.pull.side_effect = [err, AsyncIterator([{}])]
with (
patch.object(HomeAssistantCore, "start"),
patch.object(DockerHomeAssistant, "cleanup"),
@@ -170,35 +123,19 @@ async def test_install_docker_ratelimit_error(
),
patch("supervisor.homeassistant.core.asyncio.sleep") as sleep,
):
coresys.docker.images.get.side_effect = [APIError("fail"), MagicMock()]
await coresys.homeassistant.core.install()
sleep.assert_awaited_once_with(30)
assert "Error on Home Assistant installation. Retrying in 30sec" in caplog.text
capture_exception.assert_not_called()
assert (
Issue(IssueType.DOCKER_RATELIMIT, ContextType.SYSTEM)
in coresys.resolution.issues
)
@pytest.mark.parametrize(
"err",
[
aiodocker.DockerError(HTTPStatus.INTERNAL_SERVER_ERROR, {"message": "fail"}),
APIError("fail"),
DockerException(),
RequestException(),
OSError(),
],
)
async def test_install_other_error(
coresys: CoreSys,
capture_exception: Mock,
caplog: pytest.LogCaptureFixture,
err: Exception,
coresys: CoreSys, capture_exception: Mock, caplog: pytest.LogCaptureFixture
):
"""Test install fails due to other error."""
coresys.docker.images.inspect.side_effect = [err, MagicMock()]
coresys.docker.images.get.side_effect = [(err := OSError()), MagicMock()]
with (
patch.object(HomeAssistantCore, "start"),
@@ -224,29 +161,21 @@ async def test_install_other_error(
@pytest.mark.parametrize(
("container_exc", "image_exc", "remove_calls"),
[
(NotFound("missing"), None, []),
(
None,
aiodocker.DockerError(404, {"message": "missing"}),
[call(force=True, v=True)],
),
(None, None, [call(force=True, v=True)]),
],
"container_exists,image_exists", [(False, True), (True, False), (True, True)]
)
@pytest.mark.usefixtures("path_extern")
async def test_start(
coresys: CoreSys,
container_exc: DockerException | None,
image_exc: aiodocker.DockerError | None,
remove_calls: list[call],
coresys: CoreSys, container_exists: bool, image_exists: bool, path_extern
):
"""Test starting Home Assistant."""
coresys.docker.images.inspect.return_value = {"Id": "123"}
coresys.docker.images.inspect.side_effect = image_exc
coresys.docker.containers.get.return_value.id = "123"
coresys.docker.containers.get.side_effect = container_exc
if image_exists:
coresys.docker.images.get.return_value.id = "123"
else:
coresys.docker.images.get.side_effect = ImageNotFound("missing")
if container_exists:
coresys.docker.containers.get.return_value.image.id = "123"
else:
coresys.docker.containers.get.side_effect = NotFound("missing")
with (
patch.object(
@@ -269,14 +198,18 @@ async def test_start(
assert run.call_args.kwargs["hostname"] == "homeassistant"
coresys.docker.containers.get.return_value.stop.assert_not_called()
assert (
coresys.docker.containers.get.return_value.remove.call_args_list == remove_calls
)
if container_exists:
coresys.docker.containers.get.return_value.remove.assert_called_once_with(
force=True,
v=True,
)
else:
coresys.docker.containers.get.return_value.remove.assert_not_called()
async def test_start_existing_container(coresys: CoreSys, path_extern):
"""Test starting Home Assistant when container exists and is viable."""
coresys.docker.images.inspect.return_value = {"Id": "123"}
coresys.docker.images.get.return_value.id = "123"
coresys.docker.containers.get.return_value.image.id = "123"
coresys.docker.containers.get.return_value.status = "exited"
@@ -461,33 +394,24 @@ async def test_core_loads_wrong_image_for_machine(
"""Test core is loaded with wrong image for machine."""
coresys.homeassistant.set_image("ghcr.io/home-assistant/odroid-n2-homeassistant")
coresys.homeassistant.version = AwesomeVersion("2024.4.0")
container.attrs["Config"] = {"Labels": {"io.hass.version": "2024.4.0"}}
with patch.object(
DockerAPI,
"pull_image",
return_value={
"Id": "abc123",
"Config": {"Labels": {"io.hass.version": "2024.4.0"}},
},
) as pull_image:
container.attrs |= pull_image.return_value
await coresys.homeassistant.core.load()
pull_image.assert_called_once_with(
ANY,
"ghcr.io/home-assistant/qemux86-64-homeassistant",
"2024.4.0",
platform="linux/amd64",
auth=None,
)
await coresys.homeassistant.core.load()
container.remove.assert_called_once_with(force=True, v=True)
assert coresys.docker.images.delete.call_args_list[0] == call(
"ghcr.io/home-assistant/odroid-n2-homeassistant:latest",
force=True,
)
assert coresys.docker.images.delete.call_args_list[1] == call(
"ghcr.io/home-assistant/odroid-n2-homeassistant:2024.4.0",
force=True,
assert coresys.docker.images.remove.call_args_list[0].kwargs == {
"image": "ghcr.io/home-assistant/odroid-n2-homeassistant:latest",
"force": True,
}
assert coresys.docker.images.remove.call_args_list[1].kwargs == {
"image": "ghcr.io/home-assistant/odroid-n2-homeassistant:2024.4.0",
"force": True,
}
coresys.docker.pull_image.assert_called_once_with(
ANY,
"ghcr.io/home-assistant/qemux86-64-homeassistant",
"2024.4.0",
platform="linux/amd64",
)
assert (
coresys.homeassistant.image == "ghcr.io/home-assistant/qemux86-64-homeassistant"
@@ -504,8 +428,8 @@ async def test_core_load_allows_image_override(coresys: CoreSys, container: Magi
await coresys.homeassistant.core.load()
container.remove.assert_not_called()
coresys.docker.images.delete.assert_not_called()
coresys.docker.images.inspect.assert_not_called()
coresys.docker.images.remove.assert_not_called()
coresys.docker.images.get.assert_not_called()
assert (
coresys.homeassistant.image == "ghcr.io/home-assistant/odroid-n2-homeassistant"
)
@@ -516,37 +440,27 @@ async def test_core_loads_wrong_image_for_architecture(
):
"""Test core is loaded with wrong image for architecture."""
coresys.homeassistant.version = AwesomeVersion("2024.4.0")
coresys.docker.images.inspect.return_value = img_data = (
coresys.docker.images.inspect.return_value
| {
"Architecture": "arm64",
"Config": {"Labels": {"io.hass.version": "2024.4.0"}},
}
)
container.attrs |= img_data
container.attrs["Config"] = {"Labels": {"io.hass.version": "2024.4.0"}}
coresys.docker.images.get("ghcr.io/home-assistant/qemux86-64-homeassistant").attrs[
"Architecture"
] = "arm64"
with patch.object(
DockerAPI,
"pull_image",
return_value=img_data | {"Architecture": "amd64"},
) as pull_image:
await coresys.homeassistant.core.load()
pull_image.assert_called_once_with(
ANY,
"ghcr.io/home-assistant/qemux86-64-homeassistant",
"2024.4.0",
platform="linux/amd64",
auth=None,
)
await coresys.homeassistant.core.load()
container.remove.assert_called_once_with(force=True, v=True)
assert coresys.docker.images.delete.call_args_list[0] == call(
"ghcr.io/home-assistant/qemux86-64-homeassistant:latest",
force=True,
)
assert coresys.docker.images.delete.call_args_list[1] == call(
"ghcr.io/home-assistant/qemux86-64-homeassistant:2024.4.0",
force=True,
assert coresys.docker.images.remove.call_args_list[0].kwargs == {
"image": "ghcr.io/home-assistant/qemux86-64-homeassistant:latest",
"force": True,
}
assert coresys.docker.images.remove.call_args_list[1].kwargs == {
"image": "ghcr.io/home-assistant/qemux86-64-homeassistant:2024.4.0",
"force": True,
}
coresys.docker.pull_image.assert_called_once_with(
ANY,
"ghcr.io/home-assistant/qemux86-64-homeassistant",
"2024.4.0",
platform="linux/amd64",
)
assert (
coresys.homeassistant.image == "ghcr.io/home-assistant/qemux86-64-homeassistant"

View File

@@ -90,49 +90,6 @@ async def test_logs_coloured(journald_gateway: MagicMock, coresys: CoreSys):
)
async def test_logs_no_colors(journald_gateway: MagicMock, coresys: CoreSys):
"""Test ANSI color codes being stripped when no_colors=True."""
journald_gateway.content.feed_data(
load_fixture("logs_export_supervisor.txt").encode("utf-8")
)
journald_gateway.content.feed_eof()
async with coresys.host.logs.journald_logs() as resp:
cursor, line = await anext(journal_logs_reader(resp, no_colors=True))
assert (
cursor
== "s=83fee99ca0c3466db5fc120d52ca7dd8;i=2049389;b=f5a5c442fa6548cf97474d2d57c920b3;m=4263828e8c;t=612dda478b01b;x=9ae12394c9326930"
)
# Colors should be stripped
assert (
line == "24-03-04 23:56:56 INFO (MainThread) [__main__] Closing Supervisor"
)
async def test_logs_verbose_no_colors(journald_gateway: MagicMock, coresys: CoreSys):
"""Test ANSI color codes being stripped from verbose formatted logs when no_colors=True."""
journald_gateway.content.feed_data(
load_fixture("logs_export_supervisor.txt").encode("utf-8")
)
journald_gateway.content.feed_eof()
async with coresys.host.logs.journald_logs() as resp:
cursor, line = await anext(
journal_logs_reader(
resp, log_formatter=LogFormatter.VERBOSE, no_colors=True
)
)
assert (
cursor
== "s=83fee99ca0c3466db5fc120d52ca7dd8;i=2049389;b=f5a5c442fa6548cf97474d2d57c920b3;m=4263828e8c;t=612dda478b01b;x=9ae12394c9326930"
)
# Colors should be stripped in verbose format too
assert (
line
== "2024-03-04 22:56:56.709 ha-hloub hassio_supervisor[466]: 24-03-04 23:56:56 INFO (MainThread) [__main__] Closing Supervisor"
)
async def test_boot_ids(
journald_gateway: MagicMock,
coresys: CoreSys,

View File

@@ -1179,6 +1179,7 @@ async def test_job_scheduled_delay(coresys: CoreSys):
async def test_job_scheduled_at(coresys: CoreSys):
"""Test job that schedules a job to start at a specified time."""
dt = datetime.now()
class TestClass:
"""Test class."""
@@ -1188,12 +1189,10 @@ async def test_job_scheduled_at(coresys: CoreSys):
self.coresys = coresys
@Job(name="test_job_scheduled_at_job_scheduler")
async def job_scheduler(
self, scheduled_time: datetime
) -> tuple[SupervisorJob, asyncio.TimerHandle]:
async def job_scheduler(self) -> tuple[SupervisorJob, asyncio.TimerHandle]:
"""Schedule a job to run at specified time."""
return self.coresys.jobs.schedule_job(
self.job_task, JobSchedulerOptions(start_at=scheduled_time)
self.job_task, JobSchedulerOptions(start_at=dt + timedelta(seconds=0.1))
)
@Job(name="test_job_scheduled_at_job_task")
@@ -1202,28 +1201,29 @@ async def test_job_scheduled_at(coresys: CoreSys):
self.coresys.jobs.current.stage = "work"
test = TestClass(coresys)
# Schedule job to run 0.1 seconds from now
scheduled_time = datetime.now() + timedelta(seconds=0.1)
job, _ = await test.job_scheduler(scheduled_time)
started = False
ended = False
job_started = asyncio.Event()
job_ended = asyncio.Event()
async def start_listener(evt_job: SupervisorJob):
nonlocal started
started = started or evt_job.uuid == job.uuid
if evt_job.uuid == job.uuid:
job_started.set()
async def end_listener(evt_job: SupervisorJob):
nonlocal ended
ended = ended or evt_job.uuid == job.uuid
if evt_job.uuid == job.uuid:
job_ended.set()
coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_START, start_listener)
coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, end_listener)
async with time_machine.travel(dt):
job, _ = await test.job_scheduler()
await asyncio.sleep(0.2)
coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_START, start_listener)
coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, end_listener)
# Advance time to exactly when job should start and wait for completion
async with time_machine.travel(dt + timedelta(seconds=0.1)):
await asyncio.wait_for(
asyncio.gather(job_started.wait(), job_ended.wait()), timeout=1.0
)
assert started
assert ended
assert job.done
assert job.name == "test_job_scheduled_at_job_task"
assert job.stage == "work"

View File

@@ -115,17 +115,7 @@ async def test_not_started(coresys):
assert filter_data(coresys, SAMPLE_EVENT, {}) == SAMPLE_EVENT
await coresys.core.set_state(CoreState.SETUP)
filtered = filter_data(coresys, SAMPLE_EVENT, {})
# During SETUP, we should have basic system info available
assert "contexts" in filtered
assert "versions" in filtered["contexts"]
assert "docker" in filtered["contexts"]["versions"]
assert "supervisor" in filtered["contexts"]["versions"]
assert "host" in filtered["contexts"]
assert "machine" in filtered["contexts"]["host"]
assert filtered["contexts"]["versions"]["docker"] == coresys.docker.info.version
assert filtered["contexts"]["versions"]["supervisor"] == coresys.supervisor.version
assert filtered["contexts"]["host"]["machine"] == coresys.machine
assert filter_data(coresys, SAMPLE_EVENT, {}) == SAMPLE_EVENT
async def test_defaults(coresys):

View File

@@ -119,10 +119,10 @@ async def test_load(
"mnt-data-supervisor-mounts-backup_test.mount",
"fail",
[
("Options", Variant("s", "noserverino,guest")),
("Type", Variant("s", "cifs")),
("Description", Variant("s", "Supervisor cifs mount: backup_test")),
("What", Variant("s", "//backup.local/backups")),
["Options", Variant("s", "noserverino,guest")],
["Type", Variant("s", "cifs")],
["Description", Variant("s", "Supervisor cifs mount: backup_test")],
["What", Variant("s", "//backup.local/backups")],
],
[],
),
@@ -130,10 +130,10 @@ async def test_load(
"mnt-data-supervisor-mounts-media_test.mount",
"fail",
[
("Options", Variant("s", "soft,timeo=200")),
("Type", Variant("s", "nfs")),
("Description", Variant("s", "Supervisor nfs mount: media_test")),
("What", Variant("s", "media.local:/media")),
["Options", Variant("s", "soft,timeo=200")],
["Type", Variant("s", "nfs")],
["Description", Variant("s", "Supervisor nfs mount: media_test")],
["What", Variant("s", "media.local:/media")],
],
[],
),
@@ -141,12 +141,12 @@ async def test_load(
"mnt-data-supervisor-media-media_test.mount",
"fail",
[
("Options", Variant("s", "bind")),
(
["Options", Variant("s", "bind")],
[
"Description",
Variant("s", "Supervisor bind mount: bind_media_test"),
),
("What", Variant("s", "/mnt/data/supervisor/mounts/media_test")),
],
["What", Variant("s", "/mnt/data/supervisor/mounts/media_test")],
],
[],
),
@@ -198,10 +198,10 @@ async def test_load_share_mount(
"mnt-data-supervisor-mounts-share_test.mount",
"fail",
[
("Options", Variant("s", "soft,timeo=200")),
("Type", Variant("s", "nfs")),
("Description", Variant("s", "Supervisor nfs mount: share_test")),
("What", Variant("s", "share.local:/share")),
["Options", Variant("s", "soft,timeo=200")],
["Type", Variant("s", "nfs")],
["Description", Variant("s", "Supervisor nfs mount: share_test")],
["What", Variant("s", "share.local:/share")],
],
[],
),
@@ -209,9 +209,9 @@ async def test_load_share_mount(
"mnt-data-supervisor-share-share_test.mount",
"fail",
[
("Options", Variant("s", "bind")),
("Description", Variant("s", "Supervisor bind mount: bind_share_test")),
("What", Variant("s", "/mnt/data/supervisor/mounts/share_test")),
["Options", Variant("s", "bind")],
["Description", Variant("s", "Supervisor bind mount: bind_share_test")],
["What", Variant("s", "/mnt/data/supervisor/mounts/share_test")],
],
[],
),
@@ -318,12 +318,12 @@ async def test_mount_failed_during_load(
"mnt-data-supervisor-media-media_test.mount",
"fail",
[
("Options", Variant("s", "ro,bind")),
(
["Options", Variant("s", "ro,bind")],
[
"Description",
Variant("s", "Supervisor bind mount: emergency_media_test"),
),
("What", Variant("s", "/mnt/data/supervisor/emergency/media_test")),
],
["What", Variant("s", "/mnt/data/supervisor/emergency/media_test")],
],
[],
)
@@ -634,10 +634,10 @@ async def test_reload_mounts_attempts_initial_mount(
"mnt-data-supervisor-mounts-media_test.mount",
"fail",
[
("Options", Variant("s", "soft,timeo=200")),
("Type", Variant("s", "nfs")),
("Description", Variant("s", "Supervisor nfs mount: media_test")),
("What", Variant("s", "media.local:/media")),
["Options", Variant("s", "soft,timeo=200")],
["Type", Variant("s", "nfs")],
["Description", Variant("s", "Supervisor nfs mount: media_test")],
["What", Variant("s", "media.local:/media")],
],
[],
),
@@ -645,9 +645,9 @@ async def test_reload_mounts_attempts_initial_mount(
"mnt-data-supervisor-media-media_test.mount",
"fail",
[
("Options", Variant("s", "bind")),
("Description", Variant("s", "Supervisor bind mount: bind_media_test")),
("What", Variant("s", "/mnt/data/supervisor/mounts/media_test")),
["Options", Variant("s", "bind")],
["Description", Variant("s", "Supervisor bind mount: bind_media_test")],
["What", Variant("s", "/mnt/data/supervisor/mounts/media_test")],
],
[],
),

View File

@@ -105,7 +105,7 @@ async def test_cifs_mount(
"mnt-data-supervisor-mounts-test.mount",
"fail",
[
(
[
"Options",
Variant(
"s",
@@ -117,10 +117,10 @@ async def test_cifs_mount(
]
),
),
),
("Type", Variant("s", "cifs")),
("Description", Variant("s", "Supervisor cifs mount: test")),
("What", Variant("s", "//test.local/camera")),
],
["Type", Variant("s", "cifs")],
["Description", Variant("s", "Supervisor cifs mount: test")],
["What", Variant("s", "//test.local/camera")],
],
[],
)
@@ -177,10 +177,10 @@ async def test_cifs_mount_read_only(
"mnt-data-supervisor-mounts-test.mount",
"fail",
[
("Options", Variant("s", "ro,noserverino,guest")),
("Type", Variant("s", "cifs")),
("Description", Variant("s", "Supervisor cifs mount: test")),
("What", Variant("s", "//test.local/camera")),
["Options", Variant("s", "ro,noserverino,guest")],
["Type", Variant("s", "cifs")],
["Description", Variant("s", "Supervisor cifs mount: test")],
["What", Variant("s", "//test.local/camera")],
],
[],
)
@@ -237,10 +237,10 @@ async def test_nfs_mount(
"mnt-data-supervisor-mounts-test.mount",
"fail",
[
("Options", Variant("s", "port=1234,soft,timeo=200")),
("Type", Variant("s", "nfs")),
("Description", Variant("s", "Supervisor nfs mount: test")),
("What", Variant("s", "test.local:/media/camera")),
["Options", Variant("s", "port=1234,soft,timeo=200")],
["Type", Variant("s", "nfs")],
["Description", Variant("s", "Supervisor nfs mount: test")],
["What", Variant("s", "test.local:/media/camera")],
],
[],
)
@@ -283,10 +283,10 @@ async def test_nfs_mount_read_only(
"mnt-data-supervisor-mounts-test.mount",
"fail",
[
("Options", Variant("s", "ro,port=1234,soft,timeo=200")),
("Type", Variant("s", "nfs")),
("Description", Variant("s", "Supervisor nfs mount: test")),
("What", Variant("s", "test.local:/media/camera")),
["Options", Variant("s", "ro,port=1234,soft,timeo=200")],
["Type", Variant("s", "nfs")],
["Description", Variant("s", "Supervisor nfs mount: test")],
["What", Variant("s", "test.local:/media/camera")],
],
[],
)
@@ -331,10 +331,10 @@ async def test_load(
"mnt-data-supervisor-mounts-test.mount",
"fail",
[
("Options", Variant("s", "noserverino,guest")),
("Type", Variant("s", "cifs")),
("Description", Variant("s", "Supervisor cifs mount: test")),
("What", Variant("s", "//test.local/share")),
["Options", Variant("s", "noserverino,guest")],
["Type", Variant("s", "cifs")],
["Description", Variant("s", "Supervisor cifs mount: test")],
["What", Variant("s", "//test.local/share")],
],
[],
)
@@ -736,10 +736,10 @@ async def test_mount_fails_if_down(
"mnt-data-supervisor-mounts-test.mount",
"fail",
[
("Options", Variant("s", "port=1234,soft,timeo=200")),
("Type", Variant("s", "nfs")),
("Description", Variant("s", "Supervisor nfs mount: test")),
("What", Variant("s", "test.local:/media/camera")),
["Options", Variant("s", "port=1234,soft,timeo=200")],
["Type", Variant("s", "nfs")],
["Description", Variant("s", "Supervisor nfs mount: test")],
["What", Variant("s", "test.local:/media/camera")],
],
[],
)
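
The tuple-to-list edits in the two mount test files above only change how the expected systemd unit properties are spelled in the assertions; each entry is still a name/Variant pair. A tiny illustration with dbus_fast, reusing values from the fixtures above:

    # Sketch: transient .mount unit properties as name/Variant pairs, in the
    # list form the updated assertions expect. Values mirror the fixtures.
    from dbus_fast import Variant

    properties = [
        ["Options", Variant("s", "noserverino,guest")],
        ["Type", Variant("s", "cifs")],
        ["Description", Variant("s", "Supervisor cifs mount: backup_test")],
        ["What", Variant("s", "//backup.local/backups")],
    ]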

View File

@@ -2,7 +2,7 @@
import asyncio
from pathlib import Path
from unittest.mock import ANY, MagicMock, Mock, PropertyMock, call, patch
from unittest.mock import ANY, MagicMock, Mock, PropertyMock, patch
from awesomeversion import AwesomeVersion
import pytest
@@ -11,7 +11,6 @@ from supervisor.const import BusEvent, CpuArch
from supervisor.coresys import CoreSys
from supervisor.docker.const import ContainerState
from supervisor.docker.interface import DockerInterface
from supervisor.docker.manager import DockerAPI
from supervisor.docker.monitor import DockerContainerStateEvent
from supervisor.exceptions import (
AudioError,
@@ -360,26 +359,21 @@ async def test_load_with_incorrect_image(
plugin.version = AwesomeVersion("2024.4.0")
container.status = "running"
coresys.docker.images.inspect.return_value = img_data = (
coresys.docker.images.inspect.return_value
| {"Config": {"Labels": {"io.hass.version": "2024.4.0"}}}
)
container.attrs |= img_data
container.attrs["Config"] = {"Labels": {"io.hass.version": "2024.4.0"}}
with patch.object(DockerAPI, "pull_image", return_value=img_data) as pull_image:
await plugin.load()
pull_image.assert_called_once_with(
ANY, correct_image, "2024.4.0", platform="linux/amd64", auth=None
)
await plugin.load()
container.remove.assert_called_once_with(force=True, v=True)
assert coresys.docker.images.delete.call_args_list[0] == call(
f"{old_image}:latest",
force=True,
)
assert coresys.docker.images.delete.call_args_list[1] == call(
f"{old_image}:2024.4.0",
force=True,
assert coresys.docker.images.remove.call_args_list[0].kwargs == {
"image": f"{old_image}:latest",
"force": True,
}
assert coresys.docker.images.remove.call_args_list[1].kwargs == {
"image": f"{old_image}:2024.4.0",
"force": True,
}
coresys.docker.pull_image.assert_called_once_with(
ANY, correct_image, "2024.4.0", platform="linux/amd64"
)
assert plugin.image == correct_image

View File

@@ -5,7 +5,10 @@ from unittest.mock import MagicMock, patch
from supervisor.const import CoreState
from supervisor.coresys import CoreSys
from supervisor.resolution.evaluations.operating_system import EvaluateOperatingSystem
from supervisor.resolution.evaluations.operating_system import (
SUPPORTED_OS,
EvaluateOperatingSystem,
)
async def test_evaluation(coresys: CoreSys):
@@ -22,7 +25,13 @@ async def test_evaluation(coresys: CoreSys):
assert operating_system.reason in coresys.resolution.unsupported
coresys.os._available = True
assert coresys.os.available
await operating_system()
assert operating_system.reason not in coresys.resolution.unsupported
coresys.os._available = False
coresys.host._info = MagicMock(
operating_system=SUPPORTED_OS[0], timezone=None, timezone_tzinfo=None
)
await operating_system()
assert operating_system.reason not in coresys.resolution.unsupported

View File

@@ -1,43 +0,0 @@
"""Test evaluation supported system architectures."""
from unittest.mock import PropertyMock, patch
import pytest
from supervisor.const import CoreState
from supervisor.coresys import CoreSys
from supervisor.resolution.evaluations.system_architecture import (
EvaluateSystemArchitecture,
)
@pytest.mark.parametrize("arch", ["i386", "armhf", "armv7"])
async def test_evaluation_unsupported_architectures(
coresys: CoreSys,
arch: str,
):
"""Test evaluation of unsupported system architectures."""
system_architecture = EvaluateSystemArchitecture(coresys)
await coresys.core.set_state(CoreState.INITIALIZE)
with patch.object(
type(coresys.supervisor), "arch", PropertyMock(return_value=arch)
):
await system_architecture()
assert system_architecture.reason in coresys.resolution.unsupported
@pytest.mark.parametrize("arch", ["amd64", "aarch64"])
async def test_evaluation_supported_architectures(
coresys: CoreSys,
arch: str,
):
"""Test evaluation of supported system architectures."""
system_architecture = EvaluateSystemArchitecture(coresys)
await coresys.core.set_state(CoreState.INITIALIZE)
with patch.object(
type(coresys.supervisor), "arch", PropertyMock(return_value=arch)
):
await system_architecture()
assert system_architecture.reason not in coresys.resolution.unsupported

View File

@@ -1,9 +1,8 @@
"""Test fixup addon execute repair."""
from http import HTTPStatus
from unittest.mock import patch
from unittest.mock import MagicMock, patch
import aiodocker
from docker.errors import NotFound
import pytest
from supervisor.addons.addon import Addon
@@ -18,9 +17,7 @@ from supervisor.resolution.fixups.addon_execute_repair import FixupAddonExecuteR
async def test_fixup(docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon):
"""Test fixup rebuilds addon's container."""
docker.images.inspect.side_effect = aiodocker.DockerError(
HTTPStatus.NOT_FOUND, {"message": "missing"}
)
docker.images.get.side_effect = NotFound("missing")
install_addon_ssh.data["image"] = "test_image"
addon_execute_repair = FixupAddonExecuteRepair(coresys)
@@ -44,9 +41,7 @@ async def test_fixup_max_auto_attempts(
docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon
):
"""Test fixup stops being auto-applied after 5 failures."""
docker.images.inspect.side_effect = aiodocker.DockerError(
HTTPStatus.NOT_FOUND, {"message": "missing"}
)
docker.images.get.side_effect = NotFound("missing")
install_addon_ssh.data["image"] = "test_image"
addon_execute_repair = FixupAddonExecuteRepair(coresys)
@@ -87,6 +82,8 @@ async def test_fixup_image_exists(
docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon
):
"""Test fixup dismisses if image exists."""
docker.images.get.return_value = MagicMock()
addon_execute_repair = FixupAddonExecuteRepair(coresys)
assert addon_execute_repair.auto is True
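
For reference, the fixture changes above swap docker-py's NotFound for aiodocker.DockerError carrying an HTTP status. A minimal sketch of raising and inspecting that error; the status and message values are arbitrary:

    # Sketch: aiodocker.DockerError is built from (status, data) and exposes
    # .status and .message, unlike docker-py's response-based NotFound.
    from http import HTTPStatus

    import aiodocker

    try:
        raise aiodocker.DockerError(HTTPStatus.NOT_FOUND, {"message": "missing"})
    except aiodocker.DockerError as err:
        assert err.status == HTTPStatus.NOT_FOUND
        assert err.message == "missing"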

View File

@@ -86,22 +86,6 @@ def test_format_verbose_newlines():
)
def test_format_verbose_colors():
"""Test verbose formatter with ANSI colors in message."""
fields = {
"__REALTIME_TIMESTAMP": "1379403171000000",
"_HOSTNAME": "homeassistant",
"SYSLOG_IDENTIFIER": "python",
"_PID": "666",
"MESSAGE": "\x1b[32mHello, world!\x1b[0m",
}
assert (
journal_verbose_formatter(fields)
== "2013-09-17 07:32:51.000 homeassistant python[666]: \x1b[32mHello, world!\x1b[0m"
)
async def test_parsing_simple():
"""Test plain formatter."""
journal_logs, stream = _journal_logs_mock()
@@ -313,54 +297,3 @@ async def test_parsing_non_utf8_in_binary_message():
)
_, line = await anext(journal_logs_reader(journal_logs))
assert line == "Hello, \ufffd world!"
def test_format_plain_no_colors():
"""Test plain formatter strips ANSI color codes when no_colors=True."""
fields = {"MESSAGE": "\x1b[32mHello, world!\x1b[0m"}
assert journal_plain_formatter(fields, no_colors=True) == "Hello, world!"
def test_format_verbose_no_colors():
"""Test verbose formatter strips ANSI color codes when no_colors=True."""
fields = {
"__REALTIME_TIMESTAMP": "1379403171000000",
"_HOSTNAME": "homeassistant",
"SYSLOG_IDENTIFIER": "python",
"_PID": "666",
"MESSAGE": "\x1b[32mHello, world!\x1b[0m",
}
assert (
journal_verbose_formatter(fields, no_colors=True)
== "2013-09-17 07:32:51.000 homeassistant python[666]: Hello, world!"
)
async def test_parsing_colored_logs_verbose_no_colors():
"""Test verbose formatter strips colors from colored logs."""
journal_logs, stream = _journal_logs_mock()
stream.feed_data(
b"__REALTIME_TIMESTAMP=1379403171000000\n"
b"_HOSTNAME=homeassistant\n"
b"SYSLOG_IDENTIFIER=python\n"
b"_PID=666\n"
b"MESSAGE\n\x0e\x00\x00\x00\x00\x00\x00\x00\x1b[31mERROR\x1b[0m\n"
b"AFTER=after\n\n"
)
_, line = await anext(
journal_logs_reader(
journal_logs, log_formatter=LogFormatter.VERBOSE, no_colors=True
)
)
assert line == "2013-09-17 07:32:51.000 homeassistant python[666]: ERROR"
async def test_parsing_multiple_color_codes():
"""Test stripping multiple ANSI color codes in single message."""
journal_logs, stream = _journal_logs_mock()
stream.feed_data(
b"MESSAGE\n\x29\x00\x00\x00\x00\x00\x00\x00\x1b[31mRed\x1b[0m \x1b[32mGreen\x1b[0m \x1b[34mBlue\x1b[0m\n"
b"AFTER=after\n\n"
)
_, line = await anext(journal_logs_reader(journal_logs, no_colors=True))
assert line == "Red Green Blue"