Mirror of https://github.com/home-assistant/supervisor.git (synced 2025-12-01 13:38:07 +00:00)

Compare commits: autoupdate...dependabot (22 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 4a49a64c76 | |
| | 72159a0ae2 | |
| | 0a7b26187d | |
| | 2dc1f9224e | |
| | 6302c7d394 | |
| | f55fd891e9 | |
| | 8a251e0324 | |
| | 62b7b8c399 | |
| | 3c87704802 | |
| | ae7700f52c | |
| | e06e792e74 | |
| | 5f55ab8de4 | |
| | ca521c24cb | |
| | 6042694d84 | |
| | 2b2aedae60 | |
| | 4b4afd081b | |
| | a3dca10fd8 | |
| | d73682ee8a | |
| | 032fa4cdc4 | |
| | 7244e447ab | |
| | 603ba57846 | |
| | 0ff12abdf4 | |
.github/workflows/builder.yml (vendored, 62 changes)

@@ -34,6 +34,9 @@ on:

 env:
   DEFAULT_PYTHON: "3.13"
+  COSIGN_VERSION: "v2.5.3"
+  CRANE_VERSION: "v0.20.7"
+  CRANE_SHA256: "8ef3564d264e6b5ca93f7b7f5652704c4dd29d33935aff6947dd5adefd05953e"
   BUILD_NAME: supervisor
   BUILD_TYPE: supervisor

@@ -107,7 +110,7 @@ jobs:
       # home-assistant/wheels doesn't support sha pinning
       - name: Build wheels
         if: needs.init.outputs.requirements == 'true'
-        uses: home-assistant/wheels@2025.10.0
+        uses: home-assistant/wheels@2025.11.0
         with:
           abi: cp313
           tag: musllinux_1_2

@@ -126,7 +129,7 @@ jobs:
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
         if: needs.init.outputs.publish == 'true'
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}

@@ -134,7 +137,7 @@ jobs:
         if: needs.init.outputs.publish == 'true'
         uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
         with:
-          cosign-release: "v2.5.3"
+          cosign-release: ${{ env.COSIGN_VERSION }}

       - name: Install dirhash and calc hash
         if: needs.init.outputs.publish == 'true'

@@ -162,7 +165,7 @@ jobs:

       # home-assistant/builder doesn't support sha pinning
       - name: Build supervisor
-        uses: home-assistant/builder@2025.09.0
+        uses: home-assistant/builder@2025.11.0
         with:
           args: |
             $BUILD_ARGS \

@@ -173,7 +176,7 @@ jobs:

   version:
     name: Update version
-    needs: ["init", "run_supervisor"]
+    needs: ["init", "run_supervisor", "retag_deprecated"]
     runs-on: ubuntu-latest
     steps:
       - name: Checkout the repository

@@ -208,7 +211,7 @@ jobs:
       # home-assistant/builder doesn't support sha pinning
       - name: Build the Supervisor
         if: needs.init.outputs.publish != 'true'
-        uses: home-assistant/builder@2025.09.0
+        uses: home-assistant/builder@2025.11.0
         with:
           args: |
             --test \

@@ -352,3 +355,50 @@ jobs:
       - name: Get supervisor logs on failiure
         if: ${{ cancelled() || failure() }}
         run: docker logs hassio_supervisor
+
+  retag_deprecated:
+    needs: ["build", "init"]
+    name: Re-tag deprecated ${{ matrix.arch }} images
+    if: needs.init.outputs.publish == 'true'
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      id-token: write
+      packages: write
+    strategy:
+      matrix:
+        arch: ["armhf", "armv7", "i386"]
+    env:
+      # Last available release for deprecated architectures
+      FROZEN_VERSION: "2025.11.5"
+    steps:
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Install Cosign
+        uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
+        with:
+          cosign-release: ${{ env.COSIGN_VERSION }}
+
+      - name: Install crane
+        run: |
+          curl -sLO https://github.com/google/go-containerregistry/releases/download/${{ env.CRANE_VERSION }}/go-containerregistry_Linux_x86_64.tar.gz
+          echo "${{ env.CRANE_SHA256 }} go-containerregistry_Linux_x86_64.tar.gz" | sha256sum -c -
+          tar xzf go-containerregistry_Linux_x86_64.tar.gz crane
+          sudo mv crane /usr/local/bin/
+
+      - name: Re-tag deprecated image with updated version label
+        run: |
+          crane auth login ghcr.io -u ${{ github.repository_owner }} -p ${{ secrets.GITHUB_TOKEN }}
+          crane mutate \
+            --label io.hass.version=${{ needs.init.outputs.version }} \
+            --tag ghcr.io/home-assistant/${{ matrix.arch }}-hassio-supervisor:${{ needs.init.outputs.version }} \
+            ghcr.io/home-assistant/${{ matrix.arch }}-hassio-supervisor:${{ env.FROZEN_VERSION }}
+
+      - name: Sign image with Cosign
+        run: |
+          cosign sign --yes ghcr.io/home-assistant/${{ matrix.arch }}-hassio-supervisor:${{ needs.init.outputs.version }}
.github/workflows/ci.yaml (vendored, 18 changes)

@@ -29,7 +29,7 @@ jobs:
         uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python
         id: python
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
       - name: Restore Python virtual environment

@@ -70,7 +70,7 @@ jobs:
       - name: Check out code from GitHub
        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python
         with:
           python-version: ${{ needs.prepare.outputs.python-version }}

@@ -113,7 +113,7 @@ jobs:
       - name: Check out code from GitHub
         uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python
         with:
           python-version: ${{ needs.prepare.outputs.python-version }}

@@ -171,7 +171,7 @@ jobs:
       - name: Check out code from GitHub
         uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python
         with:
           python-version: ${{ needs.prepare.outputs.python-version }}

@@ -215,7 +215,7 @@ jobs:
       - name: Check out code from GitHub
         uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python
         with:
           python-version: ${{ needs.prepare.outputs.python-version }}

@@ -259,7 +259,7 @@ jobs:
       - name: Check out code from GitHub
         uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python
         with:
           python-version: ${{ needs.prepare.outputs.python-version }}

@@ -295,7 +295,7 @@ jobs:
       - name: Check out code from GitHub
         uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python
         with:
           python-version: ${{ needs.prepare.outputs.python-version }}

@@ -341,7 +341,7 @@ jobs:
       - name: Check out code from GitHub
         uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python
         with:
           python-version: ${{ needs.prepare.outputs.python-version }}

@@ -400,7 +400,7 @@ jobs:
       - name: Check out code from GitHub
         uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python
         with:
           python-version: ${{ needs.prepare.outputs.python-version }}
build.yaml

@@ -1,10 +1,7 @@
 image: ghcr.io/home-assistant/{arch}-hassio-supervisor
 build_from:
   aarch64: ghcr.io/home-assistant/aarch64-base-python:3.13-alpine3.22-2025.11.1
-  armhf: ghcr.io/home-assistant/armhf-base-python:3.13-alpine3.22-2025.11.1
-  armv7: ghcr.io/home-assistant/armv7-base-python:3.13-alpine3.22-2025.11.1
   amd64: ghcr.io/home-assistant/amd64-base-python:3.13-alpine3.22-2025.11.1
-  i386: ghcr.io/home-assistant/i386-base-python:3.13-alpine3.22-2025.11.1
 cosign:
   base_identity: https://github.com/home-assistant/docker-base/.*
   identity: https://github.com/home-assistant/supervisor/.*
requirements.txt

@@ -25,8 +25,8 @@ pyudev==0.24.4
 PyYAML==6.0.3
 requests==2.32.5
 securetar==2025.2.1
-sentry-sdk==2.45.0
+sentry-sdk==2.46.0
 setuptools==80.9.0
 voluptuous==0.15.2
-dbus-fast==2.45.1
+dbus-fast==3.1.2
 zlib-fast==0.2.1
requirements_tests.txt

@@ -2,15 +2,15 @@ astroid==4.0.2
 coverage==7.12.0
 mypy==1.18.2
 pre-commit==4.5.0
-pylint==4.0.3
+pylint==4.0.4
 pytest-aiohttp==1.1.0
 pytest-asyncio==1.3.0
 pytest-cov==7.0.0
 pytest-timeout==2.4.0
 pytest==9.0.1
-ruff==0.14.6
+ruff==0.14.7
 time-machine==3.1.0
-types-docker==7.1.0.20251009
+types-docker==7.1.0.20251129
 types-pyyaml==6.0.12.20250915
 types-requests==2.32.4.20250913
 urllib3==2.5.0
supervisor/addons/build.py

@@ -2,7 +2,9 @@

 from __future__ import annotations

+import base64
 from functools import cached_property
+import json
 from pathlib import Path
 from typing import TYPE_CHECKING, Any

@@ -12,12 +14,15 @@ from ..const import (
     ATTR_ARGS,
     ATTR_BUILD_FROM,
     ATTR_LABELS,
+    ATTR_PASSWORD,
     ATTR_SQUASH,
+    ATTR_USERNAME,
     FILE_SUFFIX_CONFIGURATION,
     META_ADDON,
     SOCKET_DOCKER,
 )
 from ..coresys import CoreSys, CoreSysAttributes
+from ..docker.const import DOCKER_HUB
 from ..docker.interface import MAP_ARCH
 from ..exceptions import ConfigurationFileError, HassioArchNotFound
 from ..utils.common import FileConfiguration, find_one_filetype

@@ -122,8 +127,43 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
         except HassioArchNotFound:
             return False

+    def get_docker_config_json(self) -> str | None:
+        """Generate Docker config.json content with registry credentials for base image.
+
+        Returns a JSON string with registry credentials for the base image's registry,
+        or None if no matching registry is configured.
+
+        Raises:
+            HassioArchNotFound: If the add-on is not supported on the current architecture.
+
+        """
+        # Early return before accessing base_image to avoid unnecessary arch lookup
+        if not self.sys_docker.config.registries:
+            return None
+
+        registry = self.sys_docker.config.get_registry_for_image(self.base_image)
+        if not registry:
+            return None
+
+        stored = self.sys_docker.config.registries[registry]
+        username = stored[ATTR_USERNAME]
+        password = stored[ATTR_PASSWORD]
+
+        # Docker config.json uses base64-encoded "username:password" for auth
+        auth_string = base64.b64encode(f"{username}:{password}".encode()).decode()
+
+        # Use the actual registry URL for the key
+        # Docker Hub uses "https://index.docker.io/v1/" as the key
+        registry_key = (
+            "https://index.docker.io/v1/" if registry == DOCKER_HUB else registry
+        )
+
+        config = {"auths": {registry_key: {"auth": auth_string}}}
+
+        return json.dumps(config)
+
     def get_docker_args(
-        self, version: AwesomeVersion, image_tag: str
+        self, version: AwesomeVersion, image_tag: str, docker_config_path: Path | None
     ) -> dict[str, Any]:
         """Create a dict with Docker run args."""
         dockerfile_path = self.get_dockerfile().relative_to(self.addon.path_location)

@@ -172,12 +212,24 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
             self.addon.path_location
         )

+        volumes = {
+            SOCKET_DOCKER: {"bind": "/var/run/docker.sock", "mode": "rw"},
+            addon_extern_path: {"bind": "/addon", "mode": "ro"},
+        }
+
+        # Mount Docker config with registry credentials if available
+        if docker_config_path:
+            docker_config_extern_path = self.sys_config.local_to_extern_path(
+                docker_config_path
+            )
+            volumes[docker_config_extern_path] = {
+                "bind": "/root/.docker/config.json",
+                "mode": "ro",
+            }
+
         return {
             "command": build_cmd,
-            "volumes": {
-                SOCKET_DOCKER: {"bind": "/var/run/docker.sock", "mode": "rw"},
-                addon_extern_path: {"bind": "/addon", "mode": "ro"},
-            },
+            "volumes": volumes,
             "working_dir": "/addon",
         }
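The new `get_docker_config_json` emits a standard Docker client `config.json`. As a rough standalone sketch of the same encoding, with made-up registry names and credentials (only `DOCKER_HUB` mirrors the constant this changeset adds to supervisor/docker/const.py):

```python
import base64
import json

DOCKER_HUB = "hub.docker.com"  # sentinel Supervisor uses for Docker Hub credentials


def docker_config_json(registry: str, username: str, password: str) -> str:
    """Build a Docker client config.json with one auth entry, as the diff does."""
    # Docker stores credentials as base64("username:password")
    auth = base64.b64encode(f"{username}:{password}".encode()).decode()
    # Docker Hub is keyed by its legacy v1 index URL; other registries by hostname
    key = "https://index.docker.io/v1/" if registry == DOCKER_HUB else registry
    return json.dumps({"auths": {key: {"auth": auth}}})


print(docker_config_json("ghcr.io", "testuser", "testpass"))
# {"auths": {"ghcr.io": {"auth": "dGVzdHVzZXI6dGVzdHBhc3M="}}}
```

Mounting this file read-only at /root/.docker/config.json inside the builder container is what lets the build pull a base image from a private registry without logging the whole daemon in.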
supervisor/api/__init__.py

@@ -813,6 +813,10 @@ class RestAPI(CoreSysAttributes):
         self.webapp.add_routes(
             [
                 web.get("/docker/info", api_docker.info),
+                web.post(
+                    "/docker/migrate-storage-driver",
+                    api_docker.migrate_docker_storage_driver,
+                ),
                 web.post("/docker/options", api_docker.options),
                 web.get("/docker/registries", api_docker.registries),
                 web.post("/docker/registries", api_docker.create_registry),
supervisor/api/docker.py

@@ -4,6 +4,7 @@ import logging
 from typing import Any

 from aiohttp import web
+from awesomeversion import AwesomeVersion
 import voluptuous as vol

 from supervisor.resolution.const import ContextType, IssueType, SuggestionType

@@ -16,6 +17,7 @@ from ..const import (
     ATTR_PASSWORD,
     ATTR_REGISTRIES,
     ATTR_STORAGE,
+    ATTR_STORAGE_DRIVER,
     ATTR_USERNAME,
     ATTR_VERSION,
 )

@@ -42,6 +44,12 @@ SCHEMA_OPTIONS = vol.Schema(
     }
 )

+SCHEMA_MIGRATE_DOCKER_STORAGE_DRIVER = vol.Schema(
+    {
+        vol.Required(ATTR_STORAGE_DRIVER): vol.In(["overlayfs", "overlay2"]),
+    }
+)
+

 class APIDocker(CoreSysAttributes):
     """Handle RESTful API for Docker configuration."""

@@ -123,3 +131,27 @@ class APIDocker(CoreSysAttributes):

         del self.sys_docker.config.registries[hostname]
         await self.sys_docker.config.save_data()
+
+    @api_process
+    async def migrate_docker_storage_driver(self, request: web.Request) -> None:
+        """Migrate Docker storage driver."""
+        if (
+            not self.coresys.os.available
+            or not self.coresys.os.version
+            or self.coresys.os.version < AwesomeVersion("17.0.dev0")
+        ):
+            raise APINotFound(
+                "Home Assistant OS 17.0 or newer required for Docker storage driver migration"
+            )
+
+        body = await api_validate(SCHEMA_MIGRATE_DOCKER_STORAGE_DRIVER, request)
+        await self.sys_dbus.agent.system.migrate_docker_storage_driver(
+            body[ATTR_STORAGE_DRIVER]
+        )
+
+        _LOGGER.info("Host system reboot required to apply Docker storage migration")
+        self.sys_resolution.create_issue(
+            IssueType.REBOOT_REQUIRED,
+            ContextType.SYSTEM,
+            suggestions=[SuggestionType.EXECUTE_REBOOT],
+        )
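For context, a sketch of exercising the new endpoint from a client; the base URL and token handling below are assumptions following the usual Supervisor API conventions, not part of this diff:

```python
import asyncio

import aiohttp

SUPERVISOR_URL = "http://supervisor"  # assumed add-on-internal API host
TOKEN = "<SUPERVISOR_TOKEN>"          # placeholder, not a real token


async def migrate(driver: str) -> None:
    """POST a storage driver to the new endpoint; the schema allows overlayfs or overlay2."""
    async with aiohttp.ClientSession() as session:
        async with session.post(
            f"{SUPERVISOR_URL}/docker/migrate-storage-driver",
            headers={"Authorization": f"Bearer {TOKEN}"},
            json={"storage_driver": driver},
        ) as resp:
            # Per the handler: 200 on success, 404 when not on HAOS >= 17.0,
            # and 400 for drivers outside the schema
            print(resp.status, await resp.json())


asyncio.run(migrate("overlayfs"))
```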
supervisor/api/host.py

@@ -347,6 +347,10 @@ class APIHost(CoreSysAttributes):
             disk.disk_usage, self.sys_config.path_supervisor
         )

+        # Calculate used by subtracting free makes sure we include reserved space
+        # in used space reporting.
+        used = total - free
+
         known_paths = await self.sys_run_in_executor(
             disk.get_dir_sizes,
             {

@@ -365,13 +369,12 @@ class APIHost(CoreSysAttributes):
                 "id": "root",
                 "label": "Root",
                 "total_bytes": total,
-                "used_bytes": total - free,
+                "used_bytes": used,
                 "children": [
                     {
                         "id": "system",
                         "label": "System",
-                        "used_bytes": total
-                        - free
+                        "used_bytes": used
                         - sum(path["used_bytes"] for path in known_paths),
                     },
                     *known_paths,
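The point of precomputing `used = total - free` is that filesystem-reserved blocks count as used space, which a plain per-directory sum would miss. A toy calculation with assumed numbers:

```python
# Assumed example values in bytes; not taken from the diff
total = 100_000_000_000           # reported filesystem size
free = 20_000_000_000             # space actually available
known_paths_used = 30_000_000_000  # sum of the measured directories

# Used space including reserved blocks: what the API now reports for "root"
used = total - free  # 80 GB

# The "system" child is whatever remains after subtracting the known directories
system_used = used - known_paths_used  # 50 GB
print(used, system_used)
```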
supervisor/api/utils.py

@@ -63,12 +63,10 @@ def json_loads(data: Any) -> dict[str, Any]:
 def api_process(method):
     """Wrap function with true/false calls to rest api."""

-    async def wrap_api(
-        api: CoreSysAttributes, *args, **kwargs
-    ) -> web.Response | web.StreamResponse:
+    async def wrap_api(*args, **kwargs) -> web.Response | web.StreamResponse:
         """Return API information."""
         try:
-            answer = await method(api, *args, **kwargs)
+            answer = await method(*args, **kwargs)
         except BackupFileNotFoundError as err:
             return api_return_error(err, status=404)
         except APIError as err:

@@ -109,12 +107,10 @@ def api_process_raw(content, *, error_type=None):
     def wrap_method(method):
         """Wrap function with raw output to rest api."""

-        async def wrap_api(
-            api: CoreSysAttributes, *args, **kwargs
-        ) -> web.Response | web.StreamResponse:
+        async def wrap_api(*args, **kwargs) -> web.Response | web.StreamResponse:
             """Return api information."""
             try:
-                msg_data = await method(api, *args, **kwargs)
+                msg_data = await method(*args, **kwargs)
             except APIError as err:
                 return api_return_error(
                     err,
supervisor/const.py

@@ -328,6 +328,7 @@ ATTR_STATE = "state"
 ATTR_STATIC = "static"
 ATTR_STDIN = "stdin"
 ATTR_STORAGE = "storage"
+ATTR_STORAGE_DRIVER = "storage_driver"
 ATTR_SUGGESTIONS = "suggestions"
 ATTR_SUPERVISOR = "supervisor"
 ATTR_SUPERVISOR_INTERNET = "supervisor_internet"
supervisor/dbus/agent/system.py

@@ -15,3 +15,8 @@ class System(DBusInterface):
     async def schedule_wipe_device(self) -> bool:
         """Schedule a factory reset on next system boot."""
         return await self.connected_dbus.System.call("schedule_wipe_device")
+
+    @dbus_connected
+    async def migrate_docker_storage_driver(self, backend: str) -> None:
+        """Migrate Docker storage driver."""
+        await self.connected_dbus.System.call("migrate_docker_storage_driver", backend)
supervisor/dbus/manager.py

@@ -115,7 +115,7 @@ class DBusManager(CoreSysAttributes):

     async def load(self) -> None:
         """Connect interfaces to D-Bus."""
-        if not SOCKET_DBUS.exists():
+        if not await self.sys_run_in_executor(SOCKET_DBUS.exists):
             _LOGGER.error(
                 "No D-Bus support on Host. Disabled any kind of host control!"
             )
supervisor/dbus/resolved.py

@@ -75,7 +75,7 @@ class Resolved(DBusInterfaceProxy):
     @dbus_property
     def current_dns_server(
         self,
-    ) -> list[tuple[int, DNSAddressFamily, bytes]] | None:
+    ) -> tuple[int, DNSAddressFamily, bytes] | None:
         """Return current DNS server."""
         return self.properties[DBUS_ATTR_CURRENT_DNS_SERVER]

@@ -83,7 +83,7 @@ class Resolved(DBusInterfaceProxy):
     @dbus_property
     def current_dns_server_ex(
         self,
-    ) -> list[tuple[int, DNSAddressFamily, bytes, int, str]] | None:
+    ) -> tuple[int, DNSAddressFamily, bytes, int, str] | None:
         """Return current DNS server including port and server name."""
         return self.properties[DBUS_ATTR_CURRENT_DNS_SERVER_EX]
@@ -70,7 +70,7 @@ class SystemdUnit(DBusInterface):
     @dbus_connected
     async def get_active_state(self) -> UnitActiveState:
         """Get active state of the unit."""
-        return await self.connected_dbus.Unit.get("active_state")
+        return UnitActiveState(await self.connected_dbus.Unit.get("active_state"))

     @dbus_connected
     def properties_changed(self) -> DBusSignalWrapper:
@@ -9,7 +9,7 @@ from dbus_fast import Variant
 from .const import EncryptType, EraseMode


-def udisks2_bytes_to_path(path_bytes: bytearray) -> Path:
+def udisks2_bytes_to_path(path_bytes: bytes) -> Path:
     """Convert bytes to path object without null character on end."""
     if path_bytes and path_bytes[-1] == 0:
         return Path(path_bytes[:-1].decode())

@@ -73,7 +73,7 @@ FormatOptionsDataType = TypedDict(
     {
         "label": NotRequired[str],
         "take-ownership": NotRequired[bool],
-        "encrypt.passphrase": NotRequired[bytearray],
+        "encrypt.passphrase": NotRequired[bytes],
         "encrypt.type": NotRequired[str],
         "erase": NotRequired[str],
         "update-partition-type": NotRequired[bool],
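UDisks2 reports mount points as null-terminated byte arrays, and the annotation change just widens what the helper accepts (dbus-fast 3.x hands over `bytes`). A standalone illustration; the non-NUL fallback branch here is an assumption, since the diff context cuts off before it:

```python
from pathlib import Path


def udisks2_bytes_to_path(path_bytes: bytes) -> Path:
    """Strip the trailing NUL that UDisks2 appends to mount-point byte arrays."""
    if path_bytes and path_bytes[-1] == 0:
        return Path(path_bytes[:-1].decode())
    return Path(path_bytes.decode())  # assumed fallback for non-terminated input


# D-Bus "ay" payloads may arrive as bytes or bytearray with a trailing NUL
assert udisks2_bytes_to_path(b"/mnt/data\x00") == Path("/mnt/data")
assert udisks2_bytes_to_path(bytearray(b"/mnt/data\x00")) == Path("/mnt/data")
```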
supervisor/docker/addon.py

@@ -7,6 +7,7 @@ from ipaddress import IPv4Address
 import logging
 import os
 from pathlib import Path
+import tempfile
 from typing import TYPE_CHECKING, cast

 import aiodocker

@@ -705,12 +706,38 @@ class DockerAddon(DockerInterface):
         with suppress(docker.errors.NotFound):
             self.sys_docker.containers.get(builder_name).remove(force=True, v=True)

-        result = self.sys_docker.run_command(
-            ADDON_BUILDER_IMAGE,
-            version=builder_version_tag,
-            name=builder_name,
-            **build_env.get_docker_args(version, addon_image_tag),
-        )
+        # Generate Docker config with registry credentials for base image if needed
+        docker_config_path: Path | None = None
+        docker_config_content = build_env.get_docker_config_json()
+        temp_dir: tempfile.TemporaryDirectory | None = None
+
+        try:
+            if docker_config_content:
+                # Create temporary directory for docker config
+                temp_dir = tempfile.TemporaryDirectory(
+                    prefix="hassio_build_", dir=self.sys_config.path_tmp
+                )
+                docker_config_path = Path(temp_dir.name) / "config.json"
+                docker_config_path.write_text(
+                    docker_config_content, encoding="utf-8"
+                )
+                _LOGGER.debug(
+                    "Created temporary Docker config for build at %s",
+                    docker_config_path,
+                )
+
+            result = self.sys_docker.run_command(
+                ADDON_BUILDER_IMAGE,
+                version=builder_version_tag,
+                name=builder_name,
+                **build_env.get_docker_args(
+                    version, addon_image_tag, docker_config_path
+                ),
+            )
+        finally:
+            # Clean up temporary directory
+            if temp_dir:
+                temp_dir.cleanup()

         logs = result.output.decode("utf-8")
supervisor/docker/const.py

@@ -15,6 +15,12 @@ from ..const import MACHINE_ID

 RE_RETRYING_DOWNLOAD_STATUS = re.compile(r"Retrying in \d+ seconds?")

+# Docker Hub registry identifier
+DOCKER_HUB = "hub.docker.com"
+
+# Regex to match images with a registry host (e.g., ghcr.io/org/image)
+IMAGE_WITH_HOST = re.compile(r"^((?:[a-z0-9]+(?:-[a-z0-9]+)*\.)+[a-z]{2,})\/.+")
+

 class Capabilities(StrEnum):
     """Linux Capabilities."""
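The regex extracts a leading registry hostname; anything without one is treated as a Docker Hub image. A quick check of the pattern against a few image names (the example names are illustrative):

```python
import re

# Same pattern as the IMAGE_WITH_HOST constant moved into docker/const.py
IMAGE_WITH_HOST = re.compile(r"^((?:[a-z0-9]+(?:-[a-z0-9]+)*\.)+[a-z]{2,})\/.+")

for image in (
    "ghcr.io/home-assistant/amd64-hassio-supervisor",  # host prefix -> ghcr.io
    "registry.example.com/org/app",                    # host prefix -> registry.example.com
    "library/alpine",                                  # no host -> Docker Hub
    "alpine",                                          # no host -> Docker Hub
):
    match = IMAGE_WITH_HOST.match(image)
    print(image, "->", match.group(1) if match else "no registry host")
```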
supervisor/docker/interface.py

@@ -8,7 +8,6 @@ from collections.abc import Awaitable
 from contextlib import suppress
 from http import HTTPStatus
 import logging
-import re
 from time import time
 from typing import Any, cast
 from uuid import uuid4

@@ -46,16 +45,13 @@ from ..jobs.decorator import Job
 from ..jobs.job_group import JobGroup
 from ..resolution.const import ContextType, IssueType, SuggestionType
 from ..utils.sentry import async_capture_exception
-from .const import ContainerState, PullImageLayerStage, RestartPolicy
+from .const import DOCKER_HUB, ContainerState, PullImageLayerStage, RestartPolicy
 from .manager import CommandReturn, PullLogEntry
 from .monitor import DockerContainerStateEvent
 from .stats import DockerStats

 _LOGGER: logging.Logger = logging.getLogger(__name__)

-IMAGE_WITH_HOST = re.compile(r"^((?:[a-z0-9]+(?:-[a-z0-9]+)*\.)+[a-z]{2,})\/.+")
-DOCKER_HUB = "hub.docker.com"
-
 MAP_ARCH: dict[CpuArch | str, str] = {
     CpuArch.ARMV7: "linux/arm/v7",
     CpuArch.ARMHF: "linux/arm/v6",

@@ -180,25 +176,16 @@ class DockerInterface(JobGroup, ABC):
         return self.meta_config.get("Healthcheck")

     def _get_credentials(self, image: str) -> dict:
-        """Return a dictionay with credentials for docker login."""
-        registry = None
+        """Return a dictionary with credentials for docker login."""
         credentials = {}
-        matcher = IMAGE_WITH_HOST.match(image)
-
-        # Custom registry
-        if matcher:
-            if matcher.group(1) in self.sys_docker.config.registries:
-                registry = matcher.group(1)
-                credentials[ATTR_REGISTRY] = registry
-
-        # If no match assume "dockerhub" as registry
-        elif DOCKER_HUB in self.sys_docker.config.registries:
-            registry = DOCKER_HUB
+        registry = self.sys_docker.config.get_registry_for_image(image)

         if registry:
             stored = self.sys_docker.config.registries[registry]
             credentials[ATTR_USERNAME] = stored[ATTR_USERNAME]
             credentials[ATTR_PASSWORD] = stored[ATTR_PASSWORD]
+            if registry != DOCKER_HUB:
+                credentials[ATTR_REGISTRY] = registry

             _LOGGER.debug(
                 "Logging in to %s as %s",

@@ -208,17 +195,6 @@ class DockerInterface(JobGroup, ABC):

         return credentials

-    async def _docker_login(self, image: str) -> None:
-        """Try to log in to the registry if there are credentials available."""
-        if not self.sys_docker.config.registries:
-            return
-
-        credentials = self._get_credentials(image)
-        if not credentials:
-            return
-
-        await self.sys_run_in_executor(self.sys_docker.dockerpy.login, **credentials)
-
     def _process_pull_image_log(  # noqa: C901
         self, install_job_id: str, reference: PullLogEntry
     ) -> None:

@@ -250,28 +226,16 @@ class DockerInterface(JobGroup, ABC):
                 job = j
                 break

-        # This likely only occurs if the logs came in out of sync and we got progress before the Pulling FS Layer one
+        # There should no longer be any real risk of logs out of order anymore.
+        # However tests with very small images have shown that sometimes Docker
+        # skips stages in log. So keeping this one as a safety check on null job
         if not job:
             raise DockerLogOutOfOrder(
                 f"Received pull image log with status {reference.status} for image id {reference.id} and parent job {install_job_id} but could not find a matching job, skipping",
                 _LOGGER.debug,
             )

-        # Hopefully these come in order but if they sometimes get out of sync, avoid accidentally going backwards
-        # If it happens a lot though we may need to reconsider the value of this feature
-        if job.done:
-            raise DockerLogOutOfOrder(
-                f"Received pull image log with status {reference.status} for job {job.uuid} but job was done, skipping",
-                _LOGGER.debug,
-            )
-
-        if job.stage and stage < PullImageLayerStage.from_status(job.stage):
-            raise DockerLogOutOfOrder(
-                f"Received pull image log with status {reference.status} for job {job.uuid} but job was already on stage {job.stage}, skipping",
-                _LOGGER.debug,
-            )
-
-        # For progress calcuation we assume downloading and extracting are each 50% of the time and others stages negligible
+        # For progress calculation we assume downloading is 70% of time, extracting is 30% and others stages negligible
         progress = job.progress
         match stage:
             case PullImageLayerStage.DOWNLOADING | PullImageLayerStage.EXTRACTING:

@@ -280,22 +244,26 @@ class DockerInterface(JobGroup, ABC):
                     and reference.progress_detail.current
                     and reference.progress_detail.total
                 ):
-                    progress = 50 * (
+                    progress = (
                         reference.progress_detail.current
                         / reference.progress_detail.total
                     )
-                    if stage == PullImageLayerStage.EXTRACTING:
-                        progress += 50
+                    if stage == PullImageLayerStage.DOWNLOADING:
+                        progress = 70 * progress
+                    else:
+                        progress = 70 + 30 * progress
             case (
                 PullImageLayerStage.VERIFYING_CHECKSUM
                 | PullImageLayerStage.DOWNLOAD_COMPLETE
             ):
-                progress = 50
+                progress = 70
             case PullImageLayerStage.PULL_COMPLETE:
                 progress = 100
             case PullImageLayerStage.RETRYING_DOWNLOAD:
                 progress = 0

+        # No real risk of getting things out of order in current implementation
+        # but keeping this one in case another change to these trips us up.
         if stage != PullImageLayerStage.RETRYING_DOWNLOAD and progress < job.progress:
             raise DockerLogOutOfOrder(
                 f"Received pull image log with status {reference.status} for job {job.uuid} that implied progress was {progress} but current progress is {job.progress}, skipping",

@@ -359,7 +327,7 @@ class DockerInterface(JobGroup, ABC):
         progress = 0.0
         stage = PullImageLayerStage.PULL_COMPLETE
         for job in layer_jobs:
-            if not job.extra:
+            if not job.extra or not job.extra.get("total"):
                 return
             progress += job.progress * (job.extra["total"] / total)
             job_stage = PullImageLayerStage.from_status(cast(str, job.stage))

@@ -403,9 +371,8 @@ class DockerInterface(JobGroup, ABC):

         _LOGGER.info("Downloading docker image %s with tag %s.", image, version)
         try:
-            if self.sys_docker.config.registries:
-                # Try login if we have defined credentials
-                await self._docker_login(image)
+            # Get credentials for private registries to pass to aiodocker
+            credentials = self._get_credentials(image) or None

             curr_job_id = self.sys_jobs.current.uuid

@@ -421,12 +388,13 @@ class DockerInterface(JobGroup, ABC):
                 BusEvent.DOCKER_IMAGE_PULL_UPDATE, process_pull_image_log
             )

-            # Pull new image
+            # Pull new image, passing credentials to aiodocker
            docker_image = await self.sys_docker.pull_image(
                 self.sys_jobs.current.uuid,
                 image,
                 str(version),
                 platform=MAP_ARCH[image_arch],
+                auth=credentials,
             )

             # Tag latest
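Under the new weighting, a layer's progress maps the download fraction onto 0-70 and the extract fraction onto 70-100. A minimal single-layer sketch of that arithmetic; the real code additionally aggregates layers weighted by their byte totals:

```python
def layer_progress(stage: str, current: int, total: int) -> float:
    """Mirror the 70/30 download/extract weighting from the diff for one layer."""
    fraction = current / total
    if stage == "downloading":
        return 70 * fraction          # download covers the first 70%
    if stage == "extracting":
        return 70 + 30 * fraction     # extraction covers the remaining 30%
    raise ValueError(f"unexpected stage: {stage}")


assert layer_progress("downloading", 50, 100) == 35.0   # halfway through download
assert layer_progress("downloading", 100, 100) == 70.0  # download complete
assert layer_progress("extracting", 50, 100) == 85.0    # halfway through extract
assert layer_progress("extracting", 100, 100) == 100.0
```

This matches the shifted checkpoints in the hunk above: VERIFYING_CHECKSUM and DOWNLOAD_COMPLETE now pin progress at 70 instead of 50.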
supervisor/docker/manager.py

@@ -49,7 +49,7 @@ from ..exceptions import (
 )
 from ..utils.common import FileConfiguration
 from ..validate import SCHEMA_DOCKER_CONFIG
-from .const import LABEL_MANAGED
+from .const import DOCKER_HUB, IMAGE_WITH_HOST, LABEL_MANAGED
 from .monitor import DockerMonitor
 from .network import DockerNetwork

@@ -76,15 +76,25 @@ class DockerInfo:
     storage: str = attr.ib()
     logging: str = attr.ib()
     cgroup: str = attr.ib()
+    support_cpu_realtime: bool = attr.ib()

     @staticmethod
-    def new(data: dict[str, Any]):
+    async def new(data: dict[str, Any]) -> DockerInfo:
         """Create a object from docker info."""
+        # Check if CONFIG_RT_GROUP_SCHED is loaded (blocking I/O in executor)
+        cpu_rt_file_exists = await asyncio.get_running_loop().run_in_executor(
+            None, Path("/sys/fs/cgroup/cpu/cpu.rt_runtime_us").exists
+        )
+        cpu_rt_supported = (
+            cpu_rt_file_exists and os.environ.get(ENV_SUPERVISOR_CPU_RT) == "1"
+        )
+
         return DockerInfo(
             AwesomeVersion(data.get("ServerVersion", "0.0.0")),
             data.get("Driver", "unknown"),
             data.get("LoggingDriver", "unknown"),
             data.get("CgroupVersion", "1"),
+            cpu_rt_supported,
         )

     @property

@@ -95,23 +105,21 @@ class DockerInfo:
         except AwesomeVersionCompareException:
             return False

-    @property
-    def support_cpu_realtime(self) -> bool:
-        """Return true, if CONFIG_RT_GROUP_SCHED is loaded."""
-        if not Path("/sys/fs/cgroup/cpu/cpu.rt_runtime_us").exists():
-            return False
-        return bool(os.environ.get(ENV_SUPERVISOR_CPU_RT) == "1")
-

 @dataclass(frozen=True, slots=True)
 class PullProgressDetail:
     """Progress detail information for pull.

     Documentation lacking but both of these seem to be in bytes when populated.
+
+    Containerd-snapshot update - When leveraging this new feature, this information
+    becomes useless to us while extracting. It simply tells elapsed time using
+    current and units.
     """

     current: int | None = None
     total: int | None = None
+    units: str | None = None

     @classmethod
     def from_pull_log_dict(cls, value: dict[str, int]) -> PullProgressDetail:

@@ -199,6 +207,27 @@ class DockerConfig(FileConfiguration):
         """Return credentials for docker registries."""
         return self._data.get(ATTR_REGISTRIES, {})

+    def get_registry_for_image(self, image: str) -> str | None:
+        """Return the registry name if credentials are available for the image.
+
+        Matches the image against configured registries and returns the registry
+        name if found, or None if no matching credentials are configured.
+        """
+        if not self.registries:
+            return None
+
+        # Check if image uses a custom registry (e.g., ghcr.io/org/image)
+        matcher = IMAGE_WITH_HOST.match(image)
+        if matcher:
+            registry = matcher.group(1)
+            if registry in self.registries:
+                return registry
+        # If no registry prefix, check for Docker Hub credentials
+        elif DOCKER_HUB in self.registries:
+            return DOCKER_HUB
+
+        return None
+

 class DockerAPI(CoreSysAttributes):
     """Docker Supervisor wrapper.

@@ -234,7 +263,7 @@ class DockerAPI(CoreSysAttributes):
                 timeout=900,
             ),
         )
-        self._info = DockerInfo.new(self.dockerpy.info())
+        self._info = await DockerInfo.new(self.dockerpy.info())
         await self.config.read_data()
         self._network = await DockerNetwork(self.dockerpy).post_init(
             self.config.enable_ipv6, self.config.mtu

@@ -429,6 +458,7 @@ class DockerAPI(CoreSysAttributes):
         repository: str,
         tag: str = "latest",
         platform: str | None = None,
+        auth: dict[str, str] | None = None,
     ) -> dict[str, Any]:
         """Pull the specified image and return it.

@@ -438,7 +468,7 @@ class DockerAPI(CoreSysAttributes):
         on the bus so listeners can use that to update status for users.
         """
         async for e in self.images.pull(
-            repository, tag=tag, platform=platform, stream=True
+            repository, tag=tag, platform=platform, auth=auth, stream=True
         ):
             entry = PullLogEntry.from_pull_log_dict(job_id, e)
             if entry.error:
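`DockerInfo.new` becoming async exists to push the blocking `Path.exists` call off the event loop. The pattern in isolation, as a small sketch:

```python
import asyncio
from pathlib import Path


async def file_exists(path: str) -> bool:
    """Check a path without blocking the event loop, as DockerInfo.new now does."""
    # run_in_executor(None, ...) uses the loop's default thread pool
    return await asyncio.get_running_loop().run_in_executor(None, Path(path).exists)


print(asyncio.run(file_exists("/sys/fs/cgroup/cpu/cpu.rt_runtime_us")))
```

Computing the flag once at construction also lets `support_cpu_realtime` become a plain attribute instead of a property that touched the filesystem on every access.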
supervisor/jobs/__init__.py

@@ -9,7 +9,7 @@ from contextvars import Context, ContextVar, Token
 from dataclasses import dataclass
 from datetime import datetime
 import logging
-from typing import Any, Self
+from typing import Any, Self, cast
 from uuid import uuid4

 from attr.validators import gt, lt

@@ -196,7 +196,7 @@ class SupervisorJob:
         self,
         progress: float | None = None,
         stage: str | None = None,
-        extra: dict[str, Any] | None = DEFAULT,  # type: ignore
+        extra: dict[str, Any] | None | type[DEFAULT] = DEFAULT,
         done: bool | None = None,
     ) -> None:
         """Update multiple fields with one on change event."""

@@ -207,8 +207,8 @@ class SupervisorJob:
             self.progress = progress
         if stage is not None:
             self.stage = stage
-        if extra != DEFAULT:
-            self.extra = extra
+        if extra is not DEFAULT:
+            self.extra = cast(dict[str, Any] | None, extra)

         # Done has special event. use that to trigger on change if included
         # If not then just use any other field to trigger

@@ -306,19 +306,21 @@ class JobManager(FileConfiguration, CoreSysAttributes):
         reference: str | None = None,
         initial_stage: str | None = None,
         internal: bool = False,
-        parent_id: str | None = DEFAULT,  # type: ignore
+        parent_id: str | None | type[DEFAULT] = DEFAULT,
         child_job_syncs: list[ChildJobSyncFilter] | None = None,
     ) -> SupervisorJob:
         """Create a new job."""
-        job = SupervisorJob(
-            name,
-            reference=reference,
-            stage=initial_stage,
-            on_change=self._on_job_change,
-            internal=internal,
-            child_job_syncs=child_job_syncs,
-            **({} if parent_id == DEFAULT else {"parent_id": parent_id}),  # type: ignore
-        )
+        kwargs: dict[str, Any] = {
+            "reference": reference,
+            "stage": initial_stage,
+            "on_change": self._on_job_change,
+            "internal": internal,
+            "child_job_syncs": child_job_syncs,
+        }
+        if parent_id is not DEFAULT:
+            kwargs["parent_id"] = parent_id
+
+        job = SupervisorJob(name, **kwargs)

         # Shouldn't happen but inability to find a parent for progress reporting
         # shouldn't raise and break the active job
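Switching from `!= DEFAULT` to `is not DEFAULT` compares the sentinel by identity, which cannot be fooled by a value defining `__eq__`, and it lets the type checker narrow `extra` afterwards (hence the new `type[DEFAULT]` annotations and the `cast`). A minimal illustration with a stand-in sentinel class shaped like the one in the diff:

```python
class DEFAULT:
    """Stand-in sentinel type: the class object itself is the marker value."""


def update(extra: dict | None | type[DEFAULT] = DEFAULT) -> str:
    # Identity check distinguishes "argument omitted" from "explicitly passed None"
    if extra is not DEFAULT:
        return f"set extra to {extra!r}"
    return "extra left unchanged"


print(update())              # extra left unchanged
print(update(None))          # set extra to None
print(update({"total": 5}))  # set extra to {'total': 5}
```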
supervisor/misc/tasks.py

@@ -1,5 +1,6 @@
 """A collection of tasks."""

+from contextlib import suppress
 from datetime import datetime, timedelta
 import logging
 from typing import cast

@@ -13,6 +14,7 @@ from ..exceptions import (
     BackupFileNotFoundError,
     HomeAssistantError,
     ObserverError,
+    SupervisorUpdateError,
 )
 from ..homeassistant.const import LANDINGPAGE, WSType
 from ..jobs.const import JobConcurrency

@@ -174,7 +176,11 @@ class Tasks(CoreSysAttributes):
             "Found new Supervisor version %s, updating",
             self.sys_supervisor.latest_version,
         )
-        await self.sys_supervisor.update()
+
+        # Errors are logged by the exceptions, we can't really do something
+        # if an update fails here.
+        with suppress(SupervisorUpdateError):
+            await self.sys_supervisor.update()

     async def _watchdog_homeassistant_api(self):
         """Create scheduler task for monitoring running state of API.
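`contextlib.suppress` swallows only the named exception type, so a failed auto-update no longer kills the periodic task while any other error still propagates. The behavior in isolation, with a stand-in exception class:

```python
from contextlib import suppress


class SupervisorUpdateError(Exception):
    """Stand-in for supervisor.exceptions.SupervisorUpdateError."""


def failing_update() -> None:
    raise SupervisorUpdateError("update failed")


# The scheduler loop keeps running even if the auto-update fails
with suppress(SupervisorUpdateError):
    failing_update()
print("task loop continues")
```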
supervisor/mounts/mount.py

@@ -135,7 +135,7 @@ class Mount(CoreSysAttributes, ABC):
     @property
     def state(self) -> UnitActiveState | None:
         """Get state of mount."""
-        return self._state
+        return UnitActiveState(self._state) if self._state is not None else None

     @cached_property
     def local_where(self) -> Path:
supervisor/utils/dbus.py

@@ -7,13 +7,7 @@ from collections.abc import Awaitable, Callable
 import logging
 from typing import Any, Protocol, cast

-from dbus_fast import (
-    ErrorType,
-    InvalidIntrospectionError,
-    Message,
-    MessageType,
-    Variant,
-)
+from dbus_fast import ErrorType, InvalidIntrospectionError, Message, MessageType
 from dbus_fast.aio.message_bus import MessageBus
 from dbus_fast.aio.proxy_object import ProxyInterface, ProxyObject
 from dbus_fast.errors import DBusError as DBusFastDBusError

@@ -265,7 +259,7 @@ class DBus:
         """

         async def sync_property_change(
-            prop_interface: str, changed: dict[str, Variant], invalidated: list[str]
+            prop_interface: str, changed: dict[str, Any], invalidated: list[str]
         ) -> None:
             """Sync property changes to cache."""
             if interface != prop_interface:
tests/addons/test_build.py

@@ -1,5 +1,8 @@
 """Test addon build."""

+import base64
+import json
+from pathlib import Path
 from unittest.mock import PropertyMock, patch

 from awesomeversion import AwesomeVersion

@@ -7,6 +10,7 @@ from awesomeversion import AwesomeVersion
 from supervisor.addons.addon import Addon
 from supervisor.addons.build import AddonBuild
 from supervisor.coresys import CoreSys
+from supervisor.docker.const import DOCKER_HUB

 from tests.common import is_in_list

@@ -29,7 +33,7 @@ async def test_platform_set(coresys: CoreSys, install_addon_ssh: Addon):
         ),
     ):
         args = await coresys.run_in_executor(
-            build.get_docker_args, AwesomeVersion("latest"), "test-image:latest"
+            build.get_docker_args, AwesomeVersion("latest"), "test-image:latest", None
         )

     assert is_in_list(["--platform", "linux/amd64"], args["command"])

@@ -53,7 +57,7 @@ async def test_dockerfile_evaluation(coresys: CoreSys, install_addon_ssh: Addon):
         ),
     ):
         args = await coresys.run_in_executor(
-            build.get_docker_args, AwesomeVersion("latest"), "test-image:latest"
+            build.get_docker_args, AwesomeVersion("latest"), "test-image:latest", None
         )

     assert is_in_list(["--file", "Dockerfile"], args["command"])

@@ -81,7 +85,7 @@ async def test_dockerfile_evaluation_arch(coresys: CoreSys, install_addon_ssh: Addon):
         ),
     ):
         args = await coresys.run_in_executor(
-            build.get_docker_args, AwesomeVersion("latest"), "test-image:latest"
+            build.get_docker_args, AwesomeVersion("latest"), "test-image:latest", None
         )

     assert is_in_list(["--file", "Dockerfile.aarch64"], args["command"])

@@ -117,3 +121,158 @@ async def test_build_invalid(coresys: CoreSys, install_addon_ssh: Addon):
         ),
     ):
         assert not await build.is_valid()
+
+
+async def test_docker_config_no_registries(coresys: CoreSys, install_addon_ssh: Addon):
+    """Test docker config generation when no registries configured."""
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
+
+    # No registries configured by default
+    assert build.get_docker_config_json() is None
+
+
+async def test_docker_config_no_matching_registry(
+    coresys: CoreSys, install_addon_ssh: Addon
+):
+    """Test docker config generation when registry doesn't match base image."""
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
+
+    # Configure a registry that doesn't match the base image
+    # pylint: disable-next=protected-access
+    coresys.docker.config._data["registries"] = {
+        "some.other.registry": {"username": "user", "password": "pass"}
+    }
+
+    with (
+        patch.object(
+            type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
+        ),
+        patch.object(
+            type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
+        ),
+    ):
+        # Base image is ghcr.io/home-assistant/... which doesn't match
+        assert build.get_docker_config_json() is None
+
+
+async def test_docker_config_matching_registry(
+    coresys: CoreSys, install_addon_ssh: Addon
+):
+    """Test docker config generation when registry matches base image."""
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
+
+    # Configure ghcr.io registry which matches the default base image
+    # pylint: disable-next=protected-access
+    coresys.docker.config._data["registries"] = {
+        "ghcr.io": {"username": "testuser", "password": "testpass"}
+    }
+
+    with (
+        patch.object(
+            type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
+        ),
+        patch.object(
+            type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
+        ),
+    ):
+        config_json = build.get_docker_config_json()
+        assert config_json is not None
+
+        config = json.loads(config_json)
+        assert "auths" in config
+        assert "ghcr.io" in config["auths"]
+
+        # Verify base64-encoded credentials
+        expected_auth = base64.b64encode(b"testuser:testpass").decode()
+        assert config["auths"]["ghcr.io"]["auth"] == expected_auth
+
+
+async def test_docker_config_docker_hub(coresys: CoreSys, install_addon_ssh: Addon):
+    """Test docker config generation for Docker Hub registry."""
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
+
+    # Configure Docker Hub registry
+    # pylint: disable-next=protected-access
+    coresys.docker.config._data["registries"] = {
+        DOCKER_HUB: {"username": "hubuser", "password": "hubpass"}
+    }
+
+    # Mock base_image to return a Docker Hub image (no registry prefix)
+    with patch.object(
+        type(build),
+        "base_image",
+        new=PropertyMock(return_value="library/alpine:latest"),
+    ):
+        config_json = build.get_docker_config_json()
+        assert config_json is not None
+
+        config = json.loads(config_json)
+        # Docker Hub uses special URL as key
+        assert "https://index.docker.io/v1/" in config["auths"]
+
+        expected_auth = base64.b64encode(b"hubuser:hubpass").decode()
+        assert config["auths"]["https://index.docker.io/v1/"]["auth"] == expected_auth
+
+
+async def test_docker_args_with_config_path(coresys: CoreSys, install_addon_ssh: Addon):
+    """Test docker args include config volume when path provided."""
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
+
+    with (
+        patch.object(
+            type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
+        ),
+        patch.object(
+            type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
+        ),
+        patch.object(
+            type(coresys.config),
+            "local_to_extern_path",
+            side_effect=lambda p: f"/extern{p}",
+        ),
+    ):
+        config_path = Path("/data/supervisor/tmp/config.json")
+        args = await coresys.run_in_executor(
+            build.get_docker_args,
+            AwesomeVersion("latest"),
+            "test-image:latest",
+            config_path,
+        )

+        # Check that config is mounted
+        assert "/extern/data/supervisor/tmp/config.json" in args["volumes"]
+        assert (
+            args["volumes"]["/extern/data/supervisor/tmp/config.json"]["bind"]
+            == "/root/.docker/config.json"
+        )
+        assert args["volumes"]["/extern/data/supervisor/tmp/config.json"]["mode"] == "ro"
+
+
+async def test_docker_args_without_config_path(
+    coresys: CoreSys, install_addon_ssh: Addon
+):
+    """Test docker args don't include config volume when no path provided."""
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
+
+    with (
+        patch.object(
+            type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
+        ),
+        patch.object(
+            type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
+        ),
+        patch.object(
+            type(coresys.config),
+            "local_to_extern_path",
+            return_value="/addon/path/on/host",
+        ),
+    ):
+        args = await coresys.run_in_executor(
+            build.get_docker_args, AwesomeVersion("latest"), "test-image:latest", None
+        )

+        # Only docker socket and addon path should be mounted
+        assert len(args["volumes"]) == 2
+        # Verify no docker config mount
+        for bind in args["volumes"].values():
+            assert bind["bind"] != "/root/.docker/config.json"
tests/api/test_docker.py

@@ -4,6 +4,11 @@ from aiohttp.test_utils import TestClient
 import pytest

 from supervisor.coresys import CoreSys
+from supervisor.resolution.const import ContextType, IssueType, SuggestionType
+from supervisor.resolution.data import Issue, Suggestion
+
+from tests.dbus_service_mocks.agent_system import System as SystemService
+from tests.dbus_service_mocks.base import DBusServiceMock


 @pytest.mark.asyncio

@@ -84,3 +89,79 @@ async def test_registry_not_found(api_client: TestClient):
     assert resp.status == 404
     body = await resp.json()
     assert body["message"] == "Hostname bad does not exist in registries"
+
+
+@pytest.mark.parametrize("os_available", ["17.0.rc1"], indirect=True)
+async def test_api_migrate_docker_storage_driver(
+    api_client: TestClient,
+    coresys: CoreSys,
+    os_agent_services: dict[str, DBusServiceMock],
+    os_available,
+):
+    """Test Docker storage driver migration."""
+    system_service: SystemService = os_agent_services["agent_system"]
+    system_service.MigrateDockerStorageDriver.calls.clear()
+
+    resp = await api_client.post(
+        "/docker/migrate-storage-driver",
+        json={"storage_driver": "overlayfs"},
+    )
+    assert resp.status == 200
+
+    assert system_service.MigrateDockerStorageDriver.calls == [("overlayfs",)]
+    assert (
+        Issue(IssueType.REBOOT_REQUIRED, ContextType.SYSTEM)
+        in coresys.resolution.issues
+    )
+    assert (
+        Suggestion(SuggestionType.EXECUTE_REBOOT, ContextType.SYSTEM)
+        in coresys.resolution.suggestions
+    )
+
+    # Test migration back to overlay2 (graph driver)
+    system_service.MigrateDockerStorageDriver.calls.clear()
+    resp = await api_client.post(
+        "/docker/migrate-storage-driver",
+        json={"storage_driver": "overlay2"},
+    )
+    assert resp.status == 200
+    assert system_service.MigrateDockerStorageDriver.calls == [("overlay2",)]
+
+
+@pytest.mark.parametrize("os_available", ["17.0.rc1"], indirect=True)
+async def test_api_migrate_docker_storage_driver_invalid_backend(
+    api_client: TestClient,
+    os_available,
+):
+    """Test 400 is returned for invalid storage driver."""
+    resp = await api_client.post(
+        "/docker/migrate-storage-driver",
+        json={"storage_driver": "invalid"},
+    )
+    assert resp.status == 400
+
+
+async def test_api_migrate_docker_storage_driver_not_os(
+    api_client: TestClient,
+    coresys: CoreSys,
+):
+    """Test 404 is returned if not running on HAOS."""
+    resp = await api_client.post(
+        "/docker/migrate-storage-driver",
+        json={"storage_driver": "overlayfs"},
+    )
+    assert resp.status == 404
+
+
+@pytest.mark.parametrize("os_available", ["16.2"], indirect=True)
+async def test_api_migrate_docker_storage_driver_old_os(
+    api_client: TestClient,
+    coresys: CoreSys,
+    os_available,
+):
+    """Test 404 is returned if OS is older than 17.0."""
+    resp = await api_client.post(
+        "/docker/migrate-storage-driver",
+        json={"storage_driver": "overlayfs"},
+    )
+    assert resp.status == 404
@@ -323,29 +323,29 @@ async def test_api_progress_updates_home_assistant_update(
         },
         {
             "stage": None,
-            "progress": 1.2,
+            "progress": 1.7,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 2.8,
+            "progress": 4.0,
             "done": False,
         },
     ]
     assert events[-5:] == [
         {
             "stage": None,
-            "progress": 97.2,
+            "progress": 98.2,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 98.4,
+            "progress": 98.3,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 99.4,
+            "progress": 99.3,
             "done": False,
         },
         {

@@ -773,29 +773,29 @@ async def test_api_progress_updates_addon_install_update(
         },
         {
             "stage": None,
-            "progress": 1.2,
+            "progress": 1.7,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 2.8,
+            "progress": 4.0,
             "done": False,
         },
     ]
     assert events[-5:] == [
         {
             "stage": None,
-            "progress": 97.2,
+            "progress": 98.2,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 98.4,
+            "progress": 98.3,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 99.4,
+            "progress": 99.3,
             "done": False,
         },
         {

@@ -371,29 +371,29 @@ async def test_api_progress_updates_supervisor_update(
         },
         {
             "stage": None,
-            "progress": 1.2,
+            "progress": 1.7,
             "done": False,
         },
        {
             "stage": None,
-            "progress": 2.8,
+            "progress": 4.0,
             "done": False,
         },
     ]
     assert events[-5:] == [
         {
             "stage": None,
-            "progress": 97.2,
+            "progress": 98.2,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 98.4,
+            "progress": 98.3,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 99.4,
+            "progress": 99.3,
             "done": False,
         },
         {
@@ -41,51 +41,51 @@ async def test_dbus_resolved_info(
|
||||
assert resolved.dns_over_tls == DNSOverTLSEnabled.NO
|
||||
|
||||
assert len(resolved.dns) == 2
|
||||
assert resolved.dns[0] == [0, 2, inet_aton("127.0.0.1")]
|
||||
assert resolved.dns[1] == [0, 10, inet_pton(AF_INET6, "::1")]
|
||||
assert resolved.dns[0] == (0, 2, inet_aton("127.0.0.1"))
|
||||
assert resolved.dns[1] == (0, 10, inet_pton(AF_INET6, "::1"))
|
||||
assert len(resolved.dns_ex) == 2
|
||||
assert resolved.dns_ex[0] == [0, 2, inet_aton("127.0.0.1"), 0, ""]
|
||||
assert resolved.dns_ex[1] == [0, 10, inet_pton(AF_INET6, "::1"), 0, ""]
|
||||
assert resolved.dns_ex[0] == (0, 2, inet_aton("127.0.0.1"), 0, "")
|
||||
assert resolved.dns_ex[1] == (0, 10, inet_pton(AF_INET6, "::1"), 0, "")
|
||||
|
||||
assert len(resolved.fallback_dns) == 2
|
||||
assert resolved.fallback_dns[0] == [0, 2, inet_aton("1.1.1.1")]
|
||||
assert resolved.fallback_dns[1] == [
|
||||
assert resolved.fallback_dns[0] == (0, 2, inet_aton("1.1.1.1"))
|
||||
assert resolved.fallback_dns[1] == (
|
||||
0,
|
||||
10,
|
||||
inet_pton(AF_INET6, "2606:4700:4700::1111"),
|
||||
]
|
||||
)
|
||||
assert len(resolved.fallback_dns_ex) == 2
|
||||
assert resolved.fallback_dns_ex[0] == [
|
||||
assert resolved.fallback_dns_ex[0] == (
|
||||
0,
|
||||
2,
|
||||
inet_aton("1.1.1.1"),
|
||||
0,
|
||||
"cloudflare-dns.com",
|
||||
]
|
||||
assert resolved.fallback_dns_ex[1] == [
|
||||
)
|
||||
assert resolved.fallback_dns_ex[1] == (
|
||||
0,
|
||||
10,
|
||||
inet_pton(AF_INET6, "2606:4700:4700::1111"),
|
||||
0,
|
||||
"cloudflare-dns.com",
|
||||
]
|
||||
)
|
||||
|
||||
assert resolved.current_dns_server == [0, 2, inet_aton("127.0.0.1")]
|
||||
assert resolved.current_dns_server_ex == [
|
||||
assert resolved.current_dns_server == (0, 2, inet_aton("127.0.0.1"))
|
||||
assert resolved.current_dns_server_ex == (
|
||||
0,
|
||||
2,
|
||||
inet_aton("127.0.0.1"),
|
||||
0,
|
||||
"",
|
||||
]
|
||||
)
|
||||
|
||||
assert len(resolved.domains) == 1
|
||||
assert resolved.domains[0] == [0, "local.hass.io", False]
|
||||
assert resolved.domains[0] == (0, "local.hass.io", False)
|
||||
|
||||
assert resolved.transaction_statistics == [0, 100000]
|
||||
assert resolved.cache_statistics == [10, 50000, 10000]
|
||||
assert resolved.transaction_statistics == (0, 100000)
|
||||
assert resolved.cache_statistics == (10, 50000, 10000)
|
||||
assert resolved.dnssec == DNSSECValidation.NO
|
||||
assert resolved.dnssec_statistics == [0, 0, 0, 0]
|
||||
assert resolved.dnssec_statistics == (0, 0, 0, 0)
|
||||
assert resolved.dnssec_supported is False
|
||||
assert resolved.dnssec_negative_trust_anchors == [
|
||||
"168.192.in-addr.arpa",
|
||||
|
||||
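A note on the list-to-tuple change in the assertions above: dbus_fast models D-Bus STRUCT values (signatures such as "(iiay)") as Python tuples, while D-Bus arrays ("a...") remain Python lists, which is why only the inner entries changed shape. A minimal sketch of that mapping, reusing the names already imported in these tests; treat it as an illustration under that assumption, not as part of the diff:

from socket import AF_INET6, inet_aton, inet_pton

from dbus_fast import Variant

# Each STRUCT (ifindex, family, address) is a tuple; on Linux, family 2 is
# AF_INET and 10 is AF_INET6. The enclosing "a(iiay)" array stays a list.
dns_entries = [
    (0, 2, inet_aton("127.0.0.1")),
    (0, 10, inet_pton(AF_INET6, "::1")),
]
dns_variant = Variant("a(iiay)", dns_entries)
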
@@ -185,10 +185,10 @@ async def test_start_transient_unit(
"tmp-test.mount",
"fail",
[
["Description", Variant("s", "Test")],
["What", Variant("s", "//homeassistant/config")],
["Type", Variant("s", "cifs")],
["Options", Variant("s", "username=homeassistant,password=password")],
("Description", Variant("s", "Test")),
("What", Variant("s", "//homeassistant/config")),
("Type", Variant("s", "cifs")),
("Options", Variant("s", "username=homeassistant,password=password")),
],
[],
)

@@ -1,6 +1,6 @@
"""Mock of OS Agent System dbus service."""

from dbus_fast import DBusError
from dbus_fast import DBusError, ErrorType

from .base import DBusServiceMock, dbus_method

@@ -21,6 +21,7 @@ class System(DBusServiceMock):
object_path = "/io/hass/os/System"
interface = "io.hass.os.System"
response_schedule_wipe_device: bool | DBusError = True
response_migrate_docker_storage_driver: None | DBusError = None

@dbus_method()
def ScheduleWipeDevice(self) -> "b":
@@ -28,3 +29,14 @@ class System(DBusServiceMock):
if isinstance(self.response_schedule_wipe_device, DBusError):
raise self.response_schedule_wipe_device  # pylint: disable=raising-bad-type
return self.response_schedule_wipe_device

@dbus_method()
def MigrateDockerStorageDriver(self, backend: "s") -> None:
"""Migrate Docker storage driver."""
if isinstance(self.response_migrate_docker_storage_driver, DBusError):
raise self.response_migrate_docker_storage_driver  # pylint: disable=raising-bad-type
if backend not in ("overlayfs", "overlay2"):
raise DBusError(
ErrorType.FAILED,
f"unsupported driver: {backend} (only 'overlayfs' and 'overlay2' are supported)",
)
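For context, the new response_migrate_docker_storage_driver attribute follows the same pattern as response_schedule_wipe_device: a test pre-loads a reply or an error and the mock returns or raises it. A hypothetical sketch of how a test might drive it (the system_service fixture name is illustrative, not from the diff):

# Hypothetical test sketch: make the mock fail the migration call.
system_service.response_migrate_docker_storage_driver = DBusError(
    ErrorType.FAILED, "migration already in progress"
)
# A subsequent MigrateDockerStorageDriver("overlay2") call on the mock now
# raises that DBusError instead of returning the successful empty reply.
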
@@ -45,8 +45,8 @@ class Resolved(DBusServiceMock):
def DNS(self) -> "a(iiay)":
"""Get DNS."""
return [
[0, 2, bytes([127, 0, 0, 1])],
[
(0, 2, bytes([127, 0, 0, 1])),
(
0,
10,
bytes(
@@ -69,15 +69,15 @@ class Resolved(DBusServiceMock):
0x1,
]
),
],
),
]

@dbus_property(access=PropertyAccess.READ)
def DNSEx(self) -> "a(iiayqs)":
"""Get DNSEx."""
return [
[0, 2, bytes([127, 0, 0, 1]), 0, ""],
[
(0, 2, bytes([127, 0, 0, 1]), 0, ""),
(
0,
10,
bytes(
@@ -102,15 +102,15 @@ class Resolved(DBusServiceMock):
),
0,
"",
],
),
]

@dbus_property(access=PropertyAccess.READ)
def FallbackDNS(self) -> "a(iiay)":
"""Get FallbackDNS."""
return [
[0, 2, bytes([1, 1, 1, 1])],
[
(0, 2, bytes([1, 1, 1, 1])),
(
0,
10,
bytes(
@@ -133,15 +133,15 @@ class Resolved(DBusServiceMock):
0x11,
]
),
],
),
]

@dbus_property(access=PropertyAccess.READ)
def FallbackDNSEx(self) -> "a(iiayqs)":
"""Get FallbackDNSEx."""
return [
[0, 2, bytes([1, 1, 1, 1]), 0, "cloudflare-dns.com"],
[
(0, 2, bytes([1, 1, 1, 1]), 0, "cloudflare-dns.com"),
(
0,
10,
bytes(
@@ -166,33 +166,33 @@ class Resolved(DBusServiceMock):
),
0,
"cloudflare-dns.com",
],
),
]

@dbus_property(access=PropertyAccess.READ)
def CurrentDNSServer(self) -> "(iiay)":
"""Get CurrentDNSServer."""
return [0, 2, bytes([127, 0, 0, 1])]
return (0, 2, bytes([127, 0, 0, 1]))

@dbus_property(access=PropertyAccess.READ)
def CurrentDNSServerEx(self) -> "(iiayqs)":
"""Get CurrentDNSServerEx."""
return [0, 2, bytes([127, 0, 0, 1]), 0, ""]
return (0, 2, bytes([127, 0, 0, 1]), 0, "")

@dbus_property(access=PropertyAccess.READ)
def Domains(self) -> "a(isb)":
"""Get Domains."""
return [[0, "local.hass.io", False]]
return [(0, "local.hass.io", False)]

@dbus_property(access=PropertyAccess.READ)
def TransactionStatistics(self) -> "(tt)":
"""Get TransactionStatistics."""
return [0, 100000]
return (0, 100000)

@dbus_property(access=PropertyAccess.READ)
def CacheStatistics(self) -> "(ttt)":
"""Get CacheStatistics."""
return [10, 50000, 10000]
return (10, 50000, 10000)

@dbus_property(access=PropertyAccess.READ)
def DNSSEC(self) -> "s":
@@ -202,7 +202,7 @@ class Resolved(DBusServiceMock):
@dbus_property(access=PropertyAccess.READ)
def DNSSECStatistics(self) -> "(tttt)":
"""Get DNSSECStatistics."""
return [0, 0, 0, 0]
return (0, 0, 0, 0)

@dbus_property(access=PropertyAccess.READ)
def DNSSECSupported(self) -> "b":
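For orientation, the struct layouts these mock properties return mirror systemd-resolved's D-Bus API: "(iiay)" is (link ifindex, address family, address bytes), and the extended "(iiayqs)" form appends (port, server name). A short annotated sketch using only values from the mock above:

# (ifindex, family, address): ifindex 0 means global, family 2 is AF_INET.
current_dns_server = (0, 2, bytes([127, 0, 0, 1]))

# The extended form adds the port (0 selects the default, 53) and the
# server name used for certificate validation (empty when unset).
current_dns_server_ex = (0, 2, bytes([127, 0, 0, 1]), 0, "")
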
@@ -2,7 +2,8 @@

# pylint: disable=protected-access
from supervisor.coresys import CoreSys
from supervisor.docker.interface import DOCKER_HUB, DockerInterface
from supervisor.docker.const import DOCKER_HUB
from supervisor.docker.interface import DockerInterface


def test_no_credentials(coresys: CoreSys, test_docker_interface: DockerInterface):

@@ -16,7 +16,7 @@ from supervisor.addons.manager import Addon
from supervisor.const import BusEvent, CoreState, CpuArch
from supervisor.coresys import CoreSys
from supervisor.docker.const import ContainerState
from supervisor.docker.interface import DockerInterface
from supervisor.docker.interface import DOCKER_HUB, DockerInterface
from supervisor.docker.manager import PullLogEntry, PullProgressDetail
from supervisor.docker.monitor import DockerContainerStateEvent
from supervisor.exceptions import (
@@ -26,7 +26,10 @@ from supervisor.exceptions import (
DockerNotFound,
DockerRequestError,
)
from supervisor.jobs import JobSchedulerOptions, SupervisorJob
from supervisor.homeassistant.const import WSEvent, WSType
from supervisor.jobs import ChildJobSyncFilter, JobSchedulerOptions, SupervisorJob
from supervisor.jobs.decorator import Job
from supervisor.supervisor import Supervisor

from tests.common import AsyncIterator, load_json_fixture

@@ -51,7 +54,7 @@ async def test_docker_image_platform(
coresys.docker.images.inspect.return_value = {"Id": "test:1.2.3"}
await test_docker_interface.install(AwesomeVersion("1.2.3"), "test", arch=cpu_arch)
coresys.docker.images.pull.assert_called_once_with(
"test", tag="1.2.3", platform=platform, stream=True
"test", tag="1.2.3", platform=platform, auth=None, stream=True
)
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")

@@ -68,12 +71,50 @@ async def test_docker_image_default_platform(
):
await test_docker_interface.install(AwesomeVersion("1.2.3"), "test")
coresys.docker.images.pull.assert_called_once_with(
"test", tag="1.2.3", platform="linux/386", stream=True
"test", tag="1.2.3", platform="linux/386", auth=None, stream=True
)

coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")


@pytest.mark.parametrize(
"image,registry_key",
[
("homeassistant/amd64-supervisor", DOCKER_HUB),
("ghcr.io/home-assistant/amd64-supervisor", "ghcr.io"),
],
)
async def test_private_registry_credentials_passed_to_pull(
coresys: CoreSys,
test_docker_interface: DockerInterface,
image: str,
registry_key: str,
):
"""Test credentials for private registries are passed to aiodocker pull."""
coresys.docker.images.inspect.return_value = {"Id": f"{image}:1.2.3"}

# Configure registry credentials
coresys.docker.config._data["registries"] = {  # pylint: disable=protected-access
registry_key: {"username": "testuser", "password": "testpass"}
}

with patch.object(
type(coresys.supervisor), "arch", PropertyMock(return_value="amd64")
):
await test_docker_interface.install(
AwesomeVersion("1.2.3"), image, arch=CpuArch.AMD64
)

# Verify credentials were passed to aiodocker
expected_auth = {"username": "testuser", "password": "testpass"}
if registry_key != DOCKER_HUB:
expected_auth["registry"] = registry_key

coresys.docker.images.pull.assert_called_once_with(
image, tag="1.2.3", platform="linux/amd64", auth=expected_auth, stream=True
)
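The expected_auth handling in the test above encodes the convention that Docker Hub credentials are sent without a registry field, while any other registry is named explicitly. A hypothetical helper showing that shape; it illustrates the test's expectation and is not Supervisor's actual credential lookup:

def build_pull_auth(registry_key: str, creds: dict[str, str]) -> dict[str, str]:
    """Shape registry credentials the way the pull assertion above expects."""
    auth = {"username": creds["username"], "password": creds["password"]}
    if registry_key != DOCKER_HUB:
        # Registries other than Docker Hub (e.g. ghcr.io) are named explicitly.
        auth["registry"] = registry_key
    return auth
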
@pytest.mark.parametrize(
"attrs,expected",
[
@@ -276,7 +317,7 @@ async def test_install_fires_progress_events(
},
{"status": "Already exists", "progressDetail": {}, "id": "6e771e15690e"},
{"status": "Pulling fs layer", "progressDetail": {}, "id": "1578b14a573c"},
{"status": "Waiting", "progressDetail": {}, "id": "2488d0e401e1"},
{"status": "Waiting", "progressDetail": {}, "id": "1578b14a573c"},
{
"status": "Downloading",
"progressDetail": {"current": 1378, "total": 1486},
@@ -319,7 +360,7 @@ async def test_install_fires_progress_events(
):
await test_docker_interface.install(AwesomeVersion("1.2.3"), "test")
coresys.docker.images.pull.assert_called_once_with(
"test", tag="1.2.3", platform="linux/386", stream=True
"test", tag="1.2.3", platform="linux/386", auth=None, stream=True
)
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")

@@ -346,7 +387,7 @@ async def test_install_fires_progress_events(
job_id=ANY,
status="Waiting",
progress_detail=PullProgressDetail(),
id="2488d0e401e1",
id="1578b14a573c",
),
PullLogEntry(
job_id=ANY,
@@ -500,6 +541,7 @@ async def test_install_raises_on_pull_error(
"status": "Pulling from home-assistant/odroid-n2-homeassistant",
"id": "2025.7.2",
},
{"status": "Pulling fs layer", "progressDetail": {}, "id": "1578b14a573c"},
{
"status": "Downloading",
"progressDetail": {"current": 1378, "total": 1486},
@@ -554,16 +596,39 @@ async def test_install_progress_handles_download_restart(
capture_exception.assert_not_called()


@pytest.mark.parametrize(
"extract_log",
[
{
"status": "Extracting",
"progressDetail": {"current": 96, "total": 96},
"progress": "[==================================================>] 96B/96B",
"id": "02a6e69d8d00",
},
{
"status": "Extracting",
"progressDetail": {"current": 1, "units": "s"},
"progress": "1 s",
"id": "02a6e69d8d00",
},
],
ids=["normal_extract_log", "containerd_snapshot_extract_log"],
)
async def test_install_progress_handles_layers_skipping_download(
coresys: CoreSys,
test_docker_interface: DockerInterface,
capture_exception: Mock,
extract_log: dict[str, Any],
):
"""Test install handles small layers that skip the downloading phase and go directly to download complete.

Reproduces the real-world scenario from Supervisor issue #6286:
- Small layer (02a6e69d8d00) completes Download complete at 10:14:08 without ever Downloading
- Normal layer (3f4a84073184) starts Downloading at 10:14:09 with progress updates

Under containerd snapshotter this presumably can still occur and Supervisor will have even less info
since extract logs don't have a total. Supervisor should generally just ignore these and set progress
from the larger images that take all the time.
"""
coresys.core.set_state(CoreState.RUNNING)

@@ -607,12 +672,7 @@ async def test_install_progress_handles_layers_skipping_download(
},
{"status": "Pull complete", "progressDetail": {}, "id": "3f4a84073184"},
# Small layer finally extracts (10:14:58 in logs)
{
"status": "Extracting",
"progressDetail": {"current": 96, "total": 96},
"progress": "[==================================================>] 96B/96B",
"id": "02a6e69d8d00",
},
extract_log,
{"status": "Pull complete", "progressDetail": {}, "id": "02a6e69d8d00"},
{"status": "Digest: sha256:test"},
{"status": "Status: Downloaded newer image for test/image:latest"},
@@ -720,3 +780,88 @@ async def test_missing_total_handled_gracefully(
await event.wait()

capture_exception.assert_not_called()


async def test_install_progress_containerd_snapshot(
coresys: CoreSys, ha_ws_client: AsyncMock
):
"""Test install handles docker progress events using containerd snapshotter."""
coresys.core.set_state(CoreState.RUNNING)

class TestDockerInterface(DockerInterface):
"""Test interface for events."""

@property
def name(self) -> str:
"""Name of test interface."""
return "test_interface"

@Job(
name="mock_docker_interface_install",
child_job_syncs=[
ChildJobSyncFilter("docker_interface_install", progress_allocation=1.0)
],
)
async def mock_install(self) -> None:
"""Mock install."""
await super().install(
AwesomeVersion("1.2.3"), image="test", arch=CpuArch.I386
)

# Fixture emulates log as received when using containerd snapshotter
# Should not error but progress gets choppier once extraction starts
logs = load_json_fixture("docker_pull_image_log_containerd_snapshot.json")
coresys.docker.images.pull.return_value = AsyncIterator(logs)
test_docker_interface = TestDockerInterface(coresys)

with patch.object(Supervisor, "arch", PropertyMock(return_value="i386")):
await test_docker_interface.mock_install()
coresys.docker.images.pull.assert_called_once_with(
"test", tag="1.2.3", platform="linux/386", auth=None, stream=True
)
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")

await asyncio.sleep(1)

def job_event(progress: float, done: bool = False):
return {
"type": WSType.SUPERVISOR_EVENT,
"data": {
"event": WSEvent.JOB,
"data": {
"name": "mock_docker_interface_install",
"reference": "test_interface",
"uuid": ANY,
"progress": progress,
"stage": None,
"done": done,
"parent_id": None,
"errors": [],
"created": ANY,
"extra": None,
},
},
}

assert [c.args[0] for c in ha_ws_client.async_send_command.call_args_list] == [
# During downloading we get continuous progress updates from download status
job_event(0),
job_event(3.4),
job_event(8.5),
job_event(10.2),
job_event(15.3),
job_event(18.8),
job_event(29.0),
job_event(35.8),
job_event(42.6),
job_event(49.5),
job_event(56.0),
job_event(62.8),
# Downloading phase is considered 70% of total. Afterwards we only get one
# update per image, once its extraction is finished. It uses the total size
# received during downloading to determine percent complete then.
job_event(70.0),
job_event(84.8),
job_event(100),
job_event(100, True),
]
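The comment inside the assertion above describes a 70/30 weighting between the download and extraction phases. A small sketch of that arithmetic, with hypothetical names; the weight mirrors the expected events rather than copying Supervisor's internals:

DOWNLOAD_WEIGHT = 0.7  # downloading is treated as 70% of overall progress

def overall_progress(download_fraction: float, extract_fraction: float) -> float:
    """Combine per-phase fractions (0.0 to 1.0) into a single percentage."""
    return 100 * (
        DOWNLOAD_WEIGHT * download_fraction
        + (1 - DOWNLOAD_WEIGHT) * extract_fraction
    )

# All layers downloaded but none extracted yet -> the job_event(70.0) above.
assert round(overall_progress(1.0, 0.0), 1) == 70.0
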
196
tests/fixtures/docker_pull_image_log_containerd_snapshot.json
vendored
Normal file
@@ -0,0 +1,196 @@
[
{
"status": "Pulling from home-assistant/home-assistant",
"id": "2025.12.0.dev202511080235"
},
{ "status": "Pulling fs layer", "progressDetail": {}, "id": "eafecc6b43cc" },
{ "status": "Pulling fs layer", "progressDetail": {}, "id": "333270549f95" },
{
"status": "Downloading",
"progressDetail": { "current": 1048576, "total": 21863319 },
"progress": "[==\u003e ] 1.049MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 1048576, "total": 21179924 },
"progress": "[==\u003e ] 1.049MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 4194304, "total": 21863319 },
"progress": "[=========\u003e ] 4.194MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 2097152, "total": 21179924 },
"progress": "[====\u003e ] 2.097MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 7340032, "total": 21863319 },
"progress": "[================\u003e ] 7.34MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 4194304, "total": 21179924 },
"progress": "[=========\u003e ] 4.194MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 13631488, "total": 21863319 },
"progress": "[===============================\u003e ] 13.63MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 8388608, "total": 21179924 },
"progress": "[===================\u003e ] 8.389MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 17825792, "total": 21863319 },
"progress": "[========================================\u003e ] 17.83MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 12582912, "total": 21179924 },
"progress": "[=============================\u003e ] 12.58MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 21863319, "total": 21863319 },
"progress": "[==================================================\u003e] 21.86MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 16777216, "total": 21179924 },
"progress": "[=======================================\u003e ] 16.78MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 21179924, "total": 21179924 },
"progress": "[==================================================\u003e] 21.18MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Download complete",
"progressDetail": { "hidecounts": true },
"id": "eafecc6b43cc"
},
{
"status": "Download complete",
"progressDetail": { "hidecounts": true },
"id": "333270549f95"
},
{
"status": "Extracting",
"progressDetail": { "current": 1, "units": "s" },
"progress": "1 s",
"id": "333270549f95"
},
{
"status": "Extracting",
"progressDetail": { "current": 1, "units": "s" },
"progress": "1 s",
"id": "333270549f95"
},
{
"status": "Pull complete",
"progressDetail": { "hidecounts": true },
"id": "333270549f95"
},
{
"status": "Extracting",
"progressDetail": { "current": 1, "units": "s" },
"progress": "1 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 1, "units": "s" },
"progress": "1 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 2, "units": "s" },
"progress": "2 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 2, "units": "s" },
"progress": "2 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 3, "units": "s" },
"progress": "3 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 3, "units": "s" },
"progress": "3 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 4, "units": "s" },
"progress": "4 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 4, "units": "s" },
"progress": "4 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 5, "units": "s" },
"progress": "5 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 5, "units": "s" },
"progress": "5 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 6, "units": "s" },
"progress": "6 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 6, "units": "s" },
"progress": "6 s",
"id": "eafecc6b43cc"
},
{
"status": "Pull complete",
"progressDetail": { "hidecounts": true },
"id": "eafecc6b43cc"
},
{
"status": "Digest: sha256:bfc9efc13552c0c228f3d9d35987331cce68b43c9bc79c80a57eeadadd44cccf"
},
{
"status": "Status: Downloaded newer image for ghcr.io/home-assistant/home-assistant:2025.12.0.dev202511080235"
}
]
@@ -477,6 +477,7 @@ async def test_core_loads_wrong_image_for_machine(
"ghcr.io/home-assistant/qemux86-64-homeassistant",
"2024.4.0",
platform="linux/amd64",
auth=None,
)

container.remove.assert_called_once_with(force=True, v=True)
@@ -535,6 +536,7 @@ async def test_core_loads_wrong_image_for_architecture(
"ghcr.io/home-assistant/qemux86-64-homeassistant",
"2024.4.0",
platform="linux/amd64",
auth=None,
)

container.remove.assert_called_once_with(force=True, v=True)
@@ -119,10 +119,10 @@ async def test_load(
"mnt-data-supervisor-mounts-backup_test.mount",
"fail",
[
["Options", Variant("s", "noserverino,guest")],
["Type", Variant("s", "cifs")],
["Description", Variant("s", "Supervisor cifs mount: backup_test")],
["What", Variant("s", "//backup.local/backups")],
("Options", Variant("s", "noserverino,guest")),
("Type", Variant("s", "cifs")),
("Description", Variant("s", "Supervisor cifs mount: backup_test")),
("What", Variant("s", "//backup.local/backups")),
],
[],
),
@@ -130,10 +130,10 @@ async def test_load(
"mnt-data-supervisor-mounts-media_test.mount",
"fail",
[
["Options", Variant("s", "soft,timeo=200")],
["Type", Variant("s", "nfs")],
["Description", Variant("s", "Supervisor nfs mount: media_test")],
["What", Variant("s", "media.local:/media")],
("Options", Variant("s", "soft,timeo=200")),
("Type", Variant("s", "nfs")),
("Description", Variant("s", "Supervisor nfs mount: media_test")),
("What", Variant("s", "media.local:/media")),
],
[],
),
@@ -141,12 +141,12 @@ async def test_load(
"mnt-data-supervisor-media-media_test.mount",
"fail",
[
["Options", Variant("s", "bind")],
[
("Options", Variant("s", "bind")),
(
"Description",
Variant("s", "Supervisor bind mount: bind_media_test"),
],
["What", Variant("s", "/mnt/data/supervisor/mounts/media_test")],
),
("What", Variant("s", "/mnt/data/supervisor/mounts/media_test")),
],
[],
),
@@ -198,10 +198,10 @@ async def test_load_share_mount(
"mnt-data-supervisor-mounts-share_test.mount",
"fail",
[
["Options", Variant("s", "soft,timeo=200")],
["Type", Variant("s", "nfs")],
["Description", Variant("s", "Supervisor nfs mount: share_test")],
["What", Variant("s", "share.local:/share")],
("Options", Variant("s", "soft,timeo=200")),
("Type", Variant("s", "nfs")),
("Description", Variant("s", "Supervisor nfs mount: share_test")),
("What", Variant("s", "share.local:/share")),
],
[],
),
@@ -209,9 +209,9 @@ async def test_load_share_mount(
"mnt-data-supervisor-share-share_test.mount",
"fail",
[
["Options", Variant("s", "bind")],
["Description", Variant("s", "Supervisor bind mount: bind_share_test")],
["What", Variant("s", "/mnt/data/supervisor/mounts/share_test")],
("Options", Variant("s", "bind")),
("Description", Variant("s", "Supervisor bind mount: bind_share_test")),
("What", Variant("s", "/mnt/data/supervisor/mounts/share_test")),
],
[],
),
@@ -318,12 +318,12 @@ async def test_mount_failed_during_load(
"mnt-data-supervisor-media-media_test.mount",
"fail",
[
["Options", Variant("s", "ro,bind")],
[
("Options", Variant("s", "ro,bind")),
(
"Description",
Variant("s", "Supervisor bind mount: emergency_media_test"),
],
["What", Variant("s", "/mnt/data/supervisor/emergency/media_test")],
),
("What", Variant("s", "/mnt/data/supervisor/emergency/media_test")),
],
[],
)
@@ -634,10 +634,10 @@ async def test_reload_mounts_attempts_initial_mount(
"mnt-data-supervisor-mounts-media_test.mount",
"fail",
[
["Options", Variant("s", "soft,timeo=200")],
["Type", Variant("s", "nfs")],
["Description", Variant("s", "Supervisor nfs mount: media_test")],
["What", Variant("s", "media.local:/media")],
("Options", Variant("s", "soft,timeo=200")),
("Type", Variant("s", "nfs")),
("Description", Variant("s", "Supervisor nfs mount: media_test")),
("What", Variant("s", "media.local:/media")),
],
[],
),
@@ -645,9 +645,9 @@ async def test_reload_mounts_attempts_initial_mount(
"mnt-data-supervisor-media-media_test.mount",
"fail",
[
["Options", Variant("s", "bind")],
["Description", Variant("s", "Supervisor bind mount: bind_media_test")],
["What", Variant("s", "/mnt/data/supervisor/mounts/media_test")],
("Options", Variant("s", "bind")),
("Description", Variant("s", "Supervisor bind mount: bind_media_test")),
("What", Variant("s", "/mnt/data/supervisor/mounts/media_test")),
],
[],
),

@@ -105,7 +105,7 @@ async def test_cifs_mount(
"mnt-data-supervisor-mounts-test.mount",
"fail",
[
[
(
"Options",
Variant(
"s",
@@ -117,10 +117,10 @@ async def test_cifs_mount(
]
),
),
],
["Type", Variant("s", "cifs")],
["Description", Variant("s", "Supervisor cifs mount: test")],
["What", Variant("s", "//test.local/camera")],
),
("Type", Variant("s", "cifs")),
("Description", Variant("s", "Supervisor cifs mount: test")),
("What", Variant("s", "//test.local/camera")),
],
[],
)
@@ -177,10 +177,10 @@ async def test_cifs_mount_read_only(
"mnt-data-supervisor-mounts-test.mount",
"fail",
[
["Options", Variant("s", "ro,noserverino,guest")],
["Type", Variant("s", "cifs")],
["Description", Variant("s", "Supervisor cifs mount: test")],
["What", Variant("s", "//test.local/camera")],
("Options", Variant("s", "ro,noserverino,guest")),
("Type", Variant("s", "cifs")),
("Description", Variant("s", "Supervisor cifs mount: test")),
("What", Variant("s", "//test.local/camera")),
],
[],
)
@@ -237,10 +237,10 @@ async def test_nfs_mount(
"mnt-data-supervisor-mounts-test.mount",
"fail",
[
["Options", Variant("s", "port=1234,soft,timeo=200")],
["Type", Variant("s", "nfs")],
["Description", Variant("s", "Supervisor nfs mount: test")],
["What", Variant("s", "test.local:/media/camera")],
("Options", Variant("s", "port=1234,soft,timeo=200")),
("Type", Variant("s", "nfs")),
("Description", Variant("s", "Supervisor nfs mount: test")),
("What", Variant("s", "test.local:/media/camera")),
],
[],
)
@@ -283,10 +283,10 @@ async def test_nfs_mount_read_only(
"mnt-data-supervisor-mounts-test.mount",
"fail",
[
["Options", Variant("s", "ro,port=1234,soft,timeo=200")],
["Type", Variant("s", "nfs")],
["Description", Variant("s", "Supervisor nfs mount: test")],
["What", Variant("s", "test.local:/media/camera")],
("Options", Variant("s", "ro,port=1234,soft,timeo=200")),
("Type", Variant("s", "nfs")),
("Description", Variant("s", "Supervisor nfs mount: test")),
("What", Variant("s", "test.local:/media/camera")),
],
[],
)
@@ -331,10 +331,10 @@ async def test_load(
"mnt-data-supervisor-mounts-test.mount",
"fail",
[
["Options", Variant("s", "noserverino,guest")],
["Type", Variant("s", "cifs")],
["Description", Variant("s", "Supervisor cifs mount: test")],
["What", Variant("s", "//test.local/share")],
("Options", Variant("s", "noserverino,guest")),
("Type", Variant("s", "cifs")),
("Description", Variant("s", "Supervisor cifs mount: test")),
("What", Variant("s", "//test.local/share")),
],
[],
)
@@ -736,10 +736,10 @@ async def test_mount_fails_if_down(
"mnt-data-supervisor-mounts-test.mount",
"fail",
[
["Options", Variant("s", "port=1234,soft,timeo=200")],
["Type", Variant("s", "nfs")],
["Description", Variant("s", "Supervisor nfs mount: test")],
["What", Variant("s", "test.local:/media/camera")],
("Options", Variant("s", "port=1234,soft,timeo=200")),
("Type", Variant("s", "nfs")),
("Description", Variant("s", "Supervisor nfs mount: test")),
("What", Variant("s", "test.local:/media/camera")),
],
[],
)

@@ -369,7 +369,7 @@ async def test_load_with_incorrect_image(
with patch.object(DockerAPI, "pull_image", return_value=img_data) as pull_image:
await plugin.load()
pull_image.assert_called_once_with(
ANY, correct_image, "2024.4.0", platform="linux/amd64"
ANY, correct_image, "2024.4.0", platform="linux/amd64", auth=None
)

container.remove.assert_called_once_with(force=True, v=True)