Mirror of https://github.com/home-assistant/supervisor.git, synced 2025-12-21 07:18:08 +00:00

Compare commits: leave-fold ... refactor-d (10 commits)
| Author | SHA1 | Date |
|---|---|---|
| | e2a32f6cc0 | |
| | 470f82ea45 | |
| | 8ddce6239a | |
| | faec195b88 | |
| | 1949eb69ba | |
| | 727d6903a2 | |
| | 8cd3c30e00 | |
| | 12dd16c781 | |
| | 24377b51df | |
| | 91f1f548e5 | |

(Author and date cells were not captured in the page scrape and are left blank.)
.github/workflows/builder.yml (vendored): 4 changes
@@ -152,7 +152,7 @@ jobs:
- name: Upload local wheels artifact
if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'false'
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: wheels-${{ matrix.arch }}
path: wheels
@@ -248,7 +248,7 @@ jobs:
- name: Download local wheels artifact
if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'false'
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
with:
name: wheels-amd64
path: wheels
.github/workflows/ci.yaml (vendored): 36 changes
@@ -34,7 +34,7 @@ jobs:
python-version: ${{ env.DEFAULT_PYTHON }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: |
@@ -48,7 +48,7 @@ jobs:
pip install -r requirements.txt -r requirements_tests.txt
- name: Restore pre-commit environment from cache
id: cache-precommit
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: ${{ env.PRE_COMMIT_CACHE }}
lookup-only: true
@@ -76,7 +76,7 @@ jobs:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: |
@@ -88,7 +88,7 @@ jobs:
exit 1
- name: Restore pre-commit environment from cache
id: cache-precommit
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: ${{ env.PRE_COMMIT_CACHE }}
key: |
@@ -119,7 +119,7 @@ jobs:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: |
@@ -131,7 +131,7 @@ jobs:
exit 1
- name: Restore pre-commit environment from cache
id: cache-precommit
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: ${{ env.PRE_COMMIT_CACHE }}
key: |
@@ -177,7 +177,7 @@ jobs:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: |
@@ -189,7 +189,7 @@ jobs:
exit 1
- name: Restore pre-commit environment from cache
id: cache-precommit
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: ${{ env.PRE_COMMIT_CACHE }}
key: |
@@ -221,7 +221,7 @@ jobs:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: |
@@ -233,7 +233,7 @@ jobs:
exit 1
- name: Restore pre-commit environment from cache
id: cache-precommit
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: ${{ env.PRE_COMMIT_CACHE }}
key: |
@@ -265,7 +265,7 @@ jobs:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: |
@@ -307,7 +307,7 @@ jobs:
echo "key=mypy-${{ env.MYPY_CACHE_VERSION }}-$mypy_version-$(date -u '+%Y-%m-%dT%H:%M:%s')" >> $GITHUB_OUTPUT
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: >-
@@ -318,7 +318,7 @@ jobs:
echo "Failed to restore Python virtual environment from cache"
exit 1
- name: Restore mypy cache
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: .mypy_cache
key: >-
@@ -351,7 +351,7 @@ jobs:
cosign-release: "v2.5.3"
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: |
@@ -386,7 +386,7 @@ jobs:
-o console_output_style=count \
tests
- name: Upload coverage artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: coverage
path: .coverage
@@ -406,7 +406,7 @@ jobs:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: |
@@ -417,7 +417,7 @@ jobs:
echo "Failed to restore Python virtual environment from cache"
exit 1
- name: Download all coverage artifacts
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
with:
name: coverage
path: coverage/
@@ -428,4 +428,4 @@ jobs:
coverage report
coverage xml
- name: Upload coverage to Codecov
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
.github/workflows/lock.yml (vendored): 2 changes
@@ -9,7 +9,7 @@ jobs:
lock:
runs-on: ubuntu-latest
steps:
- uses: dessant/lock-threads@7266a7ce5c1df01b1c6db85bf8cd86c737dadbe7 # v6.0.0
- uses: dessant/lock-threads@1bf7ec25051fe7c00bdd17e6a7cf3d7bfb7dc771 # v5.0.1
with:
github-token: ${{ github.token }}
issue-inactive-days: "30"
.github/workflows/update_frontend.yml (vendored): 2 changes
@@ -68,7 +68,7 @@ jobs:
run: |
rm -f supervisor/api/panel/home_assistant_frontend_supervisor-*.tar.gz
- name: Create PR
uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412 # v7.0.9
with:
commit-message: "Update frontend to version ${{ needs.check-version.outputs.latest_version }}"
branch: autoupdate-frontend
@@ -27,7 +27,7 @@ RUN \
\
&& curl -Lso /usr/bin/cosign "https://github.com/home-assistant/cosign/releases/download/${COSIGN_VERSION}/cosign_${BUILD_ARCH}" \
&& chmod a+x /usr/bin/cosign \
&& pip3 install uv==0.9.18
&& pip3 install uv==0.8.9

# Install requirements
RUN \
@@ -1,7 +1,7 @@
image: ghcr.io/home-assistant/{arch}-hassio-supervisor
build_from:
aarch64: ghcr.io/home-assistant/aarch64-base-python:3.13-alpine3.22-2025.12.2
amd64: ghcr.io/home-assistant/amd64-base-python:3.13-alpine3.22-2025.12.2
aarch64: ghcr.io/home-assistant/aarch64-base-python:3.13-alpine3.22-2025.11.1
amd64: ghcr.io/home-assistant/amd64-base-python:3.13-alpine3.22-2025.11.1
cosign:
base_identity: https://github.com/home-assistant/docker-base/.*
identity: https://github.com/home-assistant/supervisor/.*
@@ -1,17 +1,17 @@
aiodns==3.6.1
aiodns==3.5.0
aiodocker==0.24.0
aiohttp==3.13.2
atomicwrites-homeassistant==1.4.1
attrs==25.4.0
awesomeversion==25.8.0
backports.zstd==1.2.0
backports.zstd==1.1.0
blockbuster==1.5.26
brotli==1.2.0
ciso8601==2.3.3
colorlog==6.10.1
cpe==1.3.1
cryptography==46.0.3
debugpy==1.8.19
debugpy==1.8.17
deepmerge==2.0
dirhash==0.5.0
docker==7.1.0
@@ -19,14 +19,14 @@ faust-cchardet==2.1.19
gitpython==3.1.45
jinja2==3.1.6
log-rate-limit==1.4.2
orjson==3.11.5
orjson==3.11.4
pulsectl==24.12.0
pyudev==0.24.4
PyYAML==6.0.3
requests==2.32.5
securetar==2025.12.0
sentry-sdk==2.48.0
sentry-sdk==2.47.0
setuptools==80.9.0
voluptuous==0.16.0
voluptuous==0.15.2
dbus-fast==3.1.2
zlib-fast==0.2.1
@@ -1,16 +1,16 @@
astroid==4.0.2
coverage==7.13.0
mypy==1.19.1
pre-commit==4.5.1
coverage==7.12.0
mypy==1.19.0
pre-commit==4.5.0
pylint==4.0.4
pytest-aiohttp==1.1.0
pytest-asyncio==1.3.0
pytest-cov==7.0.0
pytest-timeout==2.4.0
pytest==9.0.2
ruff==0.14.10
pytest==9.0.1
ruff==0.14.8
time-machine==3.1.0
types-docker==7.1.0.20251202
types-pyyaml==6.0.12.20250915
types-requests==2.32.4.20250913
urllib3==2.6.2
urllib3==2.6.0
@@ -18,7 +18,6 @@ from ..const import (
ATTR_BLK_WRITE,
ATTR_BOOT,
ATTR_CPU_PERCENT,
ATTR_DUPLICATE_LOG_FILE,
ATTR_IMAGE,
ATTR_IP_ADDRESS,
ATTR_JOB_ID,
@@ -56,7 +55,6 @@ SCHEMA_OPTIONS = vol.Schema(
vol.Optional(ATTR_AUDIO_OUTPUT): vol.Maybe(str),
vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(str),
vol.Optional(ATTR_BACKUPS_EXCLUDE_DATABASE): vol.Boolean(),
vol.Optional(ATTR_DUPLICATE_LOG_FILE): vol.Boolean(),
}
)
@@ -114,7 +112,6 @@ class APIHomeAssistant(CoreSysAttributes):
ATTR_AUDIO_INPUT: self.sys_homeassistant.audio_input,
ATTR_AUDIO_OUTPUT: self.sys_homeassistant.audio_output,
ATTR_BACKUPS_EXCLUDE_DATABASE: self.sys_homeassistant.backups_exclude_database,
ATTR_DUPLICATE_LOG_FILE: self.sys_homeassistant.duplicate_log_file,
}

@api_process
@@ -154,9 +151,6 @@ class APIHomeAssistant(CoreSysAttributes):
ATTR_BACKUPS_EXCLUDE_DATABASE
]

if ATTR_DUPLICATE_LOG_FILE in body:
self.sys_homeassistant.duplicate_log_file = body[ATTR_DUPLICATE_LOG_FILE]

await self.sys_homeassistant.save_data()

@api_process
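One side of this comparison carries a `duplicate_log_file` option in the Home Assistant API schema; the other drops it. For context, here is a minimal, self-contained sketch of the voluptuous pattern in play, an optional boolean that is only applied when the caller actually sent it. The helper name and module layout are illustrative, not Supervisor's actual API:

```python
# Sketch of the vol.Optional/vol.Boolean pattern used above (illustrative
# standalone helper, not the Supervisor module).
import voluptuous as vol

ATTR_DUPLICATE_LOG_FILE = "duplicate_log_file"

SCHEMA_OPTIONS = vol.Schema({vol.Optional(ATTR_DUPLICATE_LOG_FILE): vol.Boolean()})


def apply_options(settings: dict, body: dict) -> dict:
    """Validate the request body and merge only the keys that were sent."""
    body = SCHEMA_OPTIONS(body)
    if ATTR_DUPLICATE_LOG_FILE in body:
        settings[ATTR_DUPLICATE_LOG_FILE] = body[ATTR_DUPLICATE_LOG_FILE]
    return settings


print(apply_options({}, {"duplicate_log_file": True}))  # {'duplicate_log_file': True}
print(apply_options({}, {}))                            # {} (option untouched)
```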
@@ -179,7 +179,6 @@ ATTR_DOCKER = "docker"
ATTR_DOCKER_API = "docker_api"
ATTR_DOCUMENTATION = "documentation"
ATTR_DOMAINS = "domains"
ATTR_DUPLICATE_LOG_FILE = "duplicate_log_file"
ATTR_ENABLE = "enable"
ATTR_ENABLE_IPV6 = "enable_ipv6"
ATTR_ENABLED = "enabled"
@@ -10,13 +10,14 @@ import os
from pathlib import Path
from socket import SocketIO
import tempfile
from typing import TYPE_CHECKING, Literal, cast
from typing import TYPE_CHECKING, cast

import aiodocker
from attr import evolve
from awesomeversion import AwesomeVersion
import docker
import docker.errors
from docker.types import Mount
import requests

from ..addons.build import AddonBuild
@@ -67,11 +68,8 @@ from .const import (
PATH_SHARE,
PATH_SSL,
Capabilities,
DockerMount,
MountBindOptions,
MountType,
PropagationMode,
Ulimit,
)
from .interface import DockerInterface

@@ -274,7 +272,7 @@ class DockerAddon(DockerInterface):
}

@property
def network_mode(self) -> Literal["host"] | None:
def network_mode(self) -> str | None:
"""Return network mode for add-on."""
if self.addon.host_network:
return "host"
@@ -313,28 +311,28 @@ class DockerAddon(DockerInterface):
return None

@property
def ulimits(self) -> list[Ulimit] | None:
def ulimits(self) -> list[docker.types.Ulimit] | None:
"""Generate ulimits for add-on."""
limits: list[Ulimit] = []
limits: list[docker.types.Ulimit] = []

# Need schedule functions
if self.addon.with_realtime:
limits.append(Ulimit(name="rtprio", soft=90, hard=99))
limits.append(docker.types.Ulimit(name="rtprio", soft=90, hard=99))

# Set available memory for memlock to 128MB
mem = 128 * 1024 * 1024
limits.append(Ulimit(name="memlock", soft=mem, hard=mem))
limits.append(docker.types.Ulimit(name="memlock", soft=mem, hard=mem))

# Add configurable ulimits from add-on config
for name, config in self.addon.ulimits.items():
if isinstance(config, int):
# Simple format: both soft and hard limits are the same
limits.append(Ulimit(name=name, soft=config, hard=config))
limits.append(docker.types.Ulimit(name=name, soft=config, hard=config))
elif isinstance(config, dict):
# Detailed format: both soft and hard limits are mandatory
soft = config["soft"]
hard = config["hard"]
limits.append(Ulimit(name=name, soft=soft, hard=hard))
limits.append(docker.types.Ulimit(name=name, soft=soft, hard=hard))

# Return None if no ulimits are present
if limits:
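The `ulimits` hunk above accepts two configuration shapes from an add-on's config: a bare integer (soft and hard set to the same value) or a mapping with explicit `soft` and `hard` keys. A self-contained sketch of that parsing logic (illustrative helper, not the Supervisor API):

```python
# Parse add-on ulimit config: int means soft == hard; dict must carry both.
from dataclasses import dataclass


@dataclass(slots=True, frozen=True)
class Ulimit:
    name: str
    soft: int
    hard: int


def parse_ulimits(config: dict[str, int | dict[str, int]]) -> list[Ulimit]:
    limits: list[Ulimit] = []
    for name, value in config.items():
        if isinstance(value, int):
            # Simple format: one number for both limits
            limits.append(Ulimit(name=name, soft=value, hard=value))
        else:
            # Detailed format: both keys are mandatory
            limits.append(Ulimit(name=name, soft=value["soft"], hard=value["hard"]))
    return limits


print(parse_ulimits({"nofile": 1024, "rtprio": {"soft": 90, "hard": 99}}))
```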
@@ -353,7 +351,7 @@ class DockerAddon(DockerInterface):
return None

@property
def mounts(self) -> list[DockerMount]:
def mounts(self) -> list[Mount]:
"""Return mounts for container."""
addon_mapping = self.addon.map_volumes

@@ -363,8 +361,8 @@ class DockerAddon(DockerInterface):

mounts = [
MOUNT_DEV,
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.addon.path_extern_data.as_posix(),
target=target_data_path or PATH_PRIVATE_DATA.as_posix(),
read_only=False,
@@ -374,8 +372,8 @@ class DockerAddon(DockerInterface):
# setup config mappings
if MappingType.CONFIG in addon_mapping:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target=addon_mapping[MappingType.CONFIG].path
or PATH_HOMEASSISTANT_CONFIG_LEGACY.as_posix(),
@@ -387,8 +385,8 @@ class DockerAddon(DockerInterface):
# Map addon's public config folder if not using deprecated config option
if self.addon.addon_config_used:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.addon.path_extern_config.as_posix(),
target=addon_mapping[MappingType.ADDON_CONFIG].path
or PATH_PUBLIC_CONFIG.as_posix(),
@@ -399,8 +397,8 @@ class DockerAddon(DockerInterface):
# Map Home Assistant config in new way
if MappingType.HOMEASSISTANT_CONFIG in addon_mapping:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target=addon_mapping[MappingType.HOMEASSISTANT_CONFIG].path
or PATH_HOMEASSISTANT_CONFIG.as_posix(),
@@ -412,8 +410,8 @@ class DockerAddon(DockerInterface):

if MappingType.ALL_ADDON_CONFIGS in addon_mapping:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_addon_configs.as_posix(),
target=addon_mapping[MappingType.ALL_ADDON_CONFIGS].path
or PATH_ALL_ADDON_CONFIGS.as_posix(),
@@ -423,8 +421,8 @@ class DockerAddon(DockerInterface):

if MappingType.SSL in addon_mapping:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_ssl.as_posix(),
target=addon_mapping[MappingType.SSL].path or PATH_SSL.as_posix(),
read_only=addon_mapping[MappingType.SSL].read_only,
@@ -433,8 +431,8 @@ class DockerAddon(DockerInterface):

if MappingType.ADDONS in addon_mapping:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_addons_local.as_posix(),
target=addon_mapping[MappingType.ADDONS].path
or PATH_LOCAL_ADDONS.as_posix(),
@@ -444,8 +442,8 @@ class DockerAddon(DockerInterface):

if MappingType.BACKUP in addon_mapping:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_backup.as_posix(),
target=addon_mapping[MappingType.BACKUP].path
or PATH_BACKUP.as_posix(),
@@ -455,25 +453,25 @@ class DockerAddon(DockerInterface):

if MappingType.SHARE in addon_mapping:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_share.as_posix(),
target=addon_mapping[MappingType.SHARE].path
or PATH_SHARE.as_posix(),
read_only=addon_mapping[MappingType.SHARE].read_only,
bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
propagation=PropagationMode.RSLAVE,
)
)

if MappingType.MEDIA in addon_mapping:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_media.as_posix(),
target=addon_mapping[MappingType.MEDIA].path
or PATH_MEDIA.as_posix(),
read_only=addon_mapping[MappingType.MEDIA].read_only,
bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
propagation=PropagationMode.RSLAVE,
)
)

@@ -485,8 +483,8 @@ class DockerAddon(DockerInterface):
if not Path(gpio_path).exists():
continue
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=gpio_path,
target=gpio_path,
read_only=False,
@@ -496,8 +494,8 @@ class DockerAddon(DockerInterface):
# DeviceTree support
if self.addon.with_devicetree:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source="/sys/firmware/devicetree/base",
target="/device-tree",
read_only=True,
@@ -511,8 +509,8 @@ class DockerAddon(DockerInterface):
# Kernel Modules support
if self.addon.with_kernel_modules:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source="/lib/modules",
target="/lib/modules",
read_only=True,
@@ -530,20 +528,20 @@ class DockerAddon(DockerInterface):
# Configuration Audio
if self.addon.with_audio:
mounts += [
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.addon.path_extern_pulse.as_posix(),
target="/etc/pulse/client.conf",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_plugins.audio.path_extern_pulse.as_posix(),
target="/run/audio",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_plugins.audio.path_extern_asound.as_posix(),
target="/etc/asound.conf",
read_only=True,
@@ -553,14 +551,14 @@ class DockerAddon(DockerInterface):
# System Journal access
if self.addon.with_journald:
mounts += [
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=SYSTEMD_JOURNAL_PERSISTENT.as_posix(),
target=SYSTEMD_JOURNAL_PERSISTENT.as_posix(),
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=SYSTEMD_JOURNAL_VOLATILE.as_posix(),
target=SYSTEMD_JOURNAL_VOLATILE.as_posix(),
read_only=True,
@@ -708,9 +706,7 @@ class DockerAddon(DockerInterface):
# Remove dangling builder container if it exists by any chance
# E.g. because of an abrupt host shutdown/reboot during a build
with suppress(docker.errors.NotFound):
self.sys_docker.containers_legacy.get(builder_name).remove(
force=True, v=True
)
self.sys_docker.containers.get(builder_name).remove(force=True, v=True)

# Generate Docker config with registry credentials for base image if needed
docker_config_path: Path | None = None
@@ -837,7 +833,7 @@ class DockerAddon(DockerInterface):
"""
try:
# Load needed docker objects
container = self.sys_docker.containers_legacy.get(self.name)
container = self.sys_docker.containers.get(self.name)
# attach_socket returns SocketIO for local Docker connections (Unix socket)
socket = cast(
SocketIO, container.attach_socket(params={"stdin": 1, "stream": 1})
@@ -900,7 +896,7 @@ class DockerAddon(DockerInterface):

try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers_legacy.get, self.name
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
if self._hw_listener:
@@ -2,6 +2,9 @@

import logging

import docker
from docker.types import Mount

from ..const import DOCKER_CPU_RUNTIME_ALLOCATION
from ..coresys import CoreSysAttributes
from ..exceptions import DockerJobError
@@ -16,9 +19,7 @@ from .const import (
MOUNT_UDEV,
PATH_PRIVATE_DATA,
Capabilities,
DockerMount,
MountType,
Ulimit,
)
from .interface import DockerInterface

@@ -41,12 +42,12 @@ class DockerAudio(DockerInterface, CoreSysAttributes):
return AUDIO_DOCKER_NAME

@property
def mounts(self) -> list[DockerMount]:
def mounts(self) -> list[Mount]:
"""Return mounts for container."""
mounts = [
MOUNT_DEV,
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_audio.as_posix(),
target=PATH_PRIVATE_DATA.as_posix(),
read_only=False,
@@ -74,10 +75,10 @@ class DockerAudio(DockerInterface, CoreSysAttributes):
return [Capabilities.SYS_NICE, Capabilities.SYS_RESOURCE]

@property
def ulimits(self) -> list[Ulimit]:
def ulimits(self) -> list[docker.types.Ulimit]:
"""Generate ulimits for audio."""
# Pulseaudio by default tries to use real-time scheduling with priority of 5.
return [Ulimit(name="rtprio", soft=10, hard=10)]
return [docker.types.Ulimit(name="rtprio", soft=10, hard=10)]

@property
def cpu_rt_runtime(self) -> int | None:
@@ -2,13 +2,11 @@

from __future__ import annotations

from contextlib import suppress
from dataclasses import dataclass
from enum import Enum, StrEnum
from functools import total_ordering
from enum import StrEnum
from pathlib import PurePath
import re
from typing import Any, cast

from docker.types import Mount

from ..const import MACHINE_ID

@@ -81,145 +79,33 @@ class PropagationMode(StrEnum):
RSLAVE = "rslave"


@total_ordering
class PullImageLayerStage(Enum):
"""Job stages for pulling an image layer.

These are a subset of the statuses in a docker pull image log. They
are the standardized ones that are the most useful to us.
"""

PULLING_FS_LAYER = 1, "Pulling fs layer"
RETRYING_DOWNLOAD = 2, "Retrying download"
DOWNLOADING = 2, "Downloading"
VERIFYING_CHECKSUM = 3, "Verifying Checksum"
DOWNLOAD_COMPLETE = 4, "Download complete"
EXTRACTING = 5, "Extracting"
PULL_COMPLETE = 6, "Pull complete"

def __init__(self, order: int, status: str) -> None:
"""Set fields from values."""
self.order = order
self.status = status

def __eq__(self, value: object, /) -> bool:
"""Check equality, allow StrEnum style comparisons on status."""
with suppress(AttributeError):
return self.status == cast(PullImageLayerStage, value).status
return self.status == value

def __lt__(self, other: object) -> bool:
"""Order instances."""
with suppress(AttributeError):
return self.order < cast(PullImageLayerStage, other).order
return False

def __hash__(self) -> int:
"""Hash instance."""
return hash(self.status)

@classmethod
def from_status(cls, status: str) -> PullImageLayerStage | None:
"""Return stage instance from pull log status."""
for i in cls:
if i.status == status:
return i

# This one includes number of seconds until download so its not constant
if RE_RETRYING_DOWNLOAD_STATUS.match(status):
return cls.RETRYING_DOWNLOAD

return None
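The `PullImageLayerStage` enum above encodes both an ordering and a display status per member by giving each member an `(order, status)` tuple, then making the class orderable. A trimmed, self-contained sketch of the same pattern:

```python
# Ordered-stage enum pattern: members carry (order, status), compare by order,
# and can be looked up from a raw status string (trimmed illustration).
from enum import Enum
from functools import total_ordering


@total_ordering
class Stage(Enum):
    DOWNLOADING = 2, "Downloading"
    EXTRACTING = 5, "Extracting"
    PULL_COMPLETE = 6, "Pull complete"

    def __init__(self, order: int, status: str) -> None:
        self.order = order
        self.status = status

    def __lt__(self, other: "Stage") -> bool:
        return self.order < other.order

    @classmethod
    def from_status(cls, status: str) -> "Stage | None":
        return next((s for s in cls if s.status == status), None)


assert Stage.from_status("Downloading") is Stage.DOWNLOADING
assert Stage.DOWNLOADING < Stage.EXTRACTING  # ordering via @total_ordering
assert Stage.from_status("unknown") is None
```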
@dataclass(slots=True, frozen=True)
class MountBindOptions:
"""Bind options for docker mount."""

propagation: PropagationMode | None = None
read_only_non_recursive: bool | None = None

def to_dict(self) -> dict[str, Any]:
"""To dictionary representation."""
out: dict[str, Any] = {}
if self.propagation:
out["Propagation"] = self.propagation.value
if self.read_only_non_recursive is not None:
out["ReadOnlyNonRecursive"] = self.read_only_non_recursive
return out


@dataclass(slots=True, frozen=True)
class DockerMount:
"""A docker mount."""

type: MountType
source: str
target: str
read_only: bool
bind_options: MountBindOptions | None = None

def to_dict(self) -> dict[str, Any]:
"""To dictionary representation."""
out: dict[str, Any] = {
"Type": self.type.value,
"Source": self.source,
"Target": self.target,
"ReadOnly": self.read_only,
}
if self.bind_options:
out["BindOptions"] = self.bind_options.to_dict()
return out


@dataclass(slots=True, frozen=True)
class Ulimit:
"""A linux user limit."""

name: str
soft: int
hard: int

def to_dict(self) -> dict[str, str | int]:
"""To dictionary representation."""
return {
"Name": self.name,
"Soft": self.soft,
"Hard": self.hard,
}
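These dataclasses replace docker-py's `docker.types.Mount` and `Ulimit` on one side of the comparison; their `to_dict()` methods emit the raw Docker Engine API JSON shape directly. A usage sketch, runnable against the definitions shown above:

```python
# Assuming DockerMount, MountBindOptions, MountType and PropagationMode
# exactly as defined in the diff above: to_dict() yields the Engine API
# "Mounts" entry that docker-py used to assemble internally.
mount = DockerMount(
    type=MountType.BIND,
    source="/run/dbus",
    target="/run/dbus",
    read_only=True,
    bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
)
print(mount.to_dict())
# {'Type': 'bind', 'Source': '/run/dbus', 'Target': '/run/dbus',
#  'ReadOnly': True, 'BindOptions': {'Propagation': 'rslave'}}
```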
ENV_DUPLICATE_LOG_FILE = "HA_DUPLICATE_LOG_FILE"
ENV_TIME = "TZ"
ENV_TOKEN = "SUPERVISOR_TOKEN"
ENV_TOKEN_OLD = "HASSIO_TOKEN"

LABEL_MANAGED = "supervisor_managed"

MOUNT_DBUS = DockerMount(
type=MountType.BIND, source="/run/dbus", target="/run/dbus", read_only=True
MOUNT_DBUS = Mount(
type=MountType.BIND.value, source="/run/dbus", target="/run/dbus", read_only=True
)
MOUNT_DEV = DockerMount(
type=MountType.BIND,
source="/dev",
target="/dev",
read_only=True,
bind_options=MountBindOptions(read_only_non_recursive=True),
MOUNT_DEV = Mount(
type=MountType.BIND.value, source="/dev", target="/dev", read_only=True
)
MOUNT_DOCKER = DockerMount(
type=MountType.BIND,
MOUNT_DEV.setdefault("BindOptions", {})["ReadOnlyNonRecursive"] = True
MOUNT_DOCKER = Mount(
type=MountType.BIND.value,
source="/run/docker.sock",
target="/run/docker.sock",
read_only=True,
)
MOUNT_MACHINE_ID = DockerMount(
type=MountType.BIND,
MOUNT_MACHINE_ID = Mount(
type=MountType.BIND.value,
source=MACHINE_ID.as_posix(),
target=MACHINE_ID.as_posix(),
read_only=True,
)
MOUNT_UDEV = DockerMount(
type=MountType.BIND, source="/run/udev", target="/run/udev", read_only=True
MOUNT_UDEV = Mount(
type=MountType.BIND.value, source="/run/udev", target="/run/udev", read_only=True
)

PATH_PRIVATE_DATA = PurePath("/data")
@@ -2,11 +2,13 @@

import logging

from docker.types import Mount

from ..coresys import CoreSysAttributes
from ..exceptions import DockerJobError
from ..jobs.const import JobConcurrency
from ..jobs.decorator import Job
from .const import ENV_TIME, MOUNT_DBUS, DockerMount, MountType
from .const import ENV_TIME, MOUNT_DBUS, MountType
from .interface import DockerInterface

_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -45,8 +47,8 @@ class DockerDNS(DockerInterface, CoreSysAttributes):
security_opt=self.security_opt,
environment={ENV_TIME: self.sys_timezone},
mounts=[
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_dns.as_posix(),
target="/config",
read_only=False,
@@ -5,6 +5,7 @@ import logging
import re

from awesomeversion import AwesomeVersion
from docker.types import Mount

from ..const import LABEL_MACHINE
from ..exceptions import DockerJobError
@@ -13,7 +14,6 @@ from ..homeassistant.const import LANDINGPAGE
from ..jobs.const import JobConcurrency
from ..jobs.decorator import Job
from .const import (
ENV_DUPLICATE_LOG_FILE,
ENV_TIME,
ENV_TOKEN,
ENV_TOKEN_OLD,
@@ -25,8 +25,6 @@ from .const import (
PATH_PUBLIC_CONFIG,
PATH_SHARE,
PATH_SSL,
DockerMount,
MountBindOptions,
MountType,
PropagationMode,
)
@@ -92,15 +90,15 @@ class DockerHomeAssistant(DockerInterface):
)

@property
def mounts(self) -> list[DockerMount]:
def mounts(self) -> list[Mount]:
"""Return mounts for container."""
mounts = [
MOUNT_DEV,
MOUNT_DBUS,
MOUNT_UDEV,
# HA config folder
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target=PATH_PUBLIC_CONFIG.as_posix(),
read_only=False,
@@ -112,45 +110,41 @@ class DockerHomeAssistant(DockerInterface):
mounts.extend(
[
# All other folders
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_ssl.as_posix(),
target=PATH_SSL.as_posix(),
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_share.as_posix(),
target=PATH_SHARE.as_posix(),
read_only=False,
bind_options=MountBindOptions(
propagation=PropagationMode.RSLAVE
),
propagation=PropagationMode.RSLAVE.value,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_media.as_posix(),
target=PATH_MEDIA.as_posix(),
read_only=False,
bind_options=MountBindOptions(
propagation=PropagationMode.RSLAVE
),
propagation=PropagationMode.RSLAVE.value,
),
# Configuration audio
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_homeassistant.path_extern_pulse.as_posix(),
target="/etc/pulse/client.conf",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_plugins.audio.path_extern_pulse.as_posix(),
target="/run/audio",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_plugins.audio.path_extern_asound.as_posix(),
target="/etc/asound.conf",
read_only=True,
@@ -180,8 +174,6 @@ class DockerHomeAssistant(DockerInterface):
}
if restore_job_id:
environment[ENV_RESTORE_JOB_ID] = restore_job_id
if self.sys_homeassistant.duplicate_log_file:
environment[ENV_DUPLICATE_LOG_FILE] = "1"
await self._run(
tag=(self.sys_homeassistant.version),
name=self.name,
@@ -221,20 +213,20 @@
init=True,
entrypoint=[],
mounts=[
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target="/config",
read_only=False,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_ssl.as_posix(),
target="/ssl",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_share.as_posix(),
target="/share",
read_only=False,
@@ -13,13 +13,13 @@ from typing import Any, cast
from uuid import uuid4

import aiodocker
import aiohttp
from awesomeversion import AwesomeVersion
from awesomeversion.strategy import AwesomeVersionStrategy
import docker
from docker.models.containers import Container
import requests

from ..bus import EventListener
from ..const import (
ATTR_PASSWORD,
ATTR_REGISTRY,
@@ -35,25 +35,18 @@ from ..exceptions import (
DockerError,
DockerHubRateLimitExceeded,
DockerJobError,
DockerLogOutOfOrder,
DockerNotFound,
DockerRequestError,
)
from ..jobs import SupervisorJob
from ..jobs.const import JOB_GROUP_DOCKER_INTERFACE, JobConcurrency
from ..jobs.decorator import Job
from ..jobs.job_group import JobGroup
from ..resolution.const import ContextType, IssueType, SuggestionType
from ..utils.sentry import async_capture_exception
from .const import (
DOCKER_HUB,
DOCKER_HUB_LEGACY,
ContainerState,
PullImageLayerStage,
RestartPolicy,
)
from .const import DOCKER_HUB, DOCKER_HUB_LEGACY, ContainerState, RestartPolicy
from .manager import CommandReturn, PullLogEntry
from .monitor import DockerContainerStateEvent
from .pull_progress import ImagePullProgress
from .stats import DockerStats

_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -202,159 +195,6 @@ class DockerInterface(JobGroup, ABC):

return credentials

def _process_pull_image_log( # noqa: C901
self, install_job_id: str, reference: PullLogEntry
) -> None:
"""Process events fired from a docker while pulling an image, filtered to a given job id."""
if (
reference.job_id != install_job_id
or not reference.id
or not reference.status
or not (stage := PullImageLayerStage.from_status(reference.status))
):
return

# Pulling FS Layer is our marker for a layer that needs to be downloaded and extracted. Otherwise it already exists and we can ignore
job: SupervisorJob | None = None
if stage == PullImageLayerStage.PULLING_FS_LAYER:
job = self.sys_jobs.new_job(
name="Pulling container image layer",
initial_stage=stage.status,
reference=reference.id,
parent_id=install_job_id,
internal=True,
)
job.done = False
return

# Find our sub job to update details of
for j in self.sys_jobs.jobs:
if j.parent_id == install_job_id and j.reference == reference.id:
job = j
break

# There should no longer be any real risk of logs out of order anymore.
# However tests with very small images have shown that sometimes Docker
# skips stages in log. So keeping this one as a safety check on null job
if not job:
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for image id {reference.id} and parent job {install_job_id} but could not find a matching job, skipping",
_LOGGER.debug,
)

# For progress calculation we assume downloading is 70% of time, extracting is 30% and others stages negligible
progress = job.progress
match stage:
case PullImageLayerStage.DOWNLOADING | PullImageLayerStage.EXTRACTING:
if (
reference.progress_detail
and reference.progress_detail.current
and reference.progress_detail.total
):
progress = (
reference.progress_detail.current
/ reference.progress_detail.total
)
if stage == PullImageLayerStage.DOWNLOADING:
progress = 70 * progress
else:
progress = 70 + 30 * progress
case (
PullImageLayerStage.VERIFYING_CHECKSUM
| PullImageLayerStage.DOWNLOAD_COMPLETE
):
progress = 70
case PullImageLayerStage.PULL_COMPLETE:
progress = 100
case PullImageLayerStage.RETRYING_DOWNLOAD:
progress = 0

# No real risk of getting things out of order in current implementation
# but keeping this one in case another change to these trips us up.
if stage != PullImageLayerStage.RETRYING_DOWNLOAD and progress < job.progress:
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for job {job.uuid} that implied progress was {progress} but current progress is {job.progress}, skipping",
_LOGGER.debug,
)

# Our filters have all passed. Time to update the job
# Only downloading and extracting have progress details. Use that to set extra
# We'll leave it around on later stages as the total bytes may be useful after that stage
# Enforce range to prevent float drift error
progress = max(0, min(progress, 100))
if (
stage in {PullImageLayerStage.DOWNLOADING, PullImageLayerStage.EXTRACTING}
and reference.progress_detail
and reference.progress_detail.current is not None
and reference.progress_detail.total is not None
):
job.update(
progress=progress,
stage=stage.status,
extra={
"current": reference.progress_detail.current,
"total": reference.progress_detail.total,
},
)
else:
# If we reach DOWNLOAD_COMPLETE without ever having set extra (small layers that skip
# the downloading phase), set a minimal extra so aggregate progress calculation can proceed
extra = job.extra
if stage == PullImageLayerStage.DOWNLOAD_COMPLETE and not job.extra:
extra = {"current": 1, "total": 1}

job.update(
progress=progress,
stage=stage.status,
done=stage == PullImageLayerStage.PULL_COMPLETE,
extra=None if stage == PullImageLayerStage.RETRYING_DOWNLOAD else extra,
)

# Once we have received a progress update for every child job, start to set status of the main one
install_job = self.sys_jobs.get_job(install_job_id)
layer_jobs = [
job
for job in self.sys_jobs.jobs
if job.parent_id == install_job.uuid
and job.name == "Pulling container image layer"
]

# First set the total bytes to be downloaded/extracted on the main job
if not install_job.extra:
total = 0
for job in layer_jobs:
if not job.extra:
return
total += job.extra["total"]
install_job.extra = {"total": total}
else:
total = install_job.extra["total"]

# Then determine total progress based on progress of each sub-job, factoring in size of each compared to total
progress = 0.0
stage = PullImageLayerStage.PULL_COMPLETE
for job in layer_jobs:
if not job.extra or not job.extra.get("total"):
return
progress += job.progress * (job.extra["total"] / total)
job_stage = PullImageLayerStage.from_status(cast(str, job.stage))

if job_stage < PullImageLayerStage.EXTRACTING:
stage = PullImageLayerStage.DOWNLOADING
elif (
stage == PullImageLayerStage.PULL_COMPLETE
and job_stage < PullImageLayerStage.PULL_COMPLETE
):
stage = PullImageLayerStage.EXTRACTING

# Ensure progress is 100 at this point to prevent float drift
if stage == PullImageLayerStage.PULL_COMPLETE:
progress = 100

# To reduce noise, limit updates to when result has changed by an entire percent or when stage changed
if stage != install_job.stage or progress >= install_job.progress + 1:
install_job.update(stage=stage.status, progress=max(0, min(progress, 100)))
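The progress model above weights a layer's download phase as 70% of its work and extraction as the remaining 30%, with the other stages treated as negligible. A standalone sketch of that weighting (the helper name is illustrative):

```python
# 70/30 layer-progress weighting: downloading maps to 0-70, extracting to 70-100.
def layer_progress(stage: str, current: int, total: int) -> float:
    """Return 0-100 progress for one image layer."""
    fraction = current / total if total else 0.0
    if stage == "Downloading":
        return 70 * fraction
    if stage == "Extracting":
        return 70 + 30 * fraction
    if stage == "Pull complete":
        return 100.0
    return 0.0


print(layer_progress("Downloading", 50, 100))  # 35.0
print(layer_progress("Extracting", 50, 100))   # 85.0
```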
@Job(
name="docker_interface_install",
on_condition=DockerJobError,
@@ -374,33 +214,55 @@
raise ValueError("Cannot pull without an image!")

image_arch = arch or self.sys_arch.supervisor
listener: EventListener | None = None
platform = MAP_ARCH[image_arch]
pull_progress = ImagePullProgress()
current_job = self.sys_jobs.current

# Try to fetch manifest for accurate size-based progress
# This is optional - if it fails, we fall back to count-based progress
try:
manifest = await self.sys_docker.manifest_fetcher.get_manifest(
image, str(version), platform=platform
)
if manifest:
pull_progress.set_manifest(manifest)
_LOGGER.debug(
"Using manifest for progress: %d layers, %d bytes",
manifest.layer_count,
manifest.total_size,
)
except (aiohttp.ClientError, TimeoutError) as err:
_LOGGER.warning("Could not fetch manifest for progress: %s", err)

async def process_pull_event(event: PullLogEntry) -> None:
"""Process pull event and update job progress."""
if event.job_id != current_job.uuid:
return

# Process event through progress tracker
pull_progress.process_event(event)

# Update job if progress changed significantly (>= 1%)
should_update, progress = pull_progress.should_update_job()
if should_update:
stage = pull_progress.get_stage()
current_job.update(progress=progress, stage=stage)

listener = self.sys_bus.register_event(
BusEvent.DOCKER_IMAGE_PULL_UPDATE, process_pull_event
)

_LOGGER.info("Downloading docker image %s with tag %s.", image, version)
try:
# Get credentials for private registries to pass to aiodocker
credentials = self._get_credentials(image) or None

curr_job_id = self.sys_jobs.current.uuid

async def process_pull_image_log(reference: PullLogEntry) -> None:
try:
self._process_pull_image_log(curr_job_id, reference)
except DockerLogOutOfOrder as err:
# Send all these to sentry. Missing a few progress updates
# shouldn't matter to users but matters to us
await async_capture_exception(err)

listener = self.sys_bus.register_event(
BusEvent.DOCKER_IMAGE_PULL_UPDATE, process_pull_image_log
)

# Pull new image, passing credentials to aiodocker
docker_image = await self.sys_docker.pull_image(
self.sys_jobs.current.uuid,
current_job.uuid,
image,
str(version),
platform=MAP_ARCH[image_arch],
platform=platform,
auth=credentials,
)

@@ -445,8 +307,7 @@
f"Unknown error with {image}:{version!s} -> {err!s}", _LOGGER.error
) from err
finally:
if listener:
self.sys_bus.remove_listener(listener)
self.sys_bus.remove_listener(listener)

self._meta = docker_image
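Both variants above register a bus listener that filters pull events by job id and throttles job updates to roughly one-percent steps. A self-contained sketch of that filter-and-throttle pattern (names are illustrative, not the real `ImagePullProgress` API):

```python
# Filter events by job id; report only when progress moved by >= 1 percent.
from dataclasses import dataclass, field


@dataclass
class PullEvent:
    job_id: str
    progress: float  # 0-100


@dataclass
class ProgressTracker:
    job_id: str
    last_reported: float = field(default=-1.0)

    def on_event(self, event: PullEvent) -> float | None:
        """Return a new progress value to report, or None to stay quiet."""
        if event.job_id != self.job_id:
            return None  # event belongs to a different pull
        if event.progress >= self.last_reported + 1:
            self.last_reported = event.progress
            return event.progress
        return None


tracker = ProgressTracker(job_id="abc")
print(tracker.on_event(PullEvent("abc", 0.5)))  # 0.5 (first report)
print(tracker.on_event(PullEvent("abc", 1.2)))  # None (< 1% since last report)
print(tracker.on_event(PullEvent("abc", 2.0)))  # 2.0
```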
@@ -461,7 +322,7 @@
"""Get docker container, returns None if not found."""
try:
return await self.sys_run_in_executor(
self.sys_docker.containers_legacy.get, self.name
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
return None
@@ -493,7 +354,7 @@
"""Attach to running Docker container."""
with suppress(docker.errors.DockerException, requests.RequestException):
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers_legacy.get, self.name
self.sys_docker.containers.get, self.name
)
self._meta = docker_container.attrs
self.sys_docker.monitor.watch_container(docker_container)
@@ -533,11 +394,8 @@
"""Run Docker image."""
raise NotImplementedError()

async def _run(self, *, name: str, **kwargs) -> None:
"""Run Docker image with retry if necessary."""
if not (image := self.image):
raise ValueError(f"Cannot determine image to use to run {self.name}!")

async def _run(self, **kwargs) -> None:
"""Run Docker image with retry inf necessary."""
if await self.is_running():
return

@@ -546,14 +404,16 @@

# Create & Run container
try:
container_metadata = await self.sys_docker.run(image, name=name, **kwargs)
docker_container = await self.sys_run_in_executor(
self.sys_docker.run, self.image, **kwargs
)
except DockerNotFound as err:
# If image is missing, capture the exception as this shouldn't happen
await async_capture_exception(err)
raise

# Store metadata
self._meta = container_metadata
self._meta = docker_container.attrs

@Job(
name="docker_interface_stop",
@@ -13,12 +13,10 @@ import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
import re
|
||||
from typing import Any, Final, Literal, Self, cast
|
||||
from typing import Any, Final, Self, cast
|
||||
|
||||
import aiodocker
|
||||
from aiodocker.containers import DockerContainers
|
||||
from aiodocker.images import DockerImages
|
||||
from aiodocker.types import JSONObject
|
||||
from aiohttp import ClientSession, ClientTimeout, UnixConnector
|
||||
import attr
|
||||
from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
|
||||
@@ -51,16 +49,8 @@ from ..exceptions import (
|
||||
)
|
||||
from ..utils.common import FileConfiguration
|
||||
from ..validate import SCHEMA_DOCKER_CONFIG
|
||||
from .const import (
|
||||
DOCKER_HUB,
|
||||
DOCKER_HUB_LEGACY,
|
||||
LABEL_MANAGED,
|
||||
Capabilities,
|
||||
DockerMount,
|
||||
MountType,
|
||||
RestartPolicy,
|
||||
Ulimit,
|
||||
)
|
||||
from .const import DOCKER_HUB, DOCKER_HUB_LEGACY, LABEL_MANAGED
|
||||
from .manifest import RegistryManifestFetcher
|
||||
from .monitor import DockerMonitor
|
||||
from .network import DockerNetwork
|
||||
from .utils import get_registry_from_image
|
||||
@@ -269,6 +259,9 @@ class DockerAPI(CoreSysAttributes):
|
||||
self._info: DockerInfo | None = None
|
||||
self.config: DockerConfig = DockerConfig()
|
||||
self._monitor: DockerMonitor = DockerMonitor(coresys)
|
||||
self._manifest_fetcher: RegistryManifestFetcher = RegistryManifestFetcher(
|
||||
coresys
|
||||
)
|
||||
|
||||
async def post_init(self) -> Self:
|
||||
"""Post init actions that must be done in event loop."""
|
||||
@@ -308,13 +301,8 @@ class DockerAPI(CoreSysAttributes):
|
||||
return self.docker.images
|
||||
|
||||
@property
|
||||
def containers(self) -> DockerContainers:
|
||||
def containers(self) -> ContainerCollection:
|
||||
"""Return API containers."""
|
||||
return self.docker.containers
|
||||
|
||||
@property
|
||||
def containers_legacy(self) -> ContainerCollection:
|
||||
"""Return API containers from Dockerpy."""
|
||||
return self.dockerpy.containers
|
||||
|
||||
@property
|
||||
@@ -339,6 +327,11 @@ class DockerAPI(CoreSysAttributes):
|
||||
"""Return docker events monitor."""
|
||||
return self._monitor
|
||||
|
||||
@property
|
||||
def manifest_fetcher(self) -> RegistryManifestFetcher:
|
||||
"""Return manifest fetcher for registry access."""
|
||||
return self._manifest_fetcher
|
||||
|
||||
async def load(self) -> None:
|
||||
"""Start docker events monitor."""
|
||||
await self.monitor.load()
|
||||
@@ -347,137 +340,50 @@ class DockerAPI(CoreSysAttributes):
|
||||
"""Stop docker events monitor."""
|
||||
await self.monitor.unload()
|
||||
|
||||
def _create_container_config(
|
||||
def run(
|
||||
self,
|
||||
image: str,
|
||||
*,
|
||||
tag: str = "latest",
|
||||
dns: bool = True,
|
||||
init: bool = False,
|
||||
hostname: str | None = None,
|
||||
detach: bool = True,
|
||||
security_opt: list[str] | None = None,
|
||||
restart_policy: dict[str, RestartPolicy] | None = None,
|
||||
extra_hosts: dict[str, IPv4Address] | None = None,
|
||||
environment: dict[str, str | None] | None = None,
|
||||
mounts: list[DockerMount] | None = None,
|
||||
ports: dict[str, str | int | None] | None = None,
|
||||
oom_score_adj: int | None = None,
|
||||
network_mode: Literal["host"] | None = None,
|
||||
privileged: bool = False,
|
||||
device_cgroup_rules: list[str] | None = None,
|
||||
tmpfs: dict[str, str] | None = None,
|
||||
entrypoint: list[str] | None = None,
|
||||
cap_add: list[Capabilities] | None = None,
|
||||
ulimits: list[Ulimit] | None = None,
|
||||
cpu_rt_runtime: int | None = None,
|
||||
stdin_open: bool = False,
|
||||
pid_mode: str | None = None,
|
||||
uts_mode: str | None = None,
|
||||
) -> JSONObject:
|
||||
"""Map kwargs to create container config.
|
||||
ipv4: IPv4Address | None = None,
|
||||
**kwargs: Any,
|
||||
) -> Container:
|
||||
"""Create a Docker container and run it.
|
||||
|
||||
This only covers the docker options we currently use. It is not intended
|
||||
to be exhaustive as its dockerpy equivalent was. We'll add to it as we
|
||||
make use of new feature.
|
||||
Need run inside executor.
|
||||
"""
|
||||
# Set up host dependent config for container
host_config: dict[str, Any] = {
"NetworkMode": network_mode if network_mode else "default",
"Init": init,
"Privileged": privileged,
}
if security_opt:
host_config["SecurityOpt"] = security_opt
if restart_policy:
host_config["RestartPolicy"] = restart_policy
if extra_hosts:
host_config["ExtraHosts"] = [f"{k}:{v}" for k, v in extra_hosts.items()]
if mounts:
host_config["Mounts"] = [mount.to_dict() for mount in mounts]
if oom_score_adj is not None:
host_config["OomScoreAdj"] = oom_score_adj
if device_cgroup_rules:
host_config["DeviceCgroupRules"] = device_cgroup_rules
if tmpfs:
host_config["Tmpfs"] = tmpfs
if cap_add:
host_config["CapAdd"] = cap_add
if cpu_rt_runtime is not None:
host_config["CPURealtimeRuntime"] = cpu_rt_runtime
if pid_mode:
host_config["PidMode"] = pid_mode
if uts_mode:
host_config["UtsMode"] = uts_mode
if ulimits:
host_config["Ulimits"] = [limit.to_dict() for limit in ulimits]
name: str | None = kwargs.get("name")
network_mode: str | None = kwargs.get("network_mode")
hostname: str | None = kwargs.get("hostname")

# Full container config
config: dict[str, Any] = {
"Image": f"{image}:{tag}",
"Labels": {LABEL_MANAGED: ""},
"OpenStdin": stdin_open,
"StdinOnce": not detach and stdin_open,
"AttachStdin": not detach and stdin_open,
"AttachStdout": not detach,
"AttachStderr": not detach,
"HostConfig": host_config,
}
if hostname:
config["Hostname"] = hostname
if environment:
config["Env"] = [
env if val is None else f"{env}={val}"
for env, val in environment.items()
]
if entrypoint:
config["Entrypoint"] = entrypoint
if "labels" not in kwargs:
kwargs["labels"] = {}
elif isinstance(kwargs["labels"], list):
kwargs["labels"] = dict.fromkeys(kwargs["labels"], "")

# Set up networking
kwargs["labels"][LABEL_MANAGED] = ""

# Setup DNS
if dns:
host_config["Dns"] = [str(self.network.dns)]
host_config["DnsSearch"] = [DNS_SUFFIX]
kwargs["dns"] = [str(self.network.dns)]
kwargs["dns_search"] = [DNS_SUFFIX]
# CoreDNS forward plug-in fails in ~6s, then fallback triggers.
# However, the default timeout of glibc and musl is 5s. Increase
# default timeout to make sure CoreDNS fallback is working
# on first query.
host_config["DnsOptions"] = ["timeout:10"]
kwargs["dns_opt"] = ["timeout:10"]
if hostname:
config["Domainname"] = DNS_SUFFIX
kwargs["domainname"] = DNS_SUFFIX

# Setup ports
if ports:
port_bindings = {
port if "/" in port else f"{port}/tcp": [
{"HostIp": "", "HostPort": str(host_port) if host_port else ""}
]
for port, host_port in ports.items()
}
config["ExposedPorts"] = {port: {} for port in port_bindings}
host_config["PortBindings"] = port_bindings

return config
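For reference, the config dict assembled above maps directly onto the Docker Engine container-create API payload. A minimal sketch of the resulting shape, with illustrative values only (the rendering of LABEL_MANAGED, the DNS server address, and DNS_SUFFIX are assumptions, not taken from this diff):

example_config = {
    "Image": "ghcr.io/home-assistant/amd64-hassio-supervisor:latest",
    "Labels": {"supervisor_managed": ""},  # assumed rendering of LABEL_MANAGED
    "OpenStdin": False,
    "StdinOnce": False,
    "AttachStdin": False,
    "AttachStdout": False,
    "AttachStderr": False,
    "HostConfig": {
        "NetworkMode": "default",
        "Init": True,
        "Privileged": False,
        "Dns": ["172.30.32.3"],  # assumed value of self.network.dns
        "DnsSearch": ["local.hass.io"],  # assumed value of DNS_SUFFIX
        "DnsOptions": ["timeout:10"],
        "PortBindings": {"8123/tcp": [{"HostIp": "", "HostPort": "8123"}]},
    },
    "ExposedPorts": {"8123/tcp": {}},
}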
async def run(
self,
image: str,
*,
name: str,
tag: str = "latest",
hostname: str | None = None,
mounts: list[DockerMount] | None = None,
network_mode: Literal["host"] | None = None,
ipv4: IPv4Address | None = None,
**kwargs,
) -> dict[str, Any]:
"""Create a Docker container and run it."""
if not image or not name:
raise ValueError("image, name and tag cannot be an empty string!")
# Setup network
if not network_mode:
kwargs["network"] = None

# Setup cidfile and bind mount it
cidfile_path = self.coresys.config.path_cid_files / f"{name}.cid"
cidfile_path = None
if name:
cidfile_path = self.coresys.config.path_cid_files / f"{name}.cid"

def create_cidfile() -> None:
# Remove the file/directory if it exists e.g. as a leftover from unclean shutdown
# Note: Can be a directory if Docker auto-started container with restart policy
# before Supervisor could write the CID file
@@ -491,37 +397,31 @@ class DockerAPI(CoreSysAttributes):
# from creating it as a directory if container auto-starts
cidfile_path.touch()

await self.sys_run_in_executor(create_cidfile)
extern_cidfile_path = (
self.coresys.config.path_extern_cid_files / f"{name}.cid"
)

# Bind mount to /run/cid in container
extern_cidfile_path = self.coresys.config.path_extern_cid_files / f"{name}.cid"
cid_mount = DockerMount(
type=MountType.BIND,
source=extern_cidfile_path.as_posix(),
target="/run/cid",
read_only=True,
)
if mounts is None:
mounts = [cid_mount]
else:
mounts = [*mounts, cid_mount]
# Bind mount to /run/cid in container
if "volumes" not in kwargs:
kwargs["volumes"] = {}
kwargs["volumes"][str(extern_cidfile_path)] = {
"bind": "/run/cid",
"mode": "ro",
}

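The cidfile handling above guards against leftovers from unclean shutdowns; part of create_cidfile is elided by the diff view, but the surviving comments describe the pattern. A hedged sketch of that guard, illustrative only and not the elided body:

import shutil
from pathlib import Path


def create_cidfile_sketch(cidfile_path: Path) -> None:
    # Clear a stale file, or a directory Docker created by auto-starting
    # the container before the Supervisor wrote the CID file.
    if cidfile_path.is_dir():
        shutil.rmtree(cidfile_path, ignore_errors=True)
    elif cidfile_path.exists():
        cidfile_path.unlink()
    # Pre-create an empty file so the bind mount cannot turn it into a directory.
    cidfile_path.touch()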
# Create container
config = self._create_container_config(
image,
tag=tag,
hostname=hostname,
mounts=mounts,
network_mode=network_mode,
**kwargs,
)
try:
container = await self.containers.create(config, name=name)
except aiodocker.DockerError as err:
if err.status == HTTPStatus.NOT_FOUND:
raise DockerNotFound(
f"Image {image}:{tag} does not exist for {name}", _LOGGER.error
) from err
container = self.containers.create(
f"{image}:{tag}", use_config_proxy=False, **kwargs
)
if cidfile_path:
with cidfile_path.open("w", encoding="ascii") as cidfile:
cidfile.write(str(container.id))
except docker_errors.NotFound as err:
raise DockerNotFound(
f"Image {image}:{tag} does not exist for {name}", _LOGGER.error
) from err
except docker_errors.DockerException as err:
raise DockerAPIError(
f"Can't create container from {name}: {err}", _LOGGER.error
) from err
@@ -530,61 +430,43 @@ class DockerAPI(CoreSysAttributes):
f"Dockerd connection issue for {name}: {err}", _LOGGER.error
) from err

# Setup network and store container id in cidfile
def setup_network_and_cidfile() -> None:
# Write cidfile
with cidfile_path.open("w", encoding="ascii") as cidfile:
cidfile.write(str(container.id))

# Attach network
if not network_mode:
alias = [hostname] if hostname else None
try:
self.network.attach_container(
container.id, name, alias=alias, ipv4=ipv4
)
except DockerError:
_LOGGER.warning("Can't attach %s to hassio-network!", name)
else:
with suppress(DockerError):
self.network.detach_default_bridge(container.id, name)
# Attach network
if not network_mode:
alias = [hostname] if hostname else None
try:
self.network.attach_container(container, alias=alias, ipv4=ipv4)
except DockerError:
_LOGGER.warning("Can't attach %s to hassio-network!", name)
else:
host_network: Network = self.dockerpy.networks.get(DOCKER_NETWORK_HOST)
with suppress(DockerError):
self.network.detach_default_bridge(container)
else:
host_network: Network = self.dockerpy.networks.get(DOCKER_NETWORK_HOST)

# Check if container is registered on host
# https://github.com/moby/moby/issues/23302
if name and name in (
val.get("Name")
for val in host_network.attrs.get("Containers", {}).values()
):
with suppress(docker_errors.NotFound):
host_network.disconnect(name, force=True)

await self.sys_run_in_executor(setup_network_and_cidfile)
# Check if container is registered on host
# https://github.com/moby/moby/issues/23302
if name and name in (
val.get("Name")
for val in host_network.attrs.get("Containers", {}).values()
):
with suppress(docker_errors.NotFound):
host_network.disconnect(name, force=True)

# Run container
try:
await container.start()
except aiodocker.DockerError as err:
container.start()
except docker_errors.DockerException as err:
raise DockerAPIError(f"Can't start {name}: {err}", _LOGGER.error) from err
except requests.RequestException as err:
raise DockerRequestError(
f"Dockerd connection issue for {name}: {err}", _LOGGER.error
) from err

# Get container metadata after the container is started
try:
container_attrs = await container.show()
except aiodocker.DockerError as err:
raise DockerAPIError(
f"Can't inspect started container {name}: {err}", _LOGGER.error
) from err
except requests.RequestException as err:
raise DockerRequestError(
f"Dockerd connection issue for {name}: {err}", _LOGGER.error
) from err
# Update metadata
with suppress(docker_errors.DockerException, requests.RequestException):
container.reload()

return container_attrs
return container

async def pull_image(
self,
@@ -737,9 +619,7 @@ class DockerAPI(CoreSysAttributes):
) -> bool:
"""Return True if docker container exists in good state and is built from expected image."""
try:
docker_container = await self.sys_run_in_executor(
self.containers_legacy.get, name
)
docker_container = await self.sys_run_in_executor(self.containers.get, name)
docker_image = await self.images.inspect(f"{image}:{version}")
except docker_errors.NotFound:
return False
@@ -768,7 +648,7 @@ class DockerAPI(CoreSysAttributes):
) -> None:
"""Stop/remove Docker container."""
try:
docker_container: Container = self.containers_legacy.get(name)
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
# Generally suppressed so we don't log this
raise DockerNotFound() from None
@@ -795,7 +675,7 @@ class DockerAPI(CoreSysAttributes):
def start_container(self, name: str) -> None:
"""Start Docker container."""
try:
docker_container: Container = self.containers_legacy.get(name)
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"{name} not found for starting up", _LOGGER.error
@@ -814,7 +694,7 @@ class DockerAPI(CoreSysAttributes):
def restart_container(self, name: str, timeout: int) -> None:
"""Restart docker container."""
try:
container: Container = self.containers_legacy.get(name)
container: Container = self.containers.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"Container {name} not found for restarting", _LOGGER.warning
@@ -833,7 +713,7 @@ class DockerAPI(CoreSysAttributes):
def container_logs(self, name: str, tail: int = 100) -> bytes:
"""Return Docker logs of container."""
try:
docker_container: Container = self.containers_legacy.get(name)
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"Container {name} not found for logs", _LOGGER.warning
@@ -853,7 +733,7 @@ class DockerAPI(CoreSysAttributes):
def container_stats(self, name: str) -> dict[str, Any]:
"""Read and return stats from container."""
try:
docker_container: Container = self.containers_legacy.get(name)
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"Container {name} not found for stats", _LOGGER.warning
@@ -878,7 +758,7 @@ class DockerAPI(CoreSysAttributes):
def container_run_inside(self, name: str, command: str) -> CommandReturn:
"""Execute a command inside Docker container."""
try:
docker_container: Container = self.containers_legacy.get(name)
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"Container {name} not found for running command", _LOGGER.warning

339
supervisor/docker/manifest.py
Normal file
@@ -0,0 +1,339 @@
"""Docker registry manifest fetcher.
|
||||
|
||||
Fetches image manifests directly from container registries to get layer sizes
|
||||
before pulling an image. This enables accurate size-based progress tracking.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
import logging
|
||||
import re
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import aiohttp
|
||||
|
||||
from supervisor.docker.utils import get_registry_from_image
|
||||
|
||||
from .const import DOCKER_HUB, DOCKER_HUB_LEGACY
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ..coresys import CoreSys
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
# Media types for manifest requests
|
||||
MANIFEST_MEDIA_TYPES = (
|
||||
"application/vnd.docker.distribution.manifest.v2+json",
|
||||
"application/vnd.oci.image.manifest.v1+json",
|
||||
"application/vnd.docker.distribution.manifest.list.v2+json",
|
||||
"application/vnd.oci.image.index.v1+json",
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ImageManifest:
|
||||
"""Container image manifest with layer information."""
|
||||
|
||||
digest: str
|
||||
total_size: int
|
||||
layers: dict[str, int] # digest -> size in bytes
|
||||
|
||||
@property
|
||||
def layer_count(self) -> int:
|
||||
"""Return number of layers."""
|
||||
return len(self.layers)
|
||||
|
||||
|
||||
def parse_image_reference(image: str, tag: str) -> tuple[str, str, str]:
|
||||
"""Parse image reference into (registry, repository, tag).
|
||||
|
||||
Examples:
|
||||
ghcr.io/home-assistant/home-assistant:2025.1.0
|
||||
-> (ghcr.io, home-assistant/home-assistant, 2025.1.0)
|
||||
homeassistant/home-assistant:latest
|
||||
-> (registry-1.docker.io, homeassistant/home-assistant, latest)
|
||||
alpine:3.18
|
||||
-> (registry-1.docker.io, library/alpine, 3.18)
|
||||
|
||||
"""
|
||||
# Check if image has explicit registry host
|
||||
registry = get_registry_from_image(image)
|
||||
if registry:
|
||||
repository = image[len(registry) + 1 :] # Remove "registry/" prefix
|
||||
else:
|
||||
registry = DOCKER_HUB
|
||||
repository = image
|
||||
# Docker Hub requires "library/" prefix for official images
|
||||
if "/" not in repository:
|
||||
repository = f"library/{repository}"
|
||||
|
||||
return registry, repository, tag
|
||||
|
||||
|
||||
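The docstring examples above can be exercised directly; these assertions follow from the function body, assuming DOCKER_HUB == "registry-1.docker.io" as the docstring indicates:

assert parse_image_reference("ghcr.io/home-assistant/home-assistant", "2025.1.0") == (
    "ghcr.io",
    "home-assistant/home-assistant",
    "2025.1.0",
)
assert parse_image_reference("homeassistant/home-assistant", "latest") == (
    "registry-1.docker.io",
    "homeassistant/home-assistant",
    "latest",
)
assert parse_image_reference("alpine", "3.18") == (
    "registry-1.docker.io",
    "library/alpine",
    "3.18",
)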
class RegistryManifestFetcher:
"""Fetches manifests from container registries."""

def __init__(self, coresys: CoreSys) -> None:
"""Initialize the fetcher."""
self.coresys = coresys

@property
def _session(self) -> aiohttp.ClientSession:
"""Return the websession for HTTP requests."""
return self.coresys.websession

def _get_credentials(self, registry: str) -> tuple[str, str] | None:
"""Get credentials for registry from Docker config.

Returns (username, password) tuple or None if no credentials.
"""
registries = self.coresys.docker.config.registries

# Map registry hostname to config key
# Docker Hub can be stored as "hub.docker.com" in config
if registry in (DOCKER_HUB, DOCKER_HUB_LEGACY):
if DOCKER_HUB in registries:
creds = registries[DOCKER_HUB]
return creds.get("username"), creds.get("password")
elif registry in registries:
creds = registries[registry]
return creds.get("username"), creds.get("password")

return None

async def _get_auth_token(
self,
registry: str,
repository: str,
) -> str | None:
"""Get authentication token for registry.

Uses the WWW-Authenticate header from a 401 response to discover
the token endpoint, then requests a token with appropriate scope.
"""
# First, make an unauthenticated request to get WWW-Authenticate header
manifest_url = f"https://{registry}/v2/{repository}/manifests/latest"

try:
async with self._session.get(manifest_url) as resp:
if resp.status == 200:
# No auth required
return None

if resp.status != 401:
_LOGGER.warning(
"Unexpected status %d from registry %s", resp.status, registry
)
return None

www_auth = resp.headers.get("WWW-Authenticate", "")
except aiohttp.ClientError as err:
_LOGGER.warning("Failed to connect to registry %s: %s", registry, err)
return None

# Parse WWW-Authenticate: Bearer realm="...",service="...",scope="..."
if not www_auth.startswith("Bearer "):
_LOGGER.warning("Unsupported auth type from %s: %s", registry, www_auth)
return None

params = {}
for match in re.finditer(r'(\w+)="([^"]*)"', www_auth):
params[match.group(1)] = match.group(2)

realm = params.get("realm")
service = params.get("service")

if not realm:
_LOGGER.warning("No realm in WWW-Authenticate from %s", registry)
return None

# Build token request URL
token_url = f"{realm}?scope=repository:{repository}:pull"
if service:
token_url += f"&service={service}"

# Check for credentials
auth = None
credentials = self._get_credentials(registry)
if credentials:
username, password = credentials
if username and password:
auth = aiohttp.BasicAuth(username, password)
_LOGGER.debug("Using credentials for %s", registry)

try:
async with self._session.get(token_url, auth=auth) as resp:
if resp.status != 200:
_LOGGER.warning(
"Failed to get token from %s: %d", realm, resp.status
)
return None

data = await resp.json()
return data.get("token") or data.get("access_token")
except aiohttp.ClientError as err:
_LOGGER.warning("Failed to get auth token: %s", err)
return None

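A worked example of the token-endpoint discovery above: the same regex applied to a typical Bearer challenge (the header value itself is illustrative):

import re

www_auth = (
    'Bearer realm="https://ghcr.io/token",service="ghcr.io",'
    'scope="repository:org/app:pull"'
)
# re.findall with two groups yields (key, value) tuples, so dict() rebuilds the params
params = dict(re.findall(r'(\w+)="([^"]*)"', www_auth))
# params == {"realm": "https://ghcr.io/token", "service": "ghcr.io",
#            "scope": "repository:org/app:pull"}
token_url = f"{params['realm']}?scope=repository:org/app:pull&service={params['service']}"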
async def _fetch_manifest(
self,
registry: str,
repository: str,
reference: str,
token: str | None,
platform: str,
) -> dict | None:
"""Fetch manifest from registry.

If the manifest is a manifest list (multi-arch), fetches the
platform-specific manifest.
"""
manifest_url = f"https://{registry}/v2/{repository}/manifests/{reference}"

headers = {"Accept": ", ".join(MANIFEST_MEDIA_TYPES)}
if token:
headers["Authorization"] = f"Bearer {token}"

try:
async with self._session.get(manifest_url, headers=headers) as resp:
if resp.status != 200:
_LOGGER.warning(
"Failed to fetch manifest for %s/%s:%s - %d",
registry,
repository,
reference,
resp.status,
)
return None

manifest = await resp.json()
except aiohttp.ClientError as err:
_LOGGER.warning("Failed to fetch manifest: %s", err)
return None

media_type = manifest.get("mediaType", "")

# Check if this is a manifest list (multi-arch image)
if "list" in media_type or "index" in media_type:
manifests = manifest.get("manifests", [])
if not manifests:
_LOGGER.warning("Empty manifest list for %s/%s", registry, repository)
return None

# Platform format is "linux/amd64", "linux/arm64", etc.
parts = platform.split("/")
if len(parts) < 2:
_LOGGER.warning("Invalid platform format: %s", platform)
return None

target_os, target_arch = parts[0], parts[1]

platform_manifest = None
for m in manifests:
plat = m.get("platform", {})
if (
plat.get("os") == target_os
and plat.get("architecture") == target_arch
):
platform_manifest = m
break

if not platform_manifest:
_LOGGER.warning(
"Platform %s/%s not found in manifest list for %s/%s, "
"cannot use manifest for progress tracking",
target_os,
target_arch,
registry,
repository,
)
return None

# Fetch the platform-specific manifest
return await self._fetch_manifest(
registry,
repository,
platform_manifest["digest"],
token,
platform,
)

return manifest

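How the platform match above behaves on a small multi-arch index (digests shortened, data illustrative):

index = {
    "mediaType": "application/vnd.oci.image.index.v1+json",
    "manifests": [
        {"digest": "sha256:aaa", "platform": {"os": "linux", "architecture": "amd64"}},
        {"digest": "sha256:bbb", "platform": {"os": "linux", "architecture": "arm64"}},
    ],
}
target_os, target_arch = "linux/arm64".split("/")
selected = next(
    (
        m
        for m in index["manifests"]
        if m["platform"].get("os") == target_os
        and m["platform"].get("architecture") == target_arch
    ),
    None,
)
assert selected is not None and selected["digest"] == "sha256:bbb"
# The matching digest is then fetched recursively as the real manifest.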
async def get_manifest(
self,
image: str,
tag: str,
platform: str,
) -> ImageManifest | None:
"""Fetch manifest and extract layer sizes.

Args:
image: Image name (e.g., "ghcr.io/home-assistant/home-assistant")
tag: Image tag (e.g., "2025.1.0")
platform: Target platform (e.g., "linux/amd64")

Returns:
ImageManifest with layer sizes, or None if fetch failed.

"""
registry, repository, tag = parse_image_reference(image, tag)

_LOGGER.debug(
"Fetching manifest for %s/%s:%s (platform=%s)",
registry,
repository,
tag,
platform,
)

# Get auth token
token = await self._get_auth_token(registry, repository)

# Fetch manifest
manifest = await self._fetch_manifest(
registry, repository, tag, token, platform
)

if not manifest:
return None

# Extract layer information
layers = manifest.get("layers", [])
if not layers:
_LOGGER.warning(
"No layers in manifest for %s/%s:%s", registry, repository, tag
)
return None

layer_sizes: dict[str, int] = {}
total_size = 0

for layer in layers:
digest = layer.get("digest", "")
size = layer.get("size", 0)
if digest and size:
# Store by short digest (first 12 chars after sha256:)
short_digest = (
digest.split(":")[1][:12] if ":" in digest else digest[:12]
)
layer_sizes[short_digest] = size
total_size += size

digest = manifest.get("config", {}).get("digest", "")

_LOGGER.debug(
"Manifest for %s/%s:%s - %d layers, %d bytes total",
registry,
repository,
tag,
len(layer_sizes),
total_size,
)

return ImageManifest(
digest=digest,
total_size=total_size,
layers=layer_sizes,
)
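The short-digest convention used in get_manifest, shown standalone: keep the first 12 hex characters after "sha256:", which matches the layer IDs Docker prints in pull progress events:

digest = "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
short_digest = digest.split(":")[1][:12] if ":" in digest else digest[:12]
assert short_digest == "0123456789ab"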
@@ -7,6 +7,7 @@ import logging
from typing import Self, cast

import docker
from docker.models.containers import Container
from docker.models.networks import Network
import requests

@@ -219,8 +220,7 @@ class DockerNetwork:

def attach_container(
self,
container_id: str,
name: str,
container: Container,
alias: list[str] | None = None,
ipv4: IPv4Address | None = None,
) -> None:
@@ -233,15 +233,15 @@ class DockerNetwork:
self.network.reload()

# Check stale Network
if name in (
if container.name and container.name in (
val.get("Name") for val in self.network.attrs.get("Containers", {}).values()
):
self.stale_cleanup(name)
self.stale_cleanup(container.name)

# Attach Network
try:
self.network.connect(
container_id, aliases=alias, ipv4_address=str(ipv4) if ipv4 else None
container, aliases=alias, ipv4_address=str(ipv4) if ipv4 else None
)
except (
docker.errors.NotFound,
@@ -250,7 +250,7 @@ class DockerNetwork:
requests.RequestException,
) as err:
raise DockerError(
f"Can't connect {name} to Supervisor network: {err}",
f"Can't connect {container.name} to Supervisor network: {err}",
_LOGGER.error,
) from err

@@ -274,20 +274,17 @@ class DockerNetwork:
) as err:
raise DockerError(f"Can't find {name}: {err}", _LOGGER.error) from err

if not (container_id := container.id):
raise DockerError(f"Received invalid metadata from docker for {name}")
if container.id not in self.containers:
self.attach_container(container, alias, ipv4)

if container_id not in self.containers:
self.attach_container(container_id, name, alias, ipv4)

def detach_default_bridge(self, container_id: str, name: str) -> None:
def detach_default_bridge(self, container: Container) -> None:
"""Detach default Docker bridge.

Need run inside executor.
"""
try:
default_network = self.docker.networks.get(DOCKER_NETWORK_DRIVER)
default_network.disconnect(container_id)
default_network.disconnect(container)
except docker.errors.NotFound:
pass
except (
@@ -296,7 +293,7 @@ class DockerNetwork:
requests.RequestException,
) as err:
raise DockerError(
f"Can't disconnect {name} from default network: {err}",
f"Can't disconnect {container.name} from default network: {err}",
_LOGGER.warning,
) from err

368
supervisor/docker/pull_progress.py
Normal file
@@ -0,0 +1,368 @@
"""Image pull progress tracking."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from contextlib import suppress
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, cast
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .manager import PullLogEntry
|
||||
from .manifest import ImageManifest
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
# Progress weight distribution: 70% downloading, 30% extraction
|
||||
DOWNLOAD_WEIGHT = 70.0
|
||||
EXTRACT_WEIGHT = 30.0
|
||||
|
||||
|
||||
class LayerPullStatus(Enum):
|
||||
"""Status values for pulling an image layer.
|
||||
|
||||
These are a subset of the statuses in a docker pull image log.
|
||||
The order field allows comparing which stage is further along.
|
||||
"""
|
||||
|
||||
PULLING_FS_LAYER = 1, "Pulling fs layer"
|
||||
WAITING = 1, "Waiting"
|
||||
RETRYING = 2, "Retrying" # Matches "Retrying in N seconds"
|
||||
DOWNLOADING = 3, "Downloading"
|
||||
VERIFYING_CHECKSUM = 4, "Verifying Checksum"
|
||||
DOWNLOAD_COMPLETE = 5, "Download complete"
|
||||
EXTRACTING = 6, "Extracting"
|
||||
PULL_COMPLETE = 7, "Pull complete"
|
||||
ALREADY_EXISTS = 7, "Already exists"
|
||||
|
||||
def __init__(self, order: int, status: str) -> None:
|
||||
"""Set fields from values."""
|
||||
self.order = order
|
||||
self.status = status
|
||||
|
||||
def __eq__(self, value: object, /) -> bool:
|
||||
"""Check equality, allow string comparisons on status."""
|
||||
with suppress(AttributeError):
|
||||
return self.status == cast(LayerPullStatus, value).status
|
||||
return self.status == value
|
||||
|
||||
def __hash__(self) -> int:
|
||||
"""Return hash based on status string."""
|
||||
return hash(self.status)
|
||||
|
||||
def __lt__(self, other: object) -> bool:
|
||||
"""Order instances by stage progression."""
|
||||
with suppress(AttributeError):
|
||||
return self.order < cast(LayerPullStatus, other).order
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def from_status(cls, status: str) -> LayerPullStatus | None:
|
||||
"""Get enum from status string, or None if not recognized."""
|
||||
# Handle "Retrying in N seconds" pattern
|
||||
if status.startswith("Retrying in "):
|
||||
return cls.RETRYING
|
||||
for member in cls:
|
||||
if member.status == status:
|
||||
return member
|
||||
return None
|
||||
|
||||
|
||||
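Two non-obvious behaviours of this enum, shown directly: the prefix match for retry messages, stage ordering via the order field, and string comparison via the custom __eq__:

assert LayerPullStatus.from_status("Retrying in 5 seconds") is LayerPullStatus.RETRYING
assert LayerPullStatus.from_status("Download complete") is LayerPullStatus.DOWNLOAD_COMPLETE
assert LayerPullStatus.from_status("something else") is None
assert LayerPullStatus.DOWNLOADING < LayerPullStatus.EXTRACTING  # order 3 < 6
assert LayerPullStatus.ALREADY_EXISTS == "Already exists"  # plain strings compare equal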
@dataclass
class LayerProgress:
"""Track progress of a single layer."""

layer_id: str
total_size: int = 0  # Size in bytes (from downloading, reused for extraction)
download_current: int = 0
extract_current: int = 0  # Extraction progress in bytes (overlay2 only)
download_complete: bool = False
extract_complete: bool = False
already_exists: bool = False  # Layer was already locally available

def calculate_progress(self) -> float:
"""Calculate layer progress 0-100.

Progress is weighted: 70% download, 30% extraction.
For overlay2, we have byte-based extraction progress.
For containerd, extraction jumps from 70% to 100% on completion.
"""
if self.already_exists or self.extract_complete:
return 100.0

if self.download_complete:
# Check if we have extraction progress (overlay2)
if self.extract_current > 0 and self.total_size > 0:
extract_pct = min(1.0, self.extract_current / self.total_size)
return DOWNLOAD_WEIGHT + (extract_pct * EXTRACT_WEIGHT)
# No extraction progress yet - return 70%
return DOWNLOAD_WEIGHT

if self.total_size > 0:
download_pct = min(1.0, self.download_current / self.total_size)
return download_pct * DOWNLOAD_WEIGHT

return 0.0

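A worked instance of the 70/30 weighting: a layer that has fully downloaded and extracted half of its bytes reports 85%:

layer = LayerProgress(layer_id="0123456789ab", total_size=100 * 2**20)
layer.download_complete = True
layer.extract_current = 50 * 2**20  # overlay2 byte-based extraction progress
assert layer.calculate_progress() == 70.0 + 0.5 * 30.0  # == 85.0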
@dataclass
class ImagePullProgress:
"""Track overall progress of pulling an image.

When manifest layer sizes are provided, uses size-weighted progress where
each layer contributes proportionally to its size. This gives accurate
progress based on actual bytes to download.

When manifest is not available, falls back to count-based progress where
each layer contributes equally.

Layers that already exist locally are excluded from the progress calculation.
"""

layers: dict[str, LayerProgress] = field(default_factory=dict)
_last_reported_progress: float = field(default=0.0, repr=False)
_seen_downloading: bool = field(default=False, repr=False)
_manifest_layer_sizes: dict[str, int] = field(default_factory=dict, repr=False)
_total_manifest_size: int = field(default=0, repr=False)

def set_manifest(self, manifest: ImageManifest) -> None:
"""Set manifest layer sizes for accurate size-based progress.

Should be called before processing pull events.
"""
self._manifest_layer_sizes = dict(manifest.layers)
self._total_manifest_size = manifest.total_size
_LOGGER.debug(
"Manifest set: %d layers, %d bytes total",
len(self._manifest_layer_sizes),
self._total_manifest_size,
)

def get_or_create_layer(self, layer_id: str) -> LayerProgress:
"""Get existing layer or create new one."""
if layer_id not in self.layers:
# If we have manifest sizes, pre-populate the layer's total_size
manifest_size = self._manifest_layer_sizes.get(layer_id, 0)
self.layers[layer_id] = LayerProgress(
layer_id=layer_id, total_size=manifest_size
)
return self.layers[layer_id]

def process_event(self, entry: PullLogEntry) -> None:
"""Process a pull log event and update layer state."""
# Skip events without layer ID or status
if not entry.id or not entry.status:
return

# Skip metadata events that aren't layer-specific
# "Pulling from X" has id=tag but isn't a layer
if entry.status.startswith("Pulling from "):
return

# Parse status to enum (returns None for unrecognized statuses)
status = LayerPullStatus.from_status(entry.status)
if status is None:
return

layer = self.get_or_create_layer(entry.id)

# Handle "Already exists" - layer is locally available
if status is LayerPullStatus.ALREADY_EXISTS:
layer.already_exists = True
layer.download_complete = True
layer.extract_complete = True
return

# Handle "Pulling fs layer" / "Waiting" - layer is being tracked
if status in (LayerPullStatus.PULLING_FS_LAYER, LayerPullStatus.WAITING):
return

# Handle "Downloading" - update download progress
if status is LayerPullStatus.DOWNLOADING:
# Mark that we've seen downloading - now we know layer count is complete
self._seen_downloading = True
if entry.progress_detail and entry.progress_detail.current is not None:
layer.download_current = entry.progress_detail.current
if entry.progress_detail and entry.progress_detail.total is not None:
# Only set total_size if not already set or if this is larger
# (handles case where total changes during download)
layer.total_size = max(layer.total_size, entry.progress_detail.total)
return

# Handle "Verifying Checksum" - download is essentially complete
if status is LayerPullStatus.VERIFYING_CHECKSUM:
if layer.total_size > 0:
layer.download_current = layer.total_size
return

# Handle "Download complete" - download phase done
if status is LayerPullStatus.DOWNLOAD_COMPLETE:
layer.download_complete = True
if layer.total_size > 0:
layer.download_current = layer.total_size
elif layer.total_size == 0:
# Small layer that skipped downloading phase
# Set minimal size so it doesn't distort weighted average
layer.total_size = 1
layer.download_current = 1
return

# Handle "Extracting" - extraction in progress
if status is LayerPullStatus.EXTRACTING:
# For overlay2: progressDetail has {current, total} in bytes
# For containerd: progressDetail has {current, units: "s"} (time elapsed)
# We can only use byte-based progress (overlay2)
layer.download_complete = True
if layer.total_size > 0:
layer.download_current = layer.total_size

# Check if this is byte-based extraction progress (overlay2)
# Overlay2 has {current, total} in bytes, no units field
# Containerd has {current, units: "s"} which is useless for progress
if (
entry.progress_detail
and entry.progress_detail.current is not None
and entry.progress_detail.units is None
):
# Use layer's total_size from downloading phase (doesn't change)
layer.extract_current = entry.progress_detail.current
_LOGGER.debug(
"Layer %s extracting: %d/%d (%.1f%%)",
layer.layer_id,
layer.extract_current,
layer.total_size,
(layer.extract_current / layer.total_size * 100)
if layer.total_size > 0
else 0,
)
return

# Handle "Pull complete" - layer is fully done
if status is LayerPullStatus.PULL_COMPLETE:
layer.download_complete = True
layer.extract_complete = True
if layer.total_size > 0:
layer.download_current = layer.total_size
return

# Handle "Retrying in N seconds" - reset download progress
if status is LayerPullStatus.RETRYING:
layer.download_current = 0
layer.download_complete = False
return

def calculate_progress(self) -> float:
"""Calculate overall progress 0-100.

When manifest layer sizes are available, uses size-weighted progress
where each layer contributes proportionally to its size.

When manifest is not available, falls back to count-based progress
where each layer contributes equally.

Layers that already exist locally are excluded from the calculation.

Returns 0 until we've seen the first "Downloading" event, since Docker
reports "Already exists" and "Pulling fs layer" events before we know
the complete layer count.
"""
# Don't report progress until we've seen downloading start
# This ensures we know the full layer count before calculating progress
if not self._seen_downloading or not self.layers:
return 0.0

# Only count layers that need pulling (exclude already_exists)
layers_to_pull = [
layer for layer in self.layers.values() if not layer.already_exists
]

if not layers_to_pull:
# All layers already exist, nothing to download
return 100.0

# Use size-weighted progress if manifest sizes are available
if self._manifest_layer_sizes:
return min(100, self._calculate_size_weighted_progress(layers_to_pull))

# Fall back to count-based progress
total_progress = sum(layer.calculate_progress() for layer in layers_to_pull)
return min(100, total_progress / len(layers_to_pull))

def _calculate_size_weighted_progress(
self, layers_to_pull: list[LayerProgress]
) -> float:
"""Calculate size-weighted progress.

Each layer contributes to progress proportionally to its size.
Progress = sum(layer_progress * layer_size) / total_size
"""
# Calculate total size of layers that need pulling
total_size = sum(layer.total_size for layer in layers_to_pull)

if total_size == 0:
# No size info available, fall back to count-based
total_progress = sum(layer.calculate_progress() for layer in layers_to_pull)
return total_progress / len(layers_to_pull)

# Weight each layer's progress by its size
weighted_progress = 0.0
for layer in layers_to_pull:
if layer.total_size > 0:
layer_weight = layer.total_size / total_size
weighted_progress += layer.calculate_progress() * layer_weight

return weighted_progress

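A worked instance of the size weighting: a 900-byte layer halfway through downloading scores 35.0 (half of DOWNLOAD_WEIGHT), a fully extracted 100-byte layer scores 100.0, so the size-weighted total is 41.5 rather than the 67.5 a plain count-based average would report:

big = LayerProgress(layer_id="big", total_size=900, download_current=450)
small = LayerProgress(layer_id="small", total_size=100, extract_complete=True)
total = big.total_size + small.total_size  # 1000
weighted = (
    big.calculate_progress() * (big.total_size / total)
    + small.calculate_progress() * (small.total_size / total)
)
assert weighted == 35.0 * 0.9 + 100.0 * 0.1  # == 41.5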
def get_stage(self) -> str | None:
"""Get current stage based on layer states."""
if not self.layers:
return None

# Check if any layer is still downloading
for layer in self.layers.values():
if layer.already_exists:
continue
if not layer.download_complete:
return "Downloading"

# All downloads complete, check if extracting
for layer in self.layers.values():
if layer.already_exists:
continue
if not layer.extract_complete:
return "Extracting"

# All done
return "Pull complete"

def should_update_job(self, threshold: float = 1.0) -> tuple[bool, float]:
"""Check if job should be updated based on progress change.

Returns (should_update, current_progress).
Updates are triggered when progress changes by at least threshold%.
Progress is guaranteed to only increase (monotonic).
"""
current_progress = self.calculate_progress()

# Ensure monotonic progress - never report a decrease
# This can happen when new layers get size info and change the weighted average
if current_progress < self._last_reported_progress:
_LOGGER.debug(
"Progress decreased from %.1f%% to %.1f%%, keeping last reported",
self._last_reported_progress,
current_progress,
)
return False, self._last_reported_progress

if current_progress >= self._last_reported_progress + threshold:
_LOGGER.debug(
"Progress update: %.1f%% -> %.1f%% (delta: %.1f%%)",
self._last_reported_progress,
current_progress,
current_progress - self._last_reported_progress,
)
self._last_reported_progress = current_progress
return True, current_progress

return False, self._last_reported_progress
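A usage sketch of the throttling above, assuming `progress` is an ImagePullProgress fed from a pull log; the entries iterable and the job-update call are hypothetical placeholders:

progress = ImagePullProgress()
for entry in pull_log_entries:  # hypothetical stream of PullLogEntry events
    progress.process_event(entry)
    should_update, pct = progress.should_update_job(threshold=1.0)
    if should_update:
        report_job_progress(pct)  # hypothetical sink, e.g. a Supervisor job update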
@@ -54,7 +54,7 @@ class DockerSupervisor(DockerInterface):
"""Attach to running docker container."""
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers_legacy.get, self.name
self.sys_docker.containers.get, self.name
)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err
@@ -74,8 +74,7 @@ class DockerSupervisor(DockerInterface):
_LOGGER.info("Connecting Supervisor to hassio-network")
await self.sys_run_in_executor(
self.sys_docker.network.attach_container,
docker_container.id,
self.name,
docker_container,
alias=["supervisor"],
ipv4=self.sys_docker.network.supervisor,
)
@@ -91,7 +90,7 @@ class DockerSupervisor(DockerInterface):
Need run inside executor.
"""
try:
docker_container = self.sys_docker.containers_legacy.get(self.name)
docker_container = self.sys_docker.containers.get(self.name)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get Supervisor container for retag: {err}", _LOGGER.error
@@ -119,7 +118,7 @@ class DockerSupervisor(DockerInterface):
"""Update start tag to new version."""
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers_legacy.get, self.name
self.sys_docker.containers.get, self.name
)
docker_image = await self.sys_docker.images.inspect(f"{image}:{version!s}")
except (

@@ -855,10 +855,6 @@ class DockerNotFound(DockerError):
"""Docker object doesn't exist."""


class DockerLogOutOfOrder(DockerError):
"""Raise when log from docker action was out of order."""


class DockerNoSpaceOnDevice(DockerError):
"""Raise if a docker pull fails due to available space."""


@@ -48,7 +48,7 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)

SECONDS_BETWEEN_API_CHECKS: Final[int] = 5
# Core Stage 1 and some wiggle room
STARTUP_API_RESPONSE_TIMEOUT: Final[timedelta] = timedelta(minutes=10)
STARTUP_API_RESPONSE_TIMEOUT: Final[timedelta] = timedelta(minutes=3)
# All stages plus event start timeout and some wiggle room
STARTUP_API_CHECK_RUNNING_TIMEOUT: Final[timedelta] = timedelta(minutes=15)
# While database migration is running, the timeout will be extended

@@ -23,7 +23,6 @@ from ..const import (
ATTR_AUDIO_OUTPUT,
ATTR_BACKUPS_EXCLUDE_DATABASE,
ATTR_BOOT,
ATTR_DUPLICATE_LOG_FILE,
ATTR_IMAGE,
ATTR_MESSAGE,
ATTR_PORT,
@@ -300,16 +299,6 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
"""Set whether backups should exclude database by default."""
self._data[ATTR_BACKUPS_EXCLUDE_DATABASE] = value

@property
def duplicate_log_file(self) -> bool:
"""Return True if Home Assistant should duplicate logs to file."""
return self._data[ATTR_DUPLICATE_LOG_FILE]

@duplicate_log_file.setter
def duplicate_log_file(self, value: bool) -> None:
"""Set whether Home Assistant should duplicate logs to file."""
self._data[ATTR_DUPLICATE_LOG_FILE] = value

async def load(self) -> None:
"""Prepare Home Assistant object."""
await asyncio.wait(
@@ -508,7 +497,7 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
tmp_dir=self.sys_config.path_tmp,
)
else:
remove_folder(self.sys_config.path_homeassistant, content_only=True)
remove_folder(self.sys_config.path_homeassistant)

try:
shutil.copytree(

@@ -10,7 +10,6 @@ from ..const import (
ATTR_AUDIO_OUTPUT,
ATTR_BACKUPS_EXCLUDE_DATABASE,
ATTR_BOOT,
ATTR_DUPLICATE_LOG_FILE,
ATTR_IMAGE,
ATTR_PORT,
ATTR_REFRESH_TOKEN,
@@ -37,7 +36,6 @@ SCHEMA_HASS_CONFIG = vol.Schema(
vol.Optional(ATTR_AUDIO_OUTPUT, default=None): vol.Maybe(str),
vol.Optional(ATTR_AUDIO_INPUT, default=None): vol.Maybe(str),
vol.Optional(ATTR_BACKUPS_EXCLUDE_DATABASE, default=False): vol.Boolean(),
vol.Optional(ATTR_DUPLICATE_LOG_FILE, default=False): vol.Boolean(),
vol.Optional(ATTR_OVERRIDE_IMAGE, default=False): vol.Boolean(),
},
extra=vol.REMOVE_EXTRA,

@@ -74,9 +74,7 @@ class EvaluateContainer(EvaluateBase):
self._images.clear()

try:
containers = await self.sys_run_in_executor(
self.sys_docker.containers_legacy.list
)
containers = await self.sys_run_in_executor(self.sys_docker.containers.list)
except (DockerException, RequestException) as err:
_LOGGER.error("Corrupt docker overlayfs detect: %s", err)
self.sys_resolution.create_issue(

@@ -227,7 +227,7 @@ async def test_listener_attached_on_install(
container_collection.get.side_effect = DockerException()
with (
patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
"supervisor.docker.manager.DockerAPI.containers",
new=PropertyMock(return_value=container_collection),
),
patch("pathlib.Path.is_dir", return_value=True),
@@ -527,7 +527,7 @@ async def test_backup_with_pre_command_error(
exc_type_raised: type[HassioError],
) -> None:
"""Test backing up an addon with error running pre command."""
coresys.docker.containers_legacy.get.side_effect = container_get_side_effect
coresys.docker.containers.get.side_effect = container_get_side_effect
container.exec_run.side_effect = exec_run_side_effect

install_addon_ssh.path_data.mkdir()

@@ -679,7 +679,7 @@ async def test_addon_write_stdin_not_supported_error(api_client: TestClient):
async def test_addon_rebuild_fails_error(api_client: TestClient, coresys: CoreSys):
"""Test error when build fails during rebuild for addon."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
coresys.docker.containers_legacy.run.side_effect = DockerException("fail")
coresys.docker.containers.run.side_effect = DockerException("fail")

with (
patch.object(

@@ -1201,8 +1201,10 @@ async def test_restore_homeassistant_adds_env(

assert docker.containers.create.call_args.kwargs["name"] == "homeassistant"
assert (
f"SUPERVISOR_RESTORE_JOB_ID={job.uuid}"
in docker.containers.create.call_args.args[0]["Env"]
docker.containers.create.call_args.kwargs["environment"][
"SUPERVISOR_RESTORE_JOB_ID"
]
== job.uuid
)


@@ -35,9 +35,9 @@ async def test_api_core_logs(

async def test_api_stats(api_client: TestClient, coresys: CoreSys):
"""Test stats."""
coresys.docker.containers_legacy.get.return_value.status = "running"
coresys.docker.containers_legacy.get.return_value.stats.return_value = (
load_json_fixture("container_stats.json")
coresys.docker.containers.get.return_value.status = "running"
coresys.docker.containers.get.return_value.stats.return_value = load_json_fixture(
"container_stats.json"
)

resp = await api_client.get("/homeassistant/stats")
@@ -138,14 +138,14 @@ async def test_api_rebuild(
await api_client.post("/homeassistant/rebuild")

assert container.remove.call_count == 2
coresys.docker.containers.create.return_value.start.assert_called_once()
container.start.assert_called_once()
assert not safe_mode_marker.exists()

with patch.object(HomeAssistantCore, "_block_till_run"):
await api_client.post("/homeassistant/rebuild", json={"safe_mode": True})

assert container.remove.call_count == 4
assert coresys.docker.containers.create.return_value.start.call_count == 2
assert container.start.call_count == 2
assert safe_mode_marker.exists()

@@ -305,6 +305,8 @@ async def test_api_progress_updates_home_assistant_update(
and evt.args[0]["data"]["event"] == WSEvent.JOB
and evt.args[0]["data"]["data"]["name"] == "home_assistant_core_update"
]
# Count-based progress: 2 layers need pulling (each worth 50%)
# Layers that already exist are excluded from progress calculation
assert events[:5] == [
{
"stage": None,
@@ -318,36 +320,36 @@ async def test_api_progress_updates_home_assistant_update(
},
{
"stage": None,
"progress": 0.1,
"progress": 9.2,
"done": False,
},
{
"stage": None,
"progress": 1.7,
"progress": 25.6,
"done": False,
},
{
"stage": None,
"progress": 4.0,
"progress": 35.4,
"done": False,
},
]
assert events[-5:] == [
{
"stage": None,
"progress": 95.5,
"done": False,
},
{
"stage": None,
"progress": 96.9,
"done": False,
},
{
"stage": None,
"progress": 98.2,
"done": False,
},
{
"stage": None,
"progress": 98.3,
"done": False,
},
{
"stage": None,
"progress": 99.3,
"done": False,
},
{
"stage": None,
"progress": 100,

@@ -761,6 +761,8 @@ async def test_api_progress_updates_addon_install_update(
and evt.args[0]["data"]["data"]["name"] == job_name
and evt.args[0]["data"]["data"]["reference"] == addon_slug
]
# Count-based progress: 2 layers need pulling (each worth 50%)
# Layers that already exist are excluded from progress calculation
assert events[:4] == [
{
"stage": None,
@@ -769,36 +771,36 @@ async def test_api_progress_updates_addon_install_update(
},
{
"stage": None,
"progress": 0.1,
"progress": 9.2,
"done": False,
},
{
"stage": None,
"progress": 1.7,
"progress": 25.6,
"done": False,
},
{
"stage": None,
"progress": 4.0,
"progress": 35.4,
"done": False,
},
]
assert events[-5:] == [
{
"stage": None,
"progress": 95.5,
"done": False,
},
{
"stage": None,
"progress": 96.9,
"done": False,
},
{
"stage": None,
"progress": 98.2,
"done": False,
},
{
"stage": None,
"progress": 98.3,
"done": False,
},
{
"stage": None,
"progress": 99.3,
"done": False,
},
{
"stage": None,
"progress": 100,

@@ -359,6 +359,8 @@ async def test_api_progress_updates_supervisor_update(
and evt.args[0]["data"]["event"] == WSEvent.JOB
and evt.args[0]["data"]["data"]["name"] == "supervisor_update"
]
# Count-based progress: 2 layers need pulling (each worth 50%)
# Layers that already exist are excluded from progress calculation
assert events[:4] == [
{
"stage": None,
@@ -367,36 +369,36 @@ async def test_api_progress_updates_supervisor_update(
},
{
"stage": None,
"progress": 0.1,
"progress": 9.2,
"done": False,
},
{
"stage": None,
"progress": 1.7,
"progress": 25.6,
"done": False,
},
{
"stage": None,
"progress": 4.0,
"progress": 35.4,
"done": False,
},
]
assert events[-5:] == [
{
"stage": None,
"progress": 95.5,
"done": False,
},
{
"stage": None,
"progress": 96.9,
"done": False,
},
{
"stage": None,
"progress": 98.2,
"done": False,
},
{
"stage": None,
"progress": 98.3,
"done": False,
},
{
"stage": None,
"progress": 99.3,
"done": False,
},
{
"stage": None,
"progress": 100,
@@ -412,9 +414,9 @@ async def test_api_progress_updates_supervisor_update(

async def test_api_supervisor_stats(api_client: TestClient, coresys: CoreSys):
"""Test supervisor stats."""
coresys.docker.containers_legacy.get.return_value.status = "running"
coresys.docker.containers_legacy.get.return_value.stats.return_value = (
load_json_fixture("container_stats.json")
coresys.docker.containers.get.return_value.status = "running"
coresys.docker.containers.get.return_value.stats.return_value = load_json_fixture(
"container_stats.json"
)

resp = await api_client.get("/supervisor/stats")
@@ -430,7 +432,7 @@ async def test_supervisor_api_stats_failure(
api_client: TestClient, coresys: CoreSys, caplog: pytest.LogCaptureFixture
):
"""Test supervisor stats failure."""
coresys.docker.containers_legacy.get.side_effect = DockerException("fail")
coresys.docker.containers.get.side_effect = DockerException("fail")

resp = await api_client.get("/supervisor/stats")
assert resp.status == 500

@@ -9,7 +9,6 @@ import subprocess
from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch
from uuid import uuid4

from aiodocker.containers import DockerContainer, DockerContainers
from aiodocker.docker import DockerImages
from aiohttp import ClientSession, web
from aiohttp.test_utils import TestClient
@@ -121,13 +120,11 @@ async def docker() -> DockerAPI:
"Id": "test123",
"RepoTags": ["ghcr.io/home-assistant/amd64-hassio-supervisor:latest"],
}
container_inspect = image_inspect | {"State": {"ExitCode": 0}}

with (
patch("supervisor.docker.manager.DockerClient", return_value=MagicMock()),
patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
return_value=MagicMock(),
"supervisor.docker.manager.DockerAPI.containers", return_value=MagicMock()
),
patch("supervisor.docker.manager.DockerAPI.api", return_value=MagicMock()),
patch("supervisor.docker.manager.DockerAPI.info", return_value=MagicMock()),
@@ -139,12 +136,6 @@ async def docker() -> DockerAPI:
return_value=(docker_images := MagicMock(spec=DockerImages))
),
),
patch(
"supervisor.docker.manager.DockerAPI.containers",
new=PropertyMock(
return_value=(docker_containers := MagicMock(spec=DockerContainers))
),
),
):
docker_obj = await DockerAPI(MagicMock()).post_init()
docker_obj.config._data = {"registries": {}}
@@ -156,19 +147,16 @@ async def docker() -> DockerAPI:
docker_images.import_image = AsyncMock(
return_value=[{"stream": "Loaded image: test:latest\n"}]
)
docker_images.pull.return_value = AsyncIterator([{}])

docker_containers.get.return_value = docker_container = MagicMock(
spec=DockerContainer
)
docker_containers.list.return_value = [docker_container]
docker_containers.create.return_value = docker_container
docker_container.show.return_value = container_inspect
docker_images.pull.return_value = AsyncIterator([{}])

docker_obj.info.logging = "journald"
docker_obj.info.storage = "overlay2"
docker_obj.info.version = AwesomeVersion("1.0.0")

# Mock manifest fetcher to return None (falls back to count-based progress)
docker_obj._manifest_fetcher.get_manifest = AsyncMock(return_value=None)

yield docker_obj


@@ -805,7 +793,7 @@ async def docker_logs(docker: DockerAPI, supervisor_name) -> MagicMock:
"""Mock log output for a container from docker."""
container_mock = MagicMock()
container_mock.logs.return_value = load_binary_fixture("logs_docker_container.txt")
docker.containers_legacy.get.return_value = container_mock
docker.containers.get.return_value = container_mock
yield container_mock.logs


@@ -839,7 +827,7 @@ async def os_available(request: pytest.FixtureRequest) -> None:
@pytest.fixture
async def mount_propagation(docker: DockerAPI, coresys: CoreSys) -> None:
"""Mock supervisor connected to container with propagation set."""
docker.containers_legacy.get.return_value = supervisor = MagicMock()
docker.containers.get.return_value = supervisor = MagicMock()
supervisor.attrs = {
"Mounts": [
{
@@ -859,11 +847,10 @@ async def mount_propagation(docker: DockerAPI, coresys: CoreSys) -> None:
@pytest.fixture
async def container(docker: DockerAPI) -> MagicMock:
"""Mock attrs and status for container on attach."""
attrs = {"State": {"ExitCode": 0}}
docker.containers_legacy.get.return_value = addon = MagicMock(
status="stopped", attrs=attrs
)
docker.containers.create.return_value.show.return_value = attrs
docker.containers.get.return_value = addon = MagicMock()
docker.containers.create.return_value = addon
addon.status = "stopped"
addon.attrs = {"State": {"ExitCode": 0}}
yield addon

@@ -1,12 +1,7 @@
"""Docker tests."""

from supervisor.docker.const import DockerMount, MountBindOptions, MountType
from docker.types import Mount

# dev mount with equivalent of bind-recursive=writable specified via dict value
DEV_MOUNT = DockerMount(
type=MountType.BIND,
source="/dev",
target="/dev",
read_only=True,
bind_options=MountBindOptions(read_only_non_recursive=True),
)
DEV_MOUNT = Mount(type="bind", source="/dev", target="/dev", read_only=True)
DEV_MOUNT["BindOptions"] = {"ReadOnlyNonRecursive": True}

@@ -1,13 +1,13 @@
"""Test docker addon setup."""

import asyncio
from http import HTTPStatus
from ipaddress import IPv4Address
from pathlib import Path
from typing import Any
from unittest.mock import MagicMock, Mock, PropertyMock, patch

import aiodocker
from docker.errors import NotFound
from docker.types import Mount
import pytest

from supervisor.addons import validate as vd
@@ -18,12 +18,6 @@ from supervisor.const import BusEvent
from supervisor.coresys import CoreSys
from supervisor.dbus.agent.cgroup import CGroup
from supervisor.docker.addon import DockerAddon
from supervisor.docker.const import (
DockerMount,
MountBindOptions,
MountType,
PropagationMode,
)
from supervisor.docker.manager import DockerAPI
from supervisor.exceptions import CoreDNSError, DockerNotFound
from supervisor.hardware.data import Device
@@ -86,8 +80,8 @@ def test_base_volumes_included(

# Data added as rw
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=docker_addon.addon.path_extern_data.as_posix(),
target="/data",
read_only=False,
@@ -105,8 +99,8 @@ def test_addon_map_folder_defaults(
)
# Config added and is marked rw
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_homeassistant.as_posix(),
target="/config",
read_only=False,
@@ -116,8 +110,8 @@ def test_addon_map_folder_defaults(

# SSL added and defaults to ro
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_ssl.as_posix(),
target="/ssl",
read_only=True,
@@ -127,30 +121,30 @@ def test_addon_map_folder_defaults(

# Media added and propagation set
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_media.as_posix(),
target="/media",
read_only=True,
bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
propagation="rslave",
)
in docker_addon.mounts
)

# Share added and propagation set
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_share.as_posix(),
target="/share",
read_only=True,
bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
propagation="rslave",
)
in docker_addon.mounts
)

# Backup not added
assert "/backup" not in [mount.target for mount in docker_addon.mounts]
assert "/backup" not in [mount["Target"] for mount in docker_addon.mounts]


def test_addon_map_homeassistant_folder(
@@ -163,8 +157,8 @@ def test_addon_map_homeassistant_folder(

# Home Assistant config folder mounted to /homeassistant, not /config
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_homeassistant.as_posix(),
target="/homeassistant",
read_only=True,
@@ -183,8 +177,8 @@ def test_addon_map_addon_configs_folder(

# Addon configs folder included
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_addon_configs.as_posix(),
target="/addon_configs",
read_only=True,
@@ -203,8 +197,8 @@ def test_addon_map_addon_config_folder(

# Addon config folder included
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=docker_addon.addon.path_extern_config.as_posix(),
target="/config",
read_only=True,
@@ -226,8 +220,8 @@ def test_addon_map_addon_config_folder_with_custom_target(

# Addon config folder included
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=docker_addon.addon.path_extern_config.as_posix(),
target="/custom/target/path",
read_only=False,
@@ -246,8 +240,8 @@ def test_addon_map_data_folder_with_custom_target(

# Addon config folder included
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=docker_addon.addon.path_extern_data.as_posix(),
target="/custom/data/path",
read_only=False,
@@ -266,8 +260,8 @@ def test_addon_ignore_on_config_map(

# Config added and is marked rw
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_homeassistant.as_posix(),
target="/config",
read_only=False,
@@ -277,10 +271,11 @@ def test_addon_ignore_on_config_map(

# Mount for addon's specific config folder omitted since config in map field
assert (
len([mount for mount in docker_addon.mounts if mount.target == "/config"]) == 1
len([mount for mount in docker_addon.mounts if mount["Target"] == "/config"])
== 1
)
# Home Assistant mount omitted since config in map field
assert "/homeassistant" not in [mount.target for mount in docker_addon.mounts]
assert "/homeassistant" not in [mount["Target"] for mount in docker_addon.mounts]


def test_journald_addon(
@@ -292,8 +287,8 @@ def test_journald_addon(
)

assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source="/var/log/journal",
target="/var/log/journal",
read_only=True,
@@ -301,8 +296,8 @@ def test_journald_addon(
in docker_addon.mounts
)
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source="/run/log/journal",
target="/run/log/journal",
read_only=True,
@@ -319,7 +314,7 @@ def test_not_journald_addon(
coresys, addonsdata_system, "basic-addon-config.json"
)

assert "/var/log/journal" not in [mount.target for mount in docker_addon.mounts]
assert "/var/log/journal" not in [mount["Target"] for mount in docker_addon.mounts]


async def test_addon_run_docker_error(
@@ -330,9 +325,7 @@ async def test_addon_run_docker_error(
):
"""Test docker error when addon is run."""
await coresys.dbus.timedate.connect(coresys.dbus.bus)
coresys.docker.containers.create.side_effect = aiodocker.DockerError(
HTTPStatus.NOT_FOUND, {"message": "missing"}
)
coresys.docker.containers.create.side_effect = NotFound("Missing")
docker_addon = get_docker_addon(
coresys, addonsdata_system, "basic-addon-config.json"
)

@@ -2,24 +2,22 @@

from ipaddress import IPv4Address
from pathlib import Path
from unittest.mock import MagicMock, patch
from unittest.mock import patch

import pytest
from docker.types import Mount

from supervisor.coresys import CoreSys
from supervisor.docker.const import DockerMount, MountType, Ulimit
from supervisor.docker.manager import DockerAPI

from . import DEV_MOUNT


@pytest.mark.usefixtures("path_extern")
async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, container: MagicMock):
async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, path_extern):
"""Test starting audio plugin."""
config_file = tmp_supervisor_data / "audio" / "pulse_audio.json"
assert not config_file.exists()

with patch.object(DockerAPI, "run", return_value=container.attrs) as run:
with patch.object(DockerAPI, "run") as run:
await coresys.plugins.audio.start()

run.assert_called_once()
@@ -28,31 +26,21 @@ async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, container: Mag
assert run.call_args.kwargs["hostname"] == "hassio-audio"
assert run.call_args.kwargs["cap_add"] == ["SYS_NICE", "SYS_RESOURCE"]
assert run.call_args.kwargs["ulimits"] == [
Ulimit(name="rtprio", soft=10, hard=10)
{"Name": "rtprio", "Soft": 10, "Hard": 10}
]

assert run.call_args.kwargs["mounts"] == [
DEV_MOUNT,
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_audio.as_posix(),
target="/data",
read_only=False,
),
DockerMount(
type=MountType.BIND,
source="/run/dbus",
target="/run/dbus",
read_only=True,
),
DockerMount(
type=MountType.BIND,
source="/run/udev",
target="/run/udev",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(type="bind", source="/run/dbus", target="/run/dbus", read_only=True),
Mount(type="bind", source="/run/udev", target="/run/udev", read_only=True),
Mount(
type="bind",
source="/etc/machine-id",
target="/etc/machine-id",
read_only=True,

@@ -2,22 +2,20 @@

from ipaddress import IPv4Address
from pathlib import Path
from unittest.mock import MagicMock, patch
from unittest.mock import patch

import pytest
from docker.types import Mount

from supervisor.coresys import CoreSys
from supervisor.docker.const import DockerMount, MountType
from supervisor.docker.manager import DockerAPI


@pytest.mark.usefixtures("path_extern")
async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, container: MagicMock):
async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, path_extern):
"""Test starting dns plugin."""
config_file = tmp_supervisor_data / "dns" / "coredns.json"
assert not config_file.exists()

with patch.object(DockerAPI, "run", return_value=container.attrs) as run:
with patch.object(DockerAPI, "run") as run:
await coresys.plugins.dns.start()

run.assert_called_once()
@@ -27,18 +25,13 @@ async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, container: Mag
assert run.call_args.kwargs["dns"] is False
assert run.call_args.kwargs["oom_score_adj"] == -300
assert run.call_args.kwargs["mounts"] == [
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_dns.as_posix(),
target="/config",
read_only=False,
),
DockerMount(
type=MountType.BIND,
source="/run/dbus",
target="/run/dbus",
read_only=True,
),
Mount(type="bind", source="/run/dbus", target="/run/dbus", read_only=True),
]
assert "volumes" not in run.call_args.kwargs


@@ -1,18 +1,13 @@
"""Test Home Assistant container."""

from ipaddress import IPv4Address
from pathlib import Path
from unittest.mock import ANY, MagicMock, patch

from awesomeversion import AwesomeVersion
import pytest
from docker.types import Mount

from supervisor.coresys import CoreSys
from supervisor.docker.const import (
DockerMount,
MountBindOptions,
MountType,
PropagationMode,
)
from supervisor.docker.homeassistant import DockerHomeAssistant
from supervisor.docker.manager import DockerAPI
from supervisor.homeassistant.const import LANDINGPAGE
@@ -20,13 +15,14 @@ from supervisor.homeassistant.const import LANDINGPAGE
from . import DEV_MOUNT


@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_homeassistant_start(coresys: CoreSys, container: MagicMock):
async def test_homeassistant_start(
coresys: CoreSys, tmp_supervisor_data: Path, path_extern
):
"""Test starting homeassistant."""
coresys.homeassistant.version = AwesomeVersion("2023.8.1")

with (
patch.object(DockerAPI, "run", return_value=container.attrs) as run,
patch.object(DockerAPI, "run") as run,
patch.object(
DockerHomeAssistant, "is_running", side_effect=[False, False, True]
),
@@ -50,68 +46,57 @@ async def test_homeassistant_start(coresys: CoreSys, container: MagicMock):
"TZ": ANY,
"SUPERVISOR_TOKEN": ANY,
"HASSIO_TOKEN": ANY,
# no "HA_DUPLICATE_LOG_FILE"
}
assert run.call_args.kwargs["mounts"] == [
DEV_MOUNT,
DockerMount(
type=MountType.BIND,
source="/run/dbus",
target="/run/dbus",
read_only=True,
),
DockerMount(
type=MountType.BIND,
source="/run/udev",
target="/run/udev",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(type="bind", source="/run/dbus", target="/run/dbus", read_only=True),
Mount(type="bind", source="/run/udev", target="/run/udev", read_only=True),
Mount(
type="bind",
source=coresys.config.path_extern_homeassistant.as_posix(),
target="/config",
read_only=False,
),
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_ssl.as_posix(),
target="/ssl",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_share.as_posix(),
target="/share",
read_only=False,
bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
propagation="rslave",
),
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_media.as_posix(),
target="/media",
read_only=False,
bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
propagation="rslave",
),
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.homeassistant.path_extern_pulse.as_posix(),
target="/etc/pulse/client.conf",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.plugins.audio.path_extern_pulse.as_posix(),
target="/run/audio",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.plugins.audio.path_extern_asound.as_posix(),
target="/etc/asound.conf",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source="/etc/machine-id",
target="/etc/machine-id",
read_only=True,
@@ -120,36 +105,14 @@ async def test_homeassistant_start(coresys: CoreSys, container: MagicMock):
assert "volumes" not in run.call_args.kwargs


@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_homeassistant_start_with_duplicate_log_file(
coresys: CoreSys, container: MagicMock
async def test_landingpage_start(
coresys: CoreSys, tmp_supervisor_data: Path, path_extern
):
"""Test starting homeassistant with duplicate_log_file enabled."""
coresys.homeassistant.version = AwesomeVersion("2025.12.0")
coresys.homeassistant.duplicate_log_file = True

with (
patch.object(DockerAPI, "run", return_value=container.attrs) as run,
patch.object(
DockerHomeAssistant, "is_running", side_effect=[False, False, True]
),
patch("supervisor.homeassistant.core.asyncio.sleep"),
):
await coresys.homeassistant.core.start()

run.assert_called_once()
env = run.call_args.kwargs["environment"]
assert "HA_DUPLICATE_LOG_FILE" in env
assert env["HA_DUPLICATE_LOG_FILE"] == "1"


@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_landingpage_start(coresys: CoreSys, container: MagicMock):
"""Test starting landingpage."""
coresys.homeassistant.version = LANDINGPAGE

with (
patch.object(DockerAPI, "run", return_value=container.attrs) as run,
patch.object(DockerAPI, "run") as run,
patch.object(DockerHomeAssistant, "is_running", return_value=False),
):
await coresys.homeassistant.core.start()
@@ -170,30 +133,19 @@ async def test_landingpage_start(coresys: CoreSys, container: MagicMock):
"TZ": ANY,
"SUPERVISOR_TOKEN": ANY,
"HASSIO_TOKEN": ANY,
# no "HA_DUPLICATE_LOG_FILE"
}
assert run.call_args.kwargs["mounts"] == [
DEV_MOUNT,
DockerMount(
type=MountType.BIND,
source="/run/dbus",
target="/run/dbus",
read_only=True,
),
DockerMount(
type=MountType.BIND,
source="/run/udev",
target="/run/udev",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(type="bind", source="/run/dbus", target="/run/dbus", read_only=True),
Mount(type="bind", source="/run/udev", target="/run/udev", read_only=True),
Mount(
type="bind",
source=coresys.config.path_extern_homeassistant.as_posix(),
target="/config",
read_only=False,
),
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source="/etc/machine-id",
target="/etc/machine-id",
read_only=True,

@@ -1,7 +1,6 @@
"""Test Docker interface."""

import asyncio
from http import HTTPStatus
from pathlib import Path
from typing import Any
from unittest.mock import ANY, AsyncMock, MagicMock, Mock, PropertyMock, call, patch
@@ -149,7 +148,7 @@ async def test_current_state(
container_collection = MagicMock()
container_collection.get.return_value = Container(attrs)
with patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
"supervisor.docker.manager.DockerAPI.containers",
new=PropertyMock(return_value=container_collection),
):
assert await coresys.homeassistant.core.instance.current_state() == expected
@@ -159,7 +158,7 @@ async def test_current_state_failures(coresys: CoreSys):
"""Test failure states for current state."""
container_collection = MagicMock()
with patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
"supervisor.docker.manager.DockerAPI.containers",
new=PropertyMock(return_value=container_collection),
):
container_collection.get.side_effect = NotFound("dne")
@@ -212,7 +211,7 @@ async def test_attach_existing_container(
container_collection.get.return_value = Container(attrs)
with (
patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
"supervisor.docker.manager.DockerAPI.containers",
new=PropertyMock(return_value=container_collection),
),
patch.object(type(coresys.bus), "fire_event") as fire_event,
@@ -254,7 +253,7 @@ async def test_attach_existing_container(

async def test_attach_container_failure(coresys: CoreSys):
"""Test attach fails to find container but finds image."""
coresys.docker.containers_legacy.get.side_effect = DockerException()
coresys.docker.containers.get.side_effect = DockerException()
coresys.docker.images.inspect.return_value.setdefault("Config", {})["Image"] = (
"sha256:abc123"
)
@@ -272,7 +271,7 @@ async def test_attach_container_failure(coresys: CoreSys):

async def test_attach_total_failure(coresys: CoreSys):
"""Test attach fails to find container or image."""
coresys.docker.containers_legacy.get.side_effect = DockerException
coresys.docker.containers.get.side_effect = DockerException
coresys.docker.images.inspect.side_effect = aiodocker.DockerError(
400, {"message": ""}
)
@@ -305,10 +304,8 @@ async def test_run_missing_image(
tmp_supervisor_data: Path,
):
"""Test run captures the exception when image is missing."""
coresys.docker.containers.create.side_effect = [
aiodocker.DockerError(HTTPStatus.NOT_FOUND, {"message": "missing"}),
MagicMock(),
]
coresys.docker.containers.create.side_effect = [NotFound("missing"), MagicMock()]
container.status = "stopped"
install_addon_ssh.data["image"] = "test_image"

with pytest.raises(DockerNotFound):
@@ -727,11 +724,18 @@ async def test_install_progress_handles_layers_skipping_download(
await install_task
await event.wait()

# First update from layer download should have rather low progress ((260937/25445459) / 2 ~ 0.5%)
assert install_job_snapshots[0]["progress"] < 1
# With the new progress calculation approach:
# - Progress is weighted by layer size
# - Small layers that skip downloading get minimal size (1 byte)
# - Progress should increase monotonically
assert len(install_job_snapshots) > 0

# Total 8 events should lead to a progress update on the install job
assert len(install_job_snapshots) == 8
# Verify progress is monotonically increasing (or stable)
for i in range(1, len(install_job_snapshots)):
assert (
install_job_snapshots[i]["progress"]
>= install_job_snapshots[i - 1]["progress"]
)

# Job should complete successfully
assert job.done is True
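
The comments in this hunk describe the size-weighted progress model. A minimal sketch of that aggregation, with illustrative names only (the Supervisor's actual internals may differ):

def overall_progress(layers: dict[str, tuple[int, float]]) -> float:
    """Aggregate per-layer progress (0-100), weighted by layer size in bytes.

    Layers that skipped downloading carry a minimal 1-byte weight so they
    cannot dominate the total; the result is monotonic as long as each
    layer's own progress never decreases.
    """
    total = sum(max(size, 1) for size, _ in layers.values())
    done = sum(max(size, 1) * pct for size, pct in layers.values())
    return done / total if total else 0.0

# A 25 MB layer at 1% plus a skipped (1-byte) layer at 100% is still ~1% overall.
print(overall_progress({"layer1": (25_445_459, 1.0), "layer2": (1, 100.0)}))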
@@ -867,24 +871,24 @@ async def test_install_progress_containerd_snapshot(
}

assert [c.args[0] for c in ha_ws_client.async_send_command.call_args_list] == [
# During downloading we get continuous progress updates from download status
# Count-based progress: 2 layers, each = 50%. Download = 0-35%, Extract = 35-50%
job_event(0),
job_event(1.7),
job_event(3.4),
job_event(8.5),
job_event(8.4),
job_event(10.2),
job_event(15.3),
job_event(18.8),
job_event(29.0),
job_event(35.8),
job_event(42.6),
job_event(49.5),
job_event(56.0),
job_event(62.8),
# Downloading phase is considered 70% of total. After we only get one update
# per image downloaded when extraction is finished. It uses the total size
# received during downloading to determine percent complete then.
job_event(15.2),
job_event(18.7),
job_event(28.8),
job_event(35.7),
job_event(42.4),
job_event(49.3),
job_event(55.8),
job_event(62.7),
# Downloading phase is considered 70% of layer's progress.
# After download complete, extraction takes remaining 30% per layer.
job_event(70.0),
job_event(84.8),
job_event(85.0),
job_event(100),
job_event(100, True),
]

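The expected job_event values follow the 70/30 split stated in the comments above: downloading covers 70% of a layer's progress and extraction the remaining 30%. A worked illustration with an assumed helper (not Supervisor code):

def layer_progress(download_pct: float, extract_pct: float) -> float:
    """Combine download and extract percentages (0-100) into layer progress."""
    return (70 * download_pct + 30 * extract_pct) / 100

assert layer_progress(100, 0) == 70.0    # download done, extraction pending
assert layer_progress(100, 50) == 85.0   # matches the 84.8/85.0 region above
assert layer_progress(100, 100) == 100.0 # layer fully pulled and extracted
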
@@ -4,7 +4,6 @@ import asyncio
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch

from aiodocker.containers import DockerContainer
from docker.errors import APIError, DockerException, NotFound
import pytest
from requests import RequestException
@@ -140,38 +139,40 @@ async def test_run_command_custom_stdout_stderr(docker: DockerAPI):
assert result.output == b"output"


@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
async def test_run_container_with_cidfile(coresys: CoreSys, docker: DockerAPI):
async def test_run_container_with_cidfile(
coresys: CoreSys, docker: DockerAPI, path_extern, tmp_supervisor_data
):
"""Test container creation with cidfile and bind mount."""
# Mock container
mock_container = MagicMock(spec=DockerContainer, id="test_container_id_12345")
mock_container.show.return_value = mock_metadata = {"Id": mock_container.id}
mock_container = MagicMock()
mock_container.id = "test_container_id_12345"

container_name = "test_container"
cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"
extern_cidfile_path = coresys.config.path_extern_cid_files / f"{container_name}.cid"

docker.containers.create.return_value = mock_container
docker.dockerpy.containers.run.return_value = mock_container

# Mock container creation
with patch.object(
docker.containers, "create", return_value=mock_container
) as create_mock:
# Execute run with a container name
result = await docker.run("test_image", tag="latest", name=container_name)
loop = asyncio.get_event_loop()
result = await loop.run_in_executor(
None,
lambda kwargs: docker.run(**kwargs),
{"image": "test_image", "tag": "latest", "name": container_name},
|
||||
)
|
||||
|
||||
# Check the container creation parameters
|
||||
create_mock.assert_called_once()
|
||||
create_config = create_mock.call_args.args[0]
|
||||
kwargs = create_mock.call_args[1]
|
||||
|
||||
assert "HostConfig" in create_config
|
||||
assert "Mounts" in create_config["HostConfig"]
|
||||
assert {
|
||||
"Type": "bind",
|
||||
"Source": str(extern_cidfile_path),
|
||||
"Target": "/run/cid",
|
||||
"ReadOnly": True,
|
||||
} in create_config["HostConfig"]["Mounts"]
|
||||
assert "volumes" in kwargs
|
||||
assert str(extern_cidfile_path) in kwargs["volumes"]
|
||||
assert kwargs["volumes"][str(extern_cidfile_path)]["bind"] == "/run/cid"
|
||||
assert kwargs["volumes"][str(extern_cidfile_path)]["mode"] == "ro"
|
||||
|
||||
# Verify container start was called
|
||||
mock_container.start.assert_called_once()
|
||||
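
The assertions above pin down the cidfile contract: the new container's id is written to a host file that is bind-mounted read-only into the container at /run/cid. A rough sketch of the write side under assumed names (the Supervisor's actual helper may differ):

import shutil
from pathlib import Path

def write_cidfile(path: Path, container_id: str) -> None:
    """Write the container id, clearing any leftover file or directory first."""
    if path.is_dir():
        # A bind mount against a missing source file leaves a directory behind.
        shutil.rmtree(path)
    else:
        path.unlink(missing_ok=True)  # leftover regular file, if any
    path.write_text(container_id)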
@@ -180,15 +181,16 @@ async def test_run_container_with_cidfile(coresys: CoreSys, docker: DockerAPI):
assert cidfile_path.exists()
assert cidfile_path.read_text() == mock_container.id

assert result == mock_metadata
assert result == mock_container


@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
async def test_run_container_with_leftover_cidfile(coresys: CoreSys, docker: DockerAPI):
async def test_run_container_with_leftover_cidfile(
coresys: CoreSys, docker: DockerAPI, path_extern, tmp_supervisor_data
):
"""Test container creation removes leftover cidfile before creating new one."""
# Mock container
mock_container = MagicMock(spec=DockerContainer, id="test_container_id_new")
mock_container.show.return_value = mock_metadata = {"Id": mock_container.id}
mock_container = MagicMock()
mock_container.id = "test_container_id_new"

container_name = "test_container"
cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"
@@ -201,7 +203,12 @@ async def test_run_container_with_leftover_cidfile(coresys: CoreSys, docker: Doc
docker.containers, "create", return_value=mock_container
) as create_mock:
# Execute run with a container name
result = await docker.run("test_image", tag="latest", name=container_name)
loop = asyncio.get_event_loop()
result = await loop.run_in_executor(
None,
lambda kwargs: docker.run(**kwargs),
{"image": "test_image", "tag": "latest", "name": container_name},
|
||||
)
|
||||
|
||||
# Verify container was created
|
||||
create_mock.assert_called_once()
|
||||
@@ -210,7 +217,7 @@ async def test_run_container_with_leftover_cidfile(coresys: CoreSys, docker: Doc
|
||||
assert cidfile_path.exists()
|
||||
assert cidfile_path.read_text() == mock_container.id
|
||||
|
||||
assert result == mock_metadata
|
||||
assert result == mock_container
|
||||
|
||||
|
||||
async def test_stop_container_with_cidfile_cleanup(
|
||||
@@ -229,7 +236,7 @@ async def test_stop_container_with_cidfile_cleanup(
|
||||
|
||||
# Mock the containers.get method and cidfile cleanup
|
||||
with (
|
||||
patch.object(docker.containers_legacy, "get", return_value=mock_container),
|
||||
patch.object(docker.containers, "get", return_value=mock_container),
|
||||
):
|
||||
# Call stop_container with remove_container=True
|
||||
loop = asyncio.get_event_loop()
|
||||
@@ -256,7 +263,7 @@ async def test_stop_container_without_removal_no_cidfile_cleanup(docker: DockerA
|
||||
|
||||
# Mock the containers.get method and cidfile cleanup
|
||||
with (
|
||||
patch.object(docker.containers_legacy, "get", return_value=mock_container),
|
||||
patch.object(docker.containers, "get", return_value=mock_container),
|
||||
patch("pathlib.Path.unlink") as mock_unlink,
|
||||
):
|
||||
# Call stop_container with remove_container=False
|
||||
@@ -270,8 +277,9 @@ async def test_stop_container_without_removal_no_cidfile_cleanup(docker: DockerA
|
||||
mock_unlink.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
|
||||
async def test_cidfile_cleanup_handles_oserror(coresys: CoreSys, docker: DockerAPI):
|
||||
async def test_cidfile_cleanup_handles_oserror(
|
||||
coresys: CoreSys, docker: DockerAPI, path_extern, tmp_supervisor_data
|
||||
):
|
||||
"""Test that cidfile cleanup handles OSError gracefully."""
|
||||
# Mock container
|
||||
mock_container = MagicMock()
|
||||
@@ -285,7 +293,7 @@ async def test_cidfile_cleanup_handles_oserror(coresys: CoreSys, docker: DockerA
|
||||
|
||||
# Mock the containers.get method and cidfile cleanup to raise OSError
|
||||
with (
|
||||
patch.object(docker.containers_legacy, "get", return_value=mock_container),
|
||||
patch.object(docker.containers, "get", return_value=mock_container),
|
||||
patch("pathlib.Path.is_dir", return_value=False),
|
||||
patch("pathlib.Path.is_file", return_value=True),
|
||||
patch(
|
||||
@@ -303,9 +311,8 @@ async def test_cidfile_cleanup_handles_oserror(coresys: CoreSys, docker: DockerA
|
||||
mock_unlink.assert_called_once_with(missing_ok=True)
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
|
||||
async def test_run_container_with_leftover_cidfile_directory(
|
||||
coresys: CoreSys, docker: DockerAPI
|
||||
coresys: CoreSys, docker: DockerAPI, path_extern, tmp_supervisor_data
|
||||
):
|
||||
"""Test container creation removes leftover cidfile directory before creating new one.
|
||||
|
||||
@@ -314,8 +321,8 @@ async def test_run_container_with_leftover_cidfile_directory(
|
||||
the bind mount source as a directory.
|
||||
"""
|
||||
# Mock container
|
||||
mock_container = MagicMock(spec=DockerContainer, id="test_container_id_new")
|
||||
mock_container.show.return_value = mock_metadata = {"Id": mock_container.id}
|
||||
mock_container = MagicMock()
|
||||
mock_container.id = "test_container_id_new"
|
||||
|
||||
container_name = "test_container"
|
||||
cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"
|
||||
@@ -329,7 +336,12 @@ async def test_run_container_with_leftover_cidfile_directory(
|
||||
docker.containers, "create", return_value=mock_container
|
||||
) as create_mock:
|
||||
# Execute run with a container name
|
||||
result = await docker.run("test_image", tag="latest", name=container_name)
|
||||
loop = asyncio.get_event_loop()
|
||||
result = await loop.run_in_executor(
|
||||
None,
|
||||
lambda kwargs: docker.run(**kwargs),
{"image": "test_image", "tag": "latest", "name": container_name},
|
||||
)
|
||||
|
||||
# Verify container was created
|
||||
create_mock.assert_called_once()
|
||||
@@ -339,7 +351,7 @@ async def test_run_container_with_leftover_cidfile_directory(
|
||||
assert cidfile_path.is_file()
|
||||
assert cidfile_path.read_text() == mock_container.id
|
||||
|
||||
assert result == mock_metadata
|
||||
assert result == mock_container
|
||||
|
||||
|
||||
async def test_repair(coresys: CoreSys, caplog: pytest.LogCaptureFixture):
|
||||
|
||||
143
tests/docker/test_manifest.py
Normal file
@@ -0,0 +1,143 @@
"""Tests for registry manifest fetcher."""

from unittest.mock import AsyncMock, MagicMock, patch

from supervisor.coresys import CoreSys
from supervisor.docker.manifest import (
DOCKER_HUB,
ImageManifest,
RegistryManifestFetcher,
parse_image_reference,
)


def test_parse_image_reference_ghcr_io():
"""Test parsing ghcr.io image."""
registry, repo, tag = parse_image_reference(
"ghcr.io/home-assistant/home-assistant", "2025.1.0"
)
assert registry == "ghcr.io"
assert repo == "home-assistant/home-assistant"
assert tag == "2025.1.0"


def test_parse_image_reference_docker_hub_with_org():
"""Test parsing Docker Hub image with organization."""
registry, repo, tag = parse_image_reference(
"homeassistant/home-assistant", "latest"
)
assert registry == DOCKER_HUB
assert repo == "homeassistant/home-assistant"
assert tag == "latest"


def test_parse_image_reference_docker_hub_official_image():
"""Test parsing Docker Hub official image (no org)."""
registry, repo, tag = parse_image_reference("alpine", "3.18")
assert registry == DOCKER_HUB
assert repo == "library/alpine"
assert tag == "3.18"


def test_parse_image_reference_gcr_io():
"""Test parsing gcr.io image."""
registry, repo, tag = parse_image_reference("gcr.io/project/image", "v1")
assert registry == "gcr.io"
assert repo == "project/image"
assert tag == "v1"


def test_image_manifest_layer_count():
"""Test ImageManifest layer_count property."""
manifest = ImageManifest(
digest="sha256:abc",
total_size=1000,
layers={"layer1": 500, "layer2": 500},
)
assert manifest.layer_count == 2


async def test_get_manifest_success(coresys: CoreSys, websession: MagicMock):
"""Test successful manifest fetch by mocking internal methods."""
fetcher = RegistryManifestFetcher(coresys)
manifest_data = {
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {"digest": "sha256:abc123"},
"layers": [
{"digest": "sha256:layer1abc123def456789012", "size": 1000},
{"digest": "sha256:layer2def456abc789012345", "size": 2000},
],
}

# Mock the internal methods
with (
patch.object(
fetcher, "_get_auth_token", new=AsyncMock(return_value="test-token")
),
patch.object(
fetcher, "_fetch_manifest", new=AsyncMock(return_value=manifest_data)
),
):
result = await fetcher.get_manifest(
"test.io/org/image", "v1.0", platform="linux/amd64"
)

assert result is not None
assert result.total_size == 3000
assert result.layer_count == 2
# First 12 chars after sha256:
assert "layer1abc123" in result.layers
assert result.layers["layer1abc123"] == 1000


async def test_get_manifest_returns_none_on_failure(
coresys: CoreSys, websession: MagicMock
):
"""Test that get_manifest returns None on failure."""
fetcher = RegistryManifestFetcher(coresys)

with (
patch.object(
fetcher, "_get_auth_token", new=AsyncMock(return_value="test-token")
),
patch.object(fetcher, "_fetch_manifest", new=AsyncMock(return_value=None)),
):
result = await fetcher.get_manifest(
"test.io/org/image", "v1.0", platform="linux/amd64"
)

assert result is None


def test_get_credentials_docker_hub(coresys: CoreSys, websession: MagicMock):
"""Test getting Docker Hub credentials."""
coresys.docker.config._data["registries"] = {  # pylint: disable=protected-access
"docker.io": {"username": "user", "password": "pass"}
}
fetcher = RegistryManifestFetcher(coresys)

creds = fetcher._get_credentials(DOCKER_HUB)  # pylint: disable=protected-access

assert creds == ("user", "pass")


def test_get_credentials_custom_registry(coresys: CoreSys, websession: MagicMock):
"""Test getting credentials for custom registry."""
coresys.docker.config._data["registries"] = {  # pylint: disable=protected-access
"ghcr.io": {"username": "user", "password": "token"}
}
fetcher = RegistryManifestFetcher(coresys)

creds = fetcher._get_credentials("ghcr.io")  # pylint: disable=protected-access

assert creds == ("user", "token")


def test_get_credentials_not_found(coresys: CoreSys, websession: MagicMock):
"""Test no credentials found."""
coresys.docker.config._data["registries"] = {}  # pylint: disable=protected-access
fetcher = RegistryManifestFetcher(coresys)

creds = fetcher._get_credentials("unknown.io")  # pylint: disable=protected-access

assert creds is None
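
These tests pin down the reference-parsing rules: an explicit registry host is split off, everything else defaults to Docker Hub, bare official images gain the library/ prefix, and layer digests are keyed by the first 12 hex characters after sha256:. A minimal sketch of the parsing rule (the DOCKER_HUB value is an assumption here, and the real supervisor.docker.manifest implementation may differ):

DOCKER_HUB = "registry-1.docker.io"  # assumed value of the real constant

def parse_image_reference(image: str, tag: str) -> tuple[str, str, str]:
    """Split an image reference into (registry, repository, tag)."""
    first, _, rest = image.partition("/")
    # A registry host is recognizable by a dot or a port (ghcr.io, host:5000).
    if rest and ("." in first or ":" in first):
        return first, rest, tag
    # Docker Hub: official images without an organization live under "library/".
    repo = image if "/" in image else f"library/{image}"
    return DOCKER_HUB, repo, tag
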
@@ -120,7 +120,7 @@ async def test_unlabeled_container(coresys: CoreSys):
}
)
with patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
"supervisor.docker.manager.DockerAPI.containers",
new=PropertyMock(return_value=container_collection),
):
await coresys.homeassistant.core.instance.attach(AwesomeVersion("2022.7.3"))

@@ -1,16 +1,17 @@
"""Test Observer plugin container."""

from ipaddress import IPv4Address, ip_network
from unittest.mock import MagicMock, patch
from unittest.mock import patch

from docker.types import Mount

from supervisor.coresys import CoreSys
from supervisor.docker.const import DockerMount, MountType
from supervisor.docker.manager import DockerAPI


async def test_start(coresys: CoreSys, container: MagicMock):
async def test_start(coresys: CoreSys):
"""Test starting observer plugin."""
with patch.object(DockerAPI, "run", return_value=container.attrs) as run:
with patch.object(DockerAPI, "run") as run:
await coresys.plugins.observer.start()

run.assert_called_once()
@@ -27,8 +28,8 @@ async def test_start(coresys: CoreSys, container: MagicMock):
)
assert run.call_args.kwargs["ports"] == {"80/tcp": 4357}
assert run.call_args.kwargs["mounts"] == [
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source="/run/docker.sock",
target="/run/docker.sock",
read_only=True,

1002
tests/docker/test_pull_progress.py
Normal file
File diff suppressed because it is too large
@@ -238,7 +238,6 @@ async def test_install_other_error(
@pytest.mark.usefixtures("path_extern")
async def test_start(
coresys: CoreSys,
container: MagicMock,
container_exc: DockerException | None,
image_exc: aiodocker.DockerError | None,
remove_calls: list[call],
@@ -246,8 +245,8 @@ async def test_start(
"""Test starting Home Assistant."""
coresys.docker.images.inspect.return_value = {"Id": "123"}
coresys.docker.images.inspect.side_effect = image_exc
coresys.docker.containers_legacy.get.return_value.id = "123"
coresys.docker.containers_legacy.get.side_effect = container_exc
coresys.docker.containers.get.return_value.id = "123"
coresys.docker.containers.get.side_effect = container_exc

with (
patch.object(
@@ -255,7 +254,7 @@ async def test_start(
"version",
new=PropertyMock(return_value=AwesomeVersion("2023.7.0")),
),
patch.object(DockerAPI, "run", return_value=container.attrs) as run,
patch.object(DockerAPI, "run") as run,
patch.object(HomeAssistantCore, "_block_till_run") as block_till_run,
):
await coresys.homeassistant.core.start()
@@ -269,18 +268,17 @@ async def test_start(
assert run.call_args.kwargs["name"] == "homeassistant"
assert run.call_args.kwargs["hostname"] == "homeassistant"

coresys.docker.containers_legacy.get.return_value.stop.assert_not_called()
coresys.docker.containers.get.return_value.stop.assert_not_called()
assert (
coresys.docker.containers_legacy.get.return_value.remove.call_args_list
== remove_calls
coresys.docker.containers.get.return_value.remove.call_args_list == remove_calls
)


async def test_start_existing_container(coresys: CoreSys, path_extern):
"""Test starting Home Assistant when container exists and is viable."""
coresys.docker.images.inspect.return_value = {"Id": "123"}
coresys.docker.containers_legacy.get.return_value.image.id = "123"
coresys.docker.containers_legacy.get.return_value.status = "exited"
coresys.docker.containers.get.return_value.image.id = "123"
coresys.docker.containers.get.return_value.status = "exited"

with (
patch.object(
@@ -293,29 +291,29 @@ async def test_start_existing_container(coresys: CoreSys, path_extern):
await coresys.homeassistant.core.start()
block_till_run.assert_called_once()

coresys.docker.containers_legacy.get.return_value.start.assert_called_once()
coresys.docker.containers_legacy.get.return_value.stop.assert_not_called()
coresys.docker.containers_legacy.get.return_value.remove.assert_not_called()
coresys.docker.containers_legacy.get.return_value.run.assert_not_called()
coresys.docker.containers.get.return_value.start.assert_called_once()
coresys.docker.containers.get.return_value.stop.assert_not_called()
coresys.docker.containers.get.return_value.remove.assert_not_called()
coresys.docker.containers.get.return_value.run.assert_not_called()


@pytest.mark.parametrize("exists", [True, False])
async def test_stop(coresys: CoreSys, exists: bool):
"""Test stoppping Home Assistant."""
|
||||
if exists:
coresys.docker.containers_legacy.get.return_value.status = "running"
coresys.docker.containers.get.return_value.status = "running"
else:
coresys.docker.containers_legacy.get.side_effect = NotFound("missing")
coresys.docker.containers.get.side_effect = NotFound("missing")

await coresys.homeassistant.core.stop()

coresys.docker.containers_legacy.get.return_value.remove.assert_not_called()
coresys.docker.containers.get.return_value.remove.assert_not_called()
if exists:
coresys.docker.containers_legacy.get.return_value.stop.assert_called_once_with(
coresys.docker.containers.get.return_value.stop.assert_called_once_with(
timeout=260
)
else:
coresys.docker.containers_legacy.get.return_value.stop.assert_not_called()
coresys.docker.containers.get.return_value.stop.assert_not_called()


async def test_restart(coresys: CoreSys):
@@ -324,20 +322,18 @@ async def test_restart(coresys: CoreSys):
await coresys.homeassistant.core.restart()
block_till_run.assert_called_once()

coresys.docker.containers_legacy.get.return_value.restart.assert_called_once_with(
coresys.docker.containers.get.return_value.restart.assert_called_once_with(
timeout=260
)
coresys.docker.containers_legacy.get.return_value.stop.assert_not_called()
coresys.docker.containers.get.return_value.stop.assert_not_called()


@pytest.mark.parametrize("get_error", [NotFound("missing"), DockerException(), None])
async def test_restart_failures(coresys: CoreSys, get_error: DockerException | None):
"""Test restart fails when container missing or can't be restarted."""
coresys.docker.containers_legacy.get.return_value.restart.side_effect = (
DockerException()
)
coresys.docker.containers.get.return_value.restart.side_effect = DockerException()
if get_error:
coresys.docker.containers_legacy.get.side_effect = get_error
coresys.docker.containers.get.side_effect = get_error

with pytest.raises(HomeAssistantError):
await coresys.homeassistant.core.restart()
@@ -356,12 +352,10 @@ async def test_stats_failures(
coresys: CoreSys, get_error: DockerException | None, status: str
):
"""Test errors when getting stats."""
coresys.docker.containers_legacy.get.return_value.status = status
coresys.docker.containers_legacy.get.return_value.stats.side_effect = (
DockerException()
)
coresys.docker.containers.get.return_value.status = status
coresys.docker.containers.get.return_value.stats.side_effect = DockerException()
if get_error:
coresys.docker.containers_legacy.get.side_effect = get_error
coresys.docker.containers.get.side_effect = get_error

with pytest.raises(HomeAssistantError):
await coresys.homeassistant.core.stats()
@@ -393,7 +387,7 @@ async def test_api_check_timeout(
):
await coresys.homeassistant.core.start()

assert coresys.homeassistant.api.get_api_state.call_count == 10
assert coresys.homeassistant.api.get_api_state.call_count == 3
assert (
"No Home Assistant Core response, assuming a fatal startup error" in caplog.text
)

@@ -1,6 +1,7 @@
"""Test base plugin functionality."""

import asyncio
from pathlib import Path
from unittest.mock import ANY, MagicMock, Mock, PropertyMock, call, patch

from awesomeversion import AwesomeVersion
@@ -158,13 +159,15 @@ async def test_plugin_watchdog(coresys: CoreSys, plugin: PluginBase) -> None:
],
indirect=["plugin"],
)
@pytest.mark.usefixtures("coresys", "tmp_supervisor_data", "path_extern")
async def test_plugin_watchdog_max_failed_attempts(
coresys: CoreSys,
capture_exception: Mock,
plugin: PluginBase,
error: PluginError,
container: MagicMock,
caplog: pytest.LogCaptureFixture,
tmp_supervisor_data: Path,
path_extern,
) -> None:
"""Test plugin watchdog gives up after max failed attempts."""
with patch.object(type(plugin.instance), "attach"):

@@ -76,7 +76,7 @@ async def test_check(
docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon, folder: str
):
"""Test check reports issue when containers have incorrect config."""
docker.containers_legacy.get = _make_mock_container_get(
docker.containers.get = _make_mock_container_get(
["homeassistant", "hassio_audio", "addon_local_ssh"], folder
)
# Use state used in setup()
@@ -132,7 +132,7 @@ async def test_check(
assert await docker_config.approve_check()

# If the config issue is resolved, all issues are removed except the main one, which will be removed if the check isn't approved
docker.containers_legacy.get = _make_mock_container_get([])
docker.containers.get = _make_mock_container_get([])
with patch.object(DockerInterface, "is_running", return_value=True):
await coresys.plugins.load()
await coresys.homeassistant.load()
@@ -159,7 +159,7 @@ async def test_addon_volume_mount_not_flagged(
] # No media/share

# Mock container that has VOLUME mount to media/share with wrong propagation
docker.containers_legacy.get = _make_mock_container_get_with_volume_mount(
docker.containers.get = _make_mock_container_get_with_volume_mount(
["addon_local_ssh"], folder
)

@@ -221,7 +221,7 @@ async def test_addon_configured_mount_still_flagged(
out.attrs["Mounts"].append(mount)
return out

docker.containers_legacy.get = mock_container_get
docker.containers.get = mock_container_get

await coresys.core.set_state(CoreState.SETUP)
with patch.object(DockerInterface, "is_running", return_value=True):
@@ -275,7 +275,7 @@ async def test_addon_custom_target_path_flagged(
out.attrs["Mounts"].append(mount)
return out

docker.containers_legacy.get = mock_container_get
docker.containers.get = mock_container_get

await coresys.core.set_state(CoreState.SETUP)
with patch.object(DockerInterface, "is_running", return_value=True):

@@ -30,7 +30,7 @@ async def test_evaluation(coresys: CoreSys):
assert container.reason not in coresys.resolution.unsupported
assert UnhealthyReason.DOCKER not in coresys.resolution.unhealthy

coresys.docker.containers_legacy.list.return_value = [
coresys.docker.containers.list.return_value = [
_make_image_attr("armhfbuild/watchtower:latest"),
_make_image_attr("concerco/watchtowerv6:10.0.2"),
_make_image_attr("containrrr/watchtower:1.1"),
@@ -47,7 +47,7 @@ async def test_evaluation(coresys: CoreSys):
"pyouroboros/ouroboros:1.4.3",
}

coresys.docker.containers_legacy.list.return_value = []
coresys.docker.containers.list.return_value = []
await container()
assert container.reason not in coresys.resolution.unsupported

@@ -62,7 +62,7 @@ async def test_corrupt_docker(coresys: CoreSys):
corrupt_docker = Issue(IssueType.CORRUPT_DOCKER, ContextType.SYSTEM)
assert corrupt_docker not in coresys.resolution.issues

coresys.docker.containers_legacy.list.side_effect = DockerException
coresys.docker.containers.list.side_effect = DockerException
await container()
assert corrupt_docker in coresys.resolution.issues


@@ -33,7 +33,7 @@ async def test_evaluation(coresys: CoreSys, install_addon_ssh: Addon):
meta.attrs = observer_attrs if name == "hassio_observer" else addon_attrs
return meta

coresys.docker.containers_legacy.get = get_container
coresys.docker.containers.get = get_container
await coresys.plugins.observer.instance.attach(TEST_VERSION)
await install_addon_ssh.instance.attach(TEST_VERSION)


@@ -31,7 +31,7 @@ async def _mock_wait_for_container() -> None:

async def test_fixup(docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon):
"""Test fixup rebuilds addon's container."""
docker.containers_legacy.get = make_mock_container_get("running")
docker.containers.get = make_mock_container_get("running")

addon_execute_rebuild = FixupAddonExecuteRebuild(coresys)

@@ -61,7 +61,7 @@ async def test_fixup_stopped_core(
):
"""Test fixup just removes addon's container when it is stopped."""
caplog.clear()
docker.containers_legacy.get = make_mock_container_get("stopped")
docker.containers.get = make_mock_container_get("stopped")
addon_execute_rebuild = FixupAddonExecuteRebuild(coresys)

coresys.resolution.create_issue(
@@ -76,7 +76,7 @@ async def test_fixup_stopped_core(

assert not coresys.resolution.issues
assert not coresys.resolution.suggestions
docker.containers_legacy.get("addon_local_ssh").remove.assert_called_once_with(
docker.containers.get("addon_local_ssh").remove.assert_called_once_with(
force=True, v=True
)
assert "Addon local_ssh is stopped" in caplog.text
@@ -90,7 +90,7 @@ async def test_fixup_unknown_core(
):
"""Test fixup does nothing if addon's container has already been removed."""
caplog.clear()
docker.containers_legacy.get.side_effect = NotFound("")
docker.containers.get.side_effect = NotFound("")
addon_execute_rebuild = FixupAddonExecuteRebuild(coresys)

coresys.resolution.create_issue(

@@ -27,7 +27,7 @@ def make_mock_container_get(status: str):

async def test_fixup(docker: DockerAPI, coresys: CoreSys):
"""Test fixup rebuilds core's container."""
docker.containers_legacy.get = make_mock_container_get("running")
docker.containers.get = make_mock_container_get("running")

core_execute_rebuild = FixupCoreExecuteRebuild(coresys)

@@ -51,7 +51,7 @@ async def test_fixup_stopped_core(
):
"""Test fixup just removes HA's container when it is stopped."""
caplog.clear()
docker.containers_legacy.get = make_mock_container_get("stopped")
docker.containers.get = make_mock_container_get("stopped")
core_execute_rebuild = FixupCoreExecuteRebuild(coresys)

coresys.resolution.create_issue(
@@ -65,7 +65,7 @@ async def test_fixup_stopped_core(

assert not coresys.resolution.issues
assert not coresys.resolution.suggestions
docker.containers_legacy.get("homeassistant").remove.assert_called_once_with(
docker.containers.get("homeassistant").remove.assert_called_once_with(
force=True, v=True
)
assert "Home Assistant is stopped" in caplog.text
@@ -76,7 +76,7 @@ async def test_fixup_unknown_core(
):
"""Test fixup does nothing if core's container has already been removed."""
caplog.clear()
docker.containers_legacy.get.side_effect = NotFound("")
docker.containers.get.side_effect = NotFound("")
core_execute_rebuild = FixupCoreExecuteRebuild(coresys)

coresys.resolution.create_issue(

@@ -28,7 +28,7 @@ def make_mock_container_get(status: str):
@pytest.mark.parametrize("status", ["running", "stopped"])
async def test_fixup(docker: DockerAPI, coresys: CoreSys, status: str):
"""Test fixup rebuilds plugin's container regardless of current state."""
docker.containers_legacy.get = make_mock_container_get(status)
docker.containers.get = make_mock_container_get(status)

plugin_execute_rebuild = FixupPluginExecuteRebuild(coresys)
