mirror of https://github.com/home-assistant/supervisor.git
synced 2025-12-03 06:28:23 +00:00

Compare commits (7 commits): refactor-d...copilot/su
| Author | SHA1 | Date |
|---|---|---|
| | 8487e8ee1c | |
| | a351b09757 | |
| | 743b1c3b92 | |
| | 5f6d7c230f | |
| | e7be2bfd0d | |
| | 8486354eba | |
| | 65b10c1931 | |
.github/workflows/builder.yml (4 changes, vendored)

@@ -165,7 +165,7 @@ jobs:

       # home-assistant/builder doesn't support sha pinning
       - name: Build supervisor
-        uses: home-assistant/builder@2025.11.0
+        uses: home-assistant/builder@2025.09.0
         with:
           args: |
             $BUILD_ARGS \
@@ -211,7 +211,7 @@ jobs:
       # home-assistant/builder doesn't support sha pinning
       - name: Build the Supervisor
         if: needs.init.outputs.publish != 'true'
-        uses: home-assistant/builder@2025.11.0
+        uses: home-assistant/builder@2025.09.0
         with:
           args: |
             --test \
Dockerfile (14 changes)

@@ -8,7 +8,9 @@ ENV \
     UV_SYSTEM_PYTHON=true

 ARG \
-    COSIGN_VERSION
+    COSIGN_VERSION \
+    BUILD_ARCH \
+    QEMU_CPU

 # Install base
 WORKDIR /usr/src
@@ -30,9 +32,15 @@ RUN \
     && pip3 install uv==0.8.9

 # Install requirements
+COPY requirements.txt .
 RUN \
-    --mount=type=bind,source=./requirements.txt,target=/usr/src/requirements.txt \
-    uv pip install --compile-bytecode --no-cache --no-build -r requirements.txt
+    if [ "${BUILD_ARCH}" = "i386" ]; then \
+        setarch="linux32"; \
+    else \
+        setarch=""; \
+    fi \
+    && ${setarch} uv pip install --compile-bytecode --no-cache --no-build -r requirements.txt \
+    && rm -f requirements.txt

 # Install Home Assistant Supervisor
 COPY . supervisor
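The i386 branch added in the new RUN step presumably exists because pip/uv select wheels based on the machine type the kernel reports, and `setarch linux32` flips that report on a 64-bit build host. A minimal Python sketch of the effect (illustrative only, not part of the diff):

```python
# Hypothetical illustration of what `setarch linux32` changes for wheel selection:
# resolvers such as pip/uv derive the target platform from the reported machine type.
import platform

# Prints "x86_64" in a normal shell on an amd64 host, but "i686" when the same
# interpreter is launched via `setarch linux32 python3`, steering 32-bit wheels.
print(platform.machine())
```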
pyproject.toml

@@ -321,6 +321,8 @@ lint.ignore = [
     "PLW2901", # Outer {outer_kind} variable {name} overwritten by inner {inner_kind} target
     "UP006", # keep type annotation style as is
     "UP007", # keep type annotation style as is
+    # Ignored due to performance: https://github.com/charliermarsh/ruff/issues/2923
+    "UP038", # Use `X | Y` in `isinstance` call instead of `(X, Y)`

     # May conflict with the formatter, https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules
     "W191",
requirements_tests.txt

@@ -1,16 +1,16 @@
 astroid==4.0.2
 coverage==7.12.0
-mypy==1.19.0
+mypy==1.18.2
 pre-commit==4.5.0
-pylint==4.0.4
+pylint==4.0.3
 pytest-aiohttp==1.1.0
 pytest-asyncio==1.3.0
 pytest-cov==7.0.0
 pytest-timeout==2.4.0
 pytest==9.0.1
-ruff==0.14.7
+ruff==0.14.6
 time-machine==3.1.0
-types-docker==7.1.0.20251129
+types-docker==7.1.0.20251125
 types-pyyaml==6.0.12.20250915
 types-requests==2.32.4.20250913
 urllib3==2.5.0
supervisor/addons/build.py

@@ -20,7 +20,6 @@ from ..const import (
     FILE_SUFFIX_CONFIGURATION,
     META_ADDON,
     SOCKET_DOCKER,
-    CpuArch,
 )
 from ..coresys import CoreSys, CoreSysAttributes
 from ..docker.const import DOCKER_HUB
@@ -68,7 +67,7 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
         raise RuntimeError()

     @cached_property
-    def arch(self) -> CpuArch:
+    def arch(self) -> str:
         """Return arch of the add-on."""
         return self.sys_arch.match([self.addon.arch])

@@ -133,10 +132,6 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):

         Returns a JSON string with registry credentials for the base image's registry,
         or None if no matching registry is configured.
-
-        Raises:
-            HassioArchNotFound: If the add-on is not supported on the current architecture.
-
         """
         # Early return before accessing base_image to avoid unnecessary arch lookup
         if not self.sys_docker.config.registries:
supervisor/addons/model.py

@@ -87,7 +87,6 @@ from ..const import (
     AddonBootConfig,
     AddonStage,
     AddonStartup,
-    CpuArch,
 )
 from ..coresys import CoreSys
 from ..docker.const import Capabilities
@@ -549,7 +548,7 @@ class AddonModel(JobGroup, ABC):
         return self.data.get(ATTR_MACHINE, [])

     @property
-    def arch(self) -> CpuArch:
+    def arch(self) -> str:
         """Return architecture to use for the addon's image."""
         if ATTR_IMAGE in self.data:
             return self.sys_arch.match(self.data[ATTR_ARCH])
supervisor/api/__init__.py

@@ -813,10 +813,6 @@ class RestAPI(CoreSysAttributes):
         self.webapp.add_routes(
             [
                 web.get("/docker/info", api_docker.info),
-                web.post(
-                    "/docker/migrate-storage-driver",
-                    api_docker.migrate_docker_storage_driver,
-                ),
                 web.post("/docker/options", api_docker.options),
                 web.get("/docker/registries", api_docker.registries),
                 web.post("/docker/registries", api_docker.create_registry),
supervisor/api/docker.py

@@ -4,7 +4,6 @@ import logging
 from typing import Any

 from aiohttp import web
-from awesomeversion import AwesomeVersion
 import voluptuous as vol

 from supervisor.resolution.const import ContextType, IssueType, SuggestionType
@@ -17,7 +16,6 @@ from ..const import (
     ATTR_PASSWORD,
     ATTR_REGISTRIES,
     ATTR_STORAGE,
-    ATTR_STORAGE_DRIVER,
     ATTR_USERNAME,
     ATTR_VERSION,
 )
@@ -44,12 +42,6 @@ SCHEMA_OPTIONS = vol.Schema(
     }
 )

-SCHEMA_MIGRATE_DOCKER_STORAGE_DRIVER = vol.Schema(
-    {
-        vol.Required(ATTR_STORAGE_DRIVER): vol.In(["overlayfs", "overlay2"]),
-    }
-)
-

 class APIDocker(CoreSysAttributes):
     """Handle RESTful API for Docker configuration."""
@@ -131,27 +123,3 @@ class APIDocker(CoreSysAttributes):

         del self.sys_docker.config.registries[hostname]
         await self.sys_docker.config.save_data()
-
-    @api_process
-    async def migrate_docker_storage_driver(self, request: web.Request) -> None:
-        """Migrate Docker storage driver."""
-        if (
-            not self.coresys.os.available
-            or not self.coresys.os.version
-            or self.coresys.os.version < AwesomeVersion("17.0.dev0")
-        ):
-            raise APINotFound(
-                "Home Assistant OS 17.0 or newer required for Docker storage driver migration"
-            )
-
-        body = await api_validate(SCHEMA_MIGRATE_DOCKER_STORAGE_DRIVER, request)
-        await self.sys_dbus.agent.system.migrate_docker_storage_driver(
-            body[ATTR_STORAGE_DRIVER]
-        )
-
-        _LOGGER.info("Host system reboot required to apply Docker storage migration")
-        self.sys_resolution.create_issue(
-            IssueType.REBOOT_REQUIRED,
-            ContextType.SYSTEM,
-            suggestions=[SuggestionType.EXECUTE_REBOOT],
-        )
supervisor/arch.py

@@ -4,7 +4,6 @@ import logging
 from pathlib import Path
 import platform

-from .const import CpuArch
 from .coresys import CoreSys, CoreSysAttributes
 from .exceptions import ConfigurationFileError, HassioArchNotFound
 from .utils.json import read_json_file
@@ -13,40 +12,38 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)

 ARCH_JSON: Path = Path(__file__).parent.joinpath("data/arch.json")

-MAP_CPU: dict[str, CpuArch] = {
-    "armv7": CpuArch.ARMV7,
-    "armv6": CpuArch.ARMHF,
-    "armv8": CpuArch.AARCH64,
-    "aarch64": CpuArch.AARCH64,
-    "i686": CpuArch.I386,
-    "x86_64": CpuArch.AMD64,
+MAP_CPU = {
+    "armv7": "armv7",
+    "armv6": "armhf",
+    "armv8": "aarch64",
+    "aarch64": "aarch64",
+    "i686": "i386",
+    "x86_64": "amd64",
 }


-class CpuArchManager(CoreSysAttributes):
+class CpuArch(CoreSysAttributes):
     """Manage available architectures."""

     def __init__(self, coresys: CoreSys) -> None:
         """Initialize CPU Architecture handler."""
         self.coresys = coresys
-        self._supported_arch: list[CpuArch] = []
-        self._supported_set: set[CpuArch] = set()
-        self._default_arch: CpuArch
+        self._supported_arch: list[str] = []
+        self._supported_set: set[str] = set()
+        self._default_arch: str

     @property
-    def default(self) -> CpuArch:
+    def default(self) -> str:
         """Return system default arch."""
         return self._default_arch

     @property
-    def supervisor(self) -> CpuArch:
+    def supervisor(self) -> str:
         """Return supervisor arch."""
-        if self.sys_supervisor.arch:
-            return CpuArch(self.sys_supervisor.arch)
-        return self._default_arch
+        return self.sys_supervisor.arch or self._default_arch

     @property
-    def supported(self) -> list[CpuArch]:
+    def supported(self) -> list[str]:
         """Return support arch by CPU/Machine."""
         return self._supported_arch

@@ -68,7 +65,7 @@ class CpuArchManager(CoreSysAttributes):
             return

         # Use configs from arch.json
-        self._supported_arch.extend(CpuArch(a) for a in arch_data[self.sys_machine])
+        self._supported_arch.extend(arch_data[self.sys_machine])
         self._default_arch = self.supported[0]

         # Make sure native support is in supported list
@@ -81,14 +78,14 @@ class CpuArchManager(CoreSysAttributes):
         """Return True if there is a supported arch by this platform."""
         return not self._supported_set.isdisjoint(arch_list)

-    def match(self, arch_list: list[str]) -> CpuArch:
+    def match(self, arch_list: list[str]) -> str:
         """Return best match for this CPU/Platform."""
         for self_arch in self.supported:
             if self_arch in arch_list:
                 return self_arch
         raise HassioArchNotFound()

-    def detect_cpu(self) -> CpuArch:
+    def detect_cpu(self) -> str:
         """Return the arch type of local CPU."""
         cpu = platform.machine()
         for check, value in MAP_CPU.items():
@@ -99,10 +96,9 @@ class CpuArchManager(CoreSysAttributes):
                 "Unknown CPU architecture %s, falling back to Supervisor architecture.",
                 cpu,
             )
-            return CpuArch(self.sys_supervisor.arch)
+            return self.sys_supervisor.arch
         _LOGGER.warning(
             "Unknown CPU architecture %s, assuming CPU architecture equals Supervisor architecture.",
             cpu,
         )
-        # Return the cpu string as-is, wrapped in CpuArch (may fail if invalid)
-        return CpuArch(cpu)
+        return cpu
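The base side of this diff types MAP_CPU and the manager against a CpuArch enum imported from supervisor/const.py, but that enum's definition is not part of this compare. Judging from the member names and the plain-string values they replace, it is presumably a StrEnum along these lines (an assumed sketch, not the committed definition):

```python
from enum import StrEnum


class CpuArch(StrEnum):
    """CPU architectures known to the Supervisor (assumed definition)."""

    ARMHF = "armhf"
    ARMV7 = "armv7"
    AARCH64 = "aarch64"
    I386 = "i386"
    AMD64 = "amd64"
```

A StrEnum member compares and hashes like its string value, which is why call sites that still pass or expect plain strings (for example the MAP_ARCH lookups further down) keep working during the transition between the two branches.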
supervisor/bootstrap.py

@@ -13,7 +13,7 @@ from colorlog import ColoredFormatter

 from .addons.manager import AddonManager
 from .api import RestAPI
-from .arch import CpuArchManager
+from .arch import CpuArch
 from .auth import Auth
 from .backups.manager import BackupManager
 from .bus import Bus
@@ -71,7 +71,7 @@ async def initialize_coresys() -> CoreSys:
     coresys.jobs = await JobManager(coresys).load_config()
     coresys.core = await Core(coresys).post_init()
     coresys.plugins = await PluginManager(coresys).load_config()
-    coresys.arch = CpuArchManager(coresys)
+    coresys.arch = CpuArch(coresys)
     coresys.auth = await Auth(coresys).load_config()
     coresys.updater = await Updater(coresys).load_config()
     coresys.api = RestAPI(coresys)
supervisor/const.py

@@ -328,7 +328,6 @@ ATTR_STATE = "state"
 ATTR_STATIC = "static"
 ATTR_STDIN = "stdin"
 ATTR_STORAGE = "storage"
-ATTR_STORAGE_DRIVER = "storage_driver"
 ATTR_SUGGESTIONS = "suggestions"
 ATTR_SUPERVISOR = "supervisor"
 ATTR_SUPERVISOR_INTERNET = "supervisor_internet"
supervisor/coresys.py

@@ -29,7 +29,7 @@ from .const import (
 if TYPE_CHECKING:
     from .addons.manager import AddonManager
     from .api import RestAPI
-    from .arch import CpuArchManager
+    from .arch import CpuArch
     from .auth import Auth
     from .backups.manager import BackupManager
     from .bus import Bus
@@ -78,7 +78,7 @@ class CoreSys:
         # Internal objects pointers
         self._docker: DockerAPI | None = None
         self._core: Core | None = None
-        self._arch: CpuArchManager | None = None
+        self._arch: CpuArch | None = None
         self._auth: Auth | None = None
         self._homeassistant: HomeAssistant | None = None
         self._supervisor: Supervisor | None = None
@@ -266,17 +266,17 @@ class CoreSys:
         self._plugins = value

     @property
-    def arch(self) -> CpuArchManager:
-        """Return CpuArchManager object."""
+    def arch(self) -> CpuArch:
+        """Return CpuArch object."""
         if self._arch is None:
-            raise RuntimeError("CpuArchManager not set!")
+            raise RuntimeError("CpuArch not set!")
         return self._arch

     @arch.setter
-    def arch(self, value: CpuArchManager) -> None:
-        """Set a CpuArchManager object."""
+    def arch(self, value: CpuArch) -> None:
+        """Set a CpuArch object."""
         if self._arch:
-            raise RuntimeError("CpuArchManager already set!")
+            raise RuntimeError("CpuArch already set!")
         self._arch = value

     @property
@@ -733,8 +733,8 @@ class CoreSysAttributes:
         return self.coresys.plugins

     @property
-    def sys_arch(self) -> CpuArchManager:
-        """Return CpuArchManager object."""
+    def sys_arch(self) -> CpuArch:
+        """Return CpuArch object."""
         return self.coresys.arch

     @property
supervisor/dbus/agent/system.py

@@ -15,8 +15,3 @@ class System(DBusInterface):
     async def schedule_wipe_device(self) -> bool:
         """Schedule a factory reset on next system boot."""
         return await self.connected_dbus.System.call("schedule_wipe_device")
-
-    @dbus_connected
-    async def migrate_docker_storage_driver(self, backend: str) -> None:
-        """Migrate Docker storage driver."""
-        await self.connected_dbus.System.call("migrate_docker_storage_driver", backend)
@@ -7,7 +7,6 @@ from ipaddress import IPv4Address
|
|||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from socket import SocketIO
|
|
||||||
import tempfile
|
import tempfile
|
||||||
from typing import TYPE_CHECKING, cast
|
from typing import TYPE_CHECKING, cast
|
||||||
|
|
||||||
@@ -835,10 +834,7 @@ class DockerAddon(DockerInterface):
|
|||||||
try:
|
try:
|
||||||
# Load needed docker objects
|
# Load needed docker objects
|
||||||
container = self.sys_docker.containers.get(self.name)
|
container = self.sys_docker.containers.get(self.name)
|
||||||
# attach_socket returns SocketIO for local Docker connections (Unix socket)
|
socket = container.attach_socket(params={"stdin": 1, "stream": 1})
|
||||||
socket = cast(
|
|
||||||
SocketIO, container.attach_socket(params={"stdin": 1, "stream": 1})
|
|
||||||
)
|
|
||||||
except (docker.errors.DockerException, requests.RequestException) as err:
|
except (docker.errors.DockerException, requests.RequestException) as err:
|
||||||
_LOGGER.error("Can't attach to %s stdin: %s", self.name, err)
|
_LOGGER.error("Can't attach to %s stdin: %s", self.name, err)
|
||||||
raise DockerError() from err
|
raise DockerError() from err
|
||||||
|
|||||||
supervisor/docker/const.py

@@ -2,14 +2,19 @@

 from __future__ import annotations

-from enum import StrEnum
+from contextlib import suppress
+from enum import Enum, StrEnum
+from functools import total_ordering
 from pathlib import PurePath
 import re
+from typing import cast

 from docker.types import Mount

 from ..const import MACHINE_ID

+RE_RETRYING_DOWNLOAD_STATUS = re.compile(r"Retrying in \d+ seconds?")
+
 # Docker Hub registry identifier
 DOCKER_HUB = "hub.docker.com"

@@ -76,6 +81,57 @@ class PropagationMode(StrEnum):
     RSLAVE = "rslave"


+@total_ordering
+class PullImageLayerStage(Enum):
+    """Job stages for pulling an image layer.
+
+    These are a subset of the statuses in a docker pull image log. They
+    are the standardized ones that are the most useful to us.
+    """
+
+    PULLING_FS_LAYER = 1, "Pulling fs layer"
+    RETRYING_DOWNLOAD = 2, "Retrying download"
+    DOWNLOADING = 2, "Downloading"
+    VERIFYING_CHECKSUM = 3, "Verifying Checksum"
+    DOWNLOAD_COMPLETE = 4, "Download complete"
+    EXTRACTING = 5, "Extracting"
+    PULL_COMPLETE = 6, "Pull complete"
+
+    def __init__(self, order: int, status: str) -> None:
+        """Set fields from values."""
+        self.order = order
+        self.status = status
+
+    def __eq__(self, value: object, /) -> bool:
+        """Check equality, allow StrEnum style comparisons on status."""
+        with suppress(AttributeError):
+            return self.status == cast(PullImageLayerStage, value).status
+        return self.status == value
+
+    def __lt__(self, other: object) -> bool:
+        """Order instances."""
+        with suppress(AttributeError):
+            return self.order < cast(PullImageLayerStage, other).order
+        return False
+
+    def __hash__(self) -> int:
+        """Hash instance."""
+        return hash(self.status)
+
+    @classmethod
+    def from_status(cls, status: str) -> PullImageLayerStage | None:
+        """Return stage instance from pull log status."""
+        for i in cls:
+            if i.status == status:
+                return i
+
+        # This one includes number of seconds until download so its not constant
+        if RE_RETRYING_DOWNLOAD_STATUS.match(status):
+            return cls.RETRYING_DOWNLOAD
+
+        return None
+
+
 ENV_TIME = "TZ"
 ENV_TOKEN = "SUPERVISOR_TOKEN"
 ENV_TOKEN_OLD = "HASSIO_TOKEN"
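PullImageLayerStage uses the tuple-valued Enum idiom: each member's value is an (order, status) pair that Enum unpacks into __init__, and @total_ordering derives the remaining comparisons from __lt__ and __eq__. Note that RETRYING_DOWNLOAD and DOWNLOADING share order 2 but stay distinct members because their value tuples differ. A minimal self-contained sketch of the pattern (hypothetical Stage enum, not the committed class):

```python
from enum import Enum
from functools import total_ordering


@total_ordering
class Stage(Enum):
    # Each value is an (order, status) tuple; Enum passes it to __init__.
    DOWNLOADING = 1, "Downloading"
    EXTRACTING = 2, "Extracting"

    def __init__(self, order: int, status: str) -> None:
        self.order = order
        self.status = status

    def __lt__(self, other: "Stage") -> bool:
        return self.order < other.order

    # Defining __eq__ disables the inherited hash, which is why the real
    # class also overrides __hash__.
    def __eq__(self, other: object) -> bool:
        return self is other

    def __hash__(self) -> int:
        return hash(self.status)


assert Stage.DOWNLOADING < Stage.EXTRACTING       # via __lt__ / total_ordering
assert Stage.DOWNLOADING.status == "Downloading"  # field set in __init__
```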
supervisor/docker/interface.py

@@ -19,6 +19,7 @@ import docker
 from docker.models.containers import Container
 import requests

+from ..bus import EventListener
 from ..const import (
     ATTR_PASSWORD,
     ATTR_REGISTRY,
@@ -34,23 +35,24 @@ from ..exceptions import (
     DockerError,
     DockerHubRateLimitExceeded,
     DockerJobError,
+    DockerLogOutOfOrder,
     DockerNotFound,
     DockerRequestError,
 )
+from ..jobs import SupervisorJob
 from ..jobs.const import JOB_GROUP_DOCKER_INTERFACE, JobConcurrency
 from ..jobs.decorator import Job
 from ..jobs.job_group import JobGroup
 from ..resolution.const import ContextType, IssueType, SuggestionType
 from ..utils.sentry import async_capture_exception
-from .const import DOCKER_HUB, ContainerState, RestartPolicy
+from .const import DOCKER_HUB, ContainerState, PullImageLayerStage, RestartPolicy
 from .manager import CommandReturn, PullLogEntry
 from .monitor import DockerContainerStateEvent
-from .pull_progress import ImagePullProgress
 from .stats import DockerStats

 _LOGGER: logging.Logger = logging.getLogger(__name__)

-MAP_ARCH: dict[CpuArch, str] = {
+MAP_ARCH: dict[CpuArch | str, str] = {
     CpuArch.ARMV7: "linux/arm/v7",
     CpuArch.ARMHF: "linux/arm/v6",
     CpuArch.AARCH64: "linux/arm64",
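Widening the key annotation to `dict[CpuArch | str, str]` is a typing-only change: if CpuArch is a StrEnum (see the assumed sketch earlier), a member and its plain-string spelling hash and compare identically, so either form addresses the same entry. A small demonstration under that assumption:

```python
from enum import StrEnum


class CpuArch(StrEnum):  # assumed shape, as sketched above
    AARCH64 = "aarch64"


MAP_ARCH = {CpuArch.AARCH64: "linux/arm64"}

# Both keys hit the same entry because StrEnum members behave like their values.
assert MAP_ARCH["aarch64"] == "linux/arm64"
assert MAP_ARCH[CpuArch.AARCH64] == "linux/arm64"
```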
@@ -193,6 +195,167 @@ class DockerInterface(JobGroup, ABC):

         return credentials

+    def _process_pull_image_log(  # noqa: C901
+        self, install_job_id: str, reference: PullLogEntry
+    ) -> None:
+        """Process events fired from a docker while pulling an image, filtered to a given job id."""
+        if (
+            reference.job_id != install_job_id
+            or not reference.id
+            or not reference.status
+            or not (stage := PullImageLayerStage.from_status(reference.status))
+        ):
+            return
+
+        # Pulling FS Layer is our marker for a layer that needs to be downloaded and extracted. Otherwise it already exists and we can ignore
+        job: SupervisorJob | None = None
+        if stage == PullImageLayerStage.PULLING_FS_LAYER:
+            job = self.sys_jobs.new_job(
+                name="Pulling container image layer",
+                initial_stage=stage.status,
+                reference=reference.id,
+                parent_id=install_job_id,
+                internal=True,
+            )
+            job.done = False
+            return
+
+        # Find our sub job to update details of
+        for j in self.sys_jobs.jobs:
+            if j.parent_id == install_job_id and j.reference == reference.id:
+                job = j
+                break
+
+        # This likely only occurs if the logs came in out of sync and we got progress before the Pulling FS Layer one
+        if not job:
+            raise DockerLogOutOfOrder(
+                f"Received pull image log with status {reference.status} for image id {reference.id} and parent job {install_job_id} but could not find a matching job, skipping",
+                _LOGGER.debug,
+            )
+
+        # Hopefully these come in order but if they sometimes get out of sync, avoid accidentally going backwards
+        # If it happens a lot though we may need to reconsider the value of this feature
+        if job.done:
+            raise DockerLogOutOfOrder(
+                f"Received pull image log with status {reference.status} for job {job.uuid} but job was done, skipping",
+                _LOGGER.debug,
+            )
+
+        if job.stage and stage < PullImageLayerStage.from_status(job.stage):
+            raise DockerLogOutOfOrder(
+                f"Received pull image log with status {reference.status} for job {job.uuid} but job was already on stage {job.stage}, skipping",
+                _LOGGER.debug,
+            )
+
+        # For progress calcuation we assume downloading and extracting are each 50% of the time and others stages negligible
+        progress = job.progress
+        match stage:
+            case PullImageLayerStage.DOWNLOADING | PullImageLayerStage.EXTRACTING:
+                if (
+                    reference.progress_detail
+                    and reference.progress_detail.current
+                    and reference.progress_detail.total
+                ):
+                    progress = 50 * (
+                        reference.progress_detail.current
+                        / reference.progress_detail.total
+                    )
+                    if stage == PullImageLayerStage.EXTRACTING:
+                        progress += 50
+            case (
+                PullImageLayerStage.VERIFYING_CHECKSUM
+                | PullImageLayerStage.DOWNLOAD_COMPLETE
+            ):
+                progress = 50
+            case PullImageLayerStage.PULL_COMPLETE:
+                progress = 100
+            case PullImageLayerStage.RETRYING_DOWNLOAD:
+                progress = 0
+
+        if stage != PullImageLayerStage.RETRYING_DOWNLOAD and progress < job.progress:
+            raise DockerLogOutOfOrder(
+                f"Received pull image log with status {reference.status} for job {job.uuid} that implied progress was {progress} but current progress is {job.progress}, skipping",
+                _LOGGER.debug,
+            )
+
+        # Our filters have all passed. Time to update the job
+        # Only downloading and extracting have progress details. Use that to set extra
+        # We'll leave it around on later stages as the total bytes may be useful after that stage
+        # Enforce range to prevent float drift error
+        progress = max(0, min(progress, 100))
+        if (
+            stage in {PullImageLayerStage.DOWNLOADING, PullImageLayerStage.EXTRACTING}
+            and reference.progress_detail
+            and reference.progress_detail.current is not None
+            and reference.progress_detail.total is not None
+        ):
+            job.update(
+                progress=progress,
+                stage=stage.status,
+                extra={
+                    "current": reference.progress_detail.current,
+                    "total": reference.progress_detail.total,
+                },
+            )
+        else:
+            # If we reach DOWNLOAD_COMPLETE without ever having set extra (small layers that skip
+            # the downloading phase), set a minimal extra so aggregate progress calculation can proceed
+            extra = job.extra
+            if stage == PullImageLayerStage.DOWNLOAD_COMPLETE and not job.extra:
+                extra = {"current": 1, "total": 1}
+
+            job.update(
+                progress=progress,
+                stage=stage.status,
+                done=stage == PullImageLayerStage.PULL_COMPLETE,
+                extra=None if stage == PullImageLayerStage.RETRYING_DOWNLOAD else extra,
+            )
+
+        # Once we have received a progress update for every child job, start to set status of the main one
+        install_job = self.sys_jobs.get_job(install_job_id)
+        layer_jobs = [
+            job
+            for job in self.sys_jobs.jobs
+            if job.parent_id == install_job.uuid
+            and job.name == "Pulling container image layer"
+        ]
+
+        # First set the total bytes to be downloaded/extracted on the main job
+        if not install_job.extra:
+            total = 0
+            for job in layer_jobs:
+                if not job.extra:
+                    return
+                total += job.extra["total"]
+            install_job.extra = {"total": total}
+        else:
+            total = install_job.extra["total"]
+
+        # Then determine total progress based on progress of each sub-job, factoring in size of each compared to total
+        progress = 0.0
+        stage = PullImageLayerStage.PULL_COMPLETE
+        for job in layer_jobs:
+            if not job.extra:
+                return
+            progress += job.progress * (job.extra["total"] / total)
+            job_stage = PullImageLayerStage.from_status(cast(str, job.stage))
+
+            if job_stage < PullImageLayerStage.EXTRACTING:
+                stage = PullImageLayerStage.DOWNLOADING
+            elif (
+                stage == PullImageLayerStage.PULL_COMPLETE
+                and job_stage < PullImageLayerStage.PULL_COMPLETE
+            ):
+                stage = PullImageLayerStage.EXTRACTING
+
+        # Ensure progress is 100 at this point to prevent float drift
+        if stage == PullImageLayerStage.PULL_COMPLETE:
+            progress = 100
+
+        # To reduce noise, limit updates to when result has changed by an entire percent or when stage changed
+        if stage != install_job.stage or progress >= install_job.progress + 1:
+            install_job.update(stage=stage.status, progress=max(0, min(progress, 100)))
+
     @Job(
         name="docker_interface_install",
         on_condition=DockerJobError,
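The aggregation at the end of `_process_pull_image_log` weights each layer job by its share of the total bytes, so one large layer dominates the parent job's progress. A standalone sketch of that arithmetic (illustrative numbers only):

```python
# Byte-weighted aggregate progress, as computed over the layer sub-jobs:
# each layer contributes progress * (its_total / grand_total).
layers = [
    {"progress": 100.0, "total": 5_000_000},  # small layer, already done
    {"progress": 40.0, "total": 95_000_000},  # large layer, mid-download
]
total = sum(layer["total"] for layer in layers)
progress = sum(layer["progress"] * (layer["total"] / total) for layer in layers)
print(round(progress, 1))  # 43.0: dominated by the large layer
```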
@@ -211,36 +374,31 @@ class DockerInterface(JobGroup, ABC):
         if not image:
             raise ValueError("Cannot pull without an image!")

-        image_arch = arch or self.sys_arch.supervisor
-        pull_progress = ImagePullProgress()
-        current_job = self.sys_jobs.current
-
-        async def process_pull_event(event: PullLogEntry) -> None:
-            """Process pull event and update job progress."""
-            if event.job_id != current_job.uuid:
-                return
-
-            # Process event through progress tracker
-            pull_progress.process_event(event)
-
-            # Update job if progress changed significantly (>= 1%)
-            should_update, progress = pull_progress.should_update_job()
-            if should_update:
-                stage = pull_progress.get_stage()
-                current_job.update(progress=progress, stage=stage)
-
-        listener = self.sys_bus.register_event(
-            BusEvent.DOCKER_IMAGE_PULL_UPDATE, process_pull_event
-        )
+        image_arch = str(arch) if arch else self.sys_arch.supervisor
+        listener: EventListener | None = None

         _LOGGER.info("Downloading docker image %s with tag %s.", image, version)
         try:
             # Get credentials for private registries to pass to aiodocker
             credentials = self._get_credentials(image) or None

+            curr_job_id = self.sys_jobs.current.uuid
+
+            async def process_pull_image_log(reference: PullLogEntry) -> None:
+                try:
+                    self._process_pull_image_log(curr_job_id, reference)
+                except DockerLogOutOfOrder as err:
+                    # Send all these to sentry. Missing a few progress updates
+                    # shouldn't matter to users but matters to us
+                    await async_capture_exception(err)
+
+            listener = self.sys_bus.register_event(
+                BusEvent.DOCKER_IMAGE_PULL_UPDATE, process_pull_image_log
+            )
+
             # Pull new image, passing credentials to aiodocker
             docker_image = await self.sys_docker.pull_image(
-                current_job.uuid,
+                self.sys_jobs.current.uuid,
                 image,
                 str(version),
                 platform=MAP_ARCH[image_arch],
@@ -288,7 +446,8 @@ class DockerInterface(JobGroup, ABC):
                 f"Unknown error with {image}:{version!s} -> {err!s}", _LOGGER.error
             ) from err
         finally:
-            self.sys_bus.remove_listener(listener)
+            if listener:
+                self.sys_bus.remove_listener(listener)

         self._meta = docker_image

@@ -452,7 +611,9 @@ class DockerInterface(JobGroup, ABC):
         expected_cpu_arch: CpuArch | None = None,
     ) -> None:
         """Check we have expected image with correct arch."""
-        arch = expected_cpu_arch or self.sys_arch.supervisor
+        expected_image_cpu_arch = (
+            str(expected_cpu_arch) if expected_cpu_arch else self.sys_arch.supervisor
+        )
         image_name = f"{expected_image}:{version!s}"
         if self.image == expected_image:
             try:
@@ -470,7 +631,7 @@ class DockerInterface(JobGroup, ABC):
         # If we have an image and its the right arch, all set
         # It seems that newer Docker version return a variant for arm64 images.
         # Make sure we match linux/arm64 and linux/arm64/v8.
-        expected_image_arch = MAP_ARCH[arch]
+        expected_image_arch = MAP_ARCH[expected_image_cpu_arch]
         if image_arch.startswith(expected_image_arch):
             return
         _LOGGER.info(
@@ -483,7 +644,7 @@ class DockerInterface(JobGroup, ABC):
         # We're missing the image we need. Stop and clean up what we have then pull the right one
         with suppress(DockerError):
             await self.remove()
-        await self.install(version, expected_image, arch=arch)
+        await self.install(version, expected_image, arch=expected_image_cpu_arch)

     @Job(
         name="docker_interface_update",
supervisor/docker/manager.py

@@ -111,15 +111,10 @@ class PullProgressDetail:
     """Progress detail information for pull.

     Documentation lacking but both of these seem to be in bytes when populated.
-
-    Containerd-snapshot update - When leveraging this new feature, this information
-    becomes useless to us while extracting. It simply tells elapsed time using
-    current and units.
     """

     current: int | None = None
     total: int | None = None
-    units: str | None = None

     @classmethod
     def from_pull_log_dict(cls, value: dict[str, int]) -> PullProgressDetail:
@@ -708,8 +703,7 @@ class DockerAPI(CoreSysAttributes):
             raise DockerError(f"Container {name} is not running", _LOGGER.error)

         try:
-            # When stream=False, stats() returns dict, not Iterator
-            return cast(dict[str, Any], docker_container.stats(stream=False))
+            return docker_container.stats(stream=False)
         except (docker_errors.DockerException, requests.RequestException) as err:
             raise DockerError(
                 f"Can't read stats from {name}: {err}", _LOGGER.error
supervisor/docker/network.py

@@ -7,8 +7,6 @@ from ipaddress import IPv4Address
 from typing import Self, cast

 import docker
-from docker.models.containers import Container
-from docker.models.networks import Network
 import requests

 from ..const import (
@@ -61,7 +59,7 @@ class DockerNetwork:
     def __init__(self, docker_client: docker.DockerClient):
         """Initialize internal Supervisor network."""
         self.docker: docker.DockerClient = docker_client
-        self._network: Network
+        self._network: docker.models.networks.Network

     async def post_init(
         self, enable_ipv6: bool | None = None, mtu: int | None = None
@@ -78,7 +76,7 @@ class DockerNetwork:
         return DOCKER_NETWORK

     @property
-    def network(self) -> Network:
+    def network(self) -> docker.models.networks.Network:
         """Return docker network."""
         return self._network

@@ -119,7 +117,7 @@ class DockerNetwork:

     def _get_network(
         self, enable_ipv6: bool | None = None, mtu: int | None = None
-    ) -> Network:
+    ) -> docker.models.networks.Network:
         """Get supervisor network."""
         try:
             if network := self.docker.networks.get(DOCKER_NETWORK):
@@ -220,7 +218,7 @@ class DockerNetwork:

     def attach_container(
         self,
-        container: Container,
+        container: docker.models.containers.Container,
         alias: list[str] | None = None,
         ipv4: IPv4Address | None = None,
     ) -> None:
@@ -277,7 +275,9 @@ class DockerNetwork:
         if container.id not in self.containers:
             self.attach_container(container, alias, ipv4)

-    def detach_default_bridge(self, container: Container) -> None:
+    def detach_default_bridge(
+        self, container: docker.models.containers.Container
+    ) -> None:
         """Detach default Docker bridge.

         Need run inside executor.
@@ -1,316 +0,0 @@
|
|||||||
"""Image pull progress tracking."""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
from contextlib import suppress
|
|
||||||
from dataclasses import dataclass, field
|
|
||||||
from enum import Enum
|
|
||||||
import logging
|
|
||||||
from typing import TYPE_CHECKING, cast
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from .manager import PullLogEntry
|
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
# Progress weight distribution: 70% downloading, 30% extraction
|
|
||||||
DOWNLOAD_WEIGHT = 70.0
|
|
||||||
EXTRACT_WEIGHT = 30.0
|
|
||||||
|
|
||||||
|
|
||||||
class LayerPullStatus(Enum):
|
|
||||||
"""Status values for pulling an image layer.
|
|
||||||
|
|
||||||
These are a subset of the statuses in a docker pull image log.
|
|
||||||
The order field allows comparing which stage is further along.
|
|
||||||
"""
|
|
||||||
|
|
||||||
PULLING_FS_LAYER = 1, "Pulling fs layer"
|
|
||||||
WAITING = 1, "Waiting"
|
|
||||||
RETRYING = 2, "Retrying" # Matches "Retrying in N seconds"
|
|
||||||
DOWNLOADING = 3, "Downloading"
|
|
||||||
VERIFYING_CHECKSUM = 4, "Verifying Checksum"
|
|
||||||
DOWNLOAD_COMPLETE = 5, "Download complete"
|
|
||||||
EXTRACTING = 6, "Extracting"
|
|
||||||
PULL_COMPLETE = 7, "Pull complete"
|
|
||||||
ALREADY_EXISTS = 7, "Already exists"
|
|
||||||
|
|
||||||
def __init__(self, order: int, status: str) -> None:
|
|
||||||
"""Set fields from values."""
|
|
||||||
self.order = order
|
|
||||||
self.status = status
|
|
||||||
|
|
||||||
def __eq__(self, value: object, /) -> bool:
|
|
||||||
"""Check equality, allow string comparisons on status."""
|
|
||||||
with suppress(AttributeError):
|
|
||||||
return self.status == cast(LayerPullStatus, value).status
|
|
||||||
return self.status == value
|
|
||||||
|
|
||||||
def __hash__(self) -> int:
|
|
||||||
"""Return hash based on status string."""
|
|
||||||
return hash(self.status)
|
|
||||||
|
|
||||||
def __lt__(self, other: object) -> bool:
|
|
||||||
"""Order instances by stage progression."""
|
|
||||||
with suppress(AttributeError):
|
|
||||||
return self.order < cast(LayerPullStatus, other).order
|
|
||||||
return False
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def from_status(cls, status: str) -> LayerPullStatus | None:
|
|
||||||
"""Get enum from status string, or None if not recognized."""
|
|
||||||
# Handle "Retrying in N seconds" pattern
|
|
||||||
if status.startswith("Retrying in "):
|
|
||||||
return cls.RETRYING
|
|
||||||
for member in cls:
|
|
||||||
if member.status == status:
|
|
||||||
return member
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class LayerProgress:
|
|
||||||
"""Track progress of a single layer."""
|
|
||||||
|
|
||||||
layer_id: str
|
|
||||||
total_size: int = 0 # Size in bytes (from downloading, reused for extraction)
|
|
||||||
download_current: int = 0
|
|
||||||
extract_current: int = 0 # Extraction progress in bytes (overlay2 only)
|
|
||||||
download_complete: bool = False
|
|
||||||
extract_complete: bool = False
|
|
||||||
already_exists: bool = False # Layer was already locally available
|
|
||||||
|
|
||||||
def calculate_progress(self) -> float:
|
|
||||||
"""Calculate layer progress 0-100.
|
|
||||||
|
|
||||||
Progress is weighted: 70% download, 30% extraction.
|
|
||||||
For overlay2, we have byte-based extraction progress.
|
|
||||||
For containerd, extraction jumps from 70% to 100% on completion.
|
|
||||||
"""
|
|
||||||
if self.already_exists or self.extract_complete:
|
|
||||||
return 100.0
|
|
||||||
|
|
||||||
if self.download_complete:
|
|
||||||
# Check if we have extraction progress (overlay2)
|
|
||||||
if self.extract_current > 0 and self.total_size > 0:
|
|
||||||
extract_pct = min(1.0, self.extract_current / self.total_size)
|
|
||||||
return DOWNLOAD_WEIGHT + (extract_pct * EXTRACT_WEIGHT)
|
|
||||||
# No extraction progress yet - return 70%
|
|
||||||
return DOWNLOAD_WEIGHT
|
|
||||||
|
|
||||||
if self.total_size > 0:
|
|
||||||
download_pct = min(1.0, self.download_current / self.total_size)
|
|
||||||
return download_pct * DOWNLOAD_WEIGHT
|
|
||||||
|
|
||||||
return 0.0
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class ImagePullProgress:
|
|
||||||
"""Track overall progress of pulling an image.
|
|
||||||
|
|
||||||
Uses count-based progress where each layer contributes equally regardless of size.
|
|
||||||
This avoids progress regression when large layers are discovered late due to
|
|
||||||
Docker's rate-limiting of concurrent downloads.
|
|
||||||
|
|
||||||
Progress is only reported after the first "Downloading" event, since Docker
|
|
||||||
sends "Already exists" and "Pulling fs layer" events before we know the full
|
|
||||||
layer count.
|
|
||||||
"""
|
|
||||||
|
|
||||||
layers: dict[str, LayerProgress] = field(default_factory=dict)
|
|
||||||
_last_reported_progress: float = field(default=0.0, repr=False)
|
|
||||||
_seen_downloading: bool = field(default=False, repr=False)
|
|
||||||
|
|
||||||
def get_or_create_layer(self, layer_id: str) -> LayerProgress:
|
|
||||||
"""Get existing layer or create new one."""
|
|
||||||
if layer_id not in self.layers:
|
|
||||||
self.layers[layer_id] = LayerProgress(layer_id=layer_id)
|
|
||||||
return self.layers[layer_id]
|
|
||||||
|
|
||||||
def process_event(self, entry: PullLogEntry) -> None:
|
|
||||||
"""Process a pull log event and update layer state."""
|
|
||||||
# Skip events without layer ID or status
|
|
||||||
if not entry.id or not entry.status:
|
|
||||||
return
|
|
||||||
|
|
||||||
# Skip metadata events that aren't layer-specific
|
|
||||||
# "Pulling from X" has id=tag but isn't a layer
|
|
||||||
if entry.status.startswith("Pulling from "):
|
|
||||||
return
|
|
||||||
|
|
||||||
# Parse status to enum (returns None for unrecognized statuses)
|
|
||||||
status = LayerPullStatus.from_status(entry.status)
|
|
||||||
if status is None:
|
|
||||||
return
|
|
||||||
|
|
||||||
layer = self.get_or_create_layer(entry.id)
|
|
||||||
|
|
||||||
# Handle "Already exists" - layer is locally available
|
|
||||||
if status is LayerPullStatus.ALREADY_EXISTS:
|
|
||||||
layer.already_exists = True
|
|
||||||
layer.download_complete = True
|
|
||||||
layer.extract_complete = True
|
|
||||||
return
|
|
||||||
|
|
||||||
# Handle "Pulling fs layer" / "Waiting" - layer is being tracked
|
|
||||||
if status in (LayerPullStatus.PULLING_FS_LAYER, LayerPullStatus.WAITING):
|
|
||||||
return
|
|
||||||
|
|
||||||
# Handle "Downloading" - update download progress
|
|
||||||
if status is LayerPullStatus.DOWNLOADING:
|
|
||||||
# Mark that we've seen downloading - now we know layer count is complete
|
|
||||||
self._seen_downloading = True
|
|
||||||
if (
|
|
||||||
entry.progress_detail
|
|
||||||
and entry.progress_detail.current is not None
|
|
||||||
and entry.progress_detail.total is not None
|
|
||||||
):
|
|
||||||
layer.download_current = entry.progress_detail.current
|
|
||||||
# Only set total_size if not already set or if this is larger
|
|
||||||
# (handles case where total changes during download)
|
|
||||||
layer.total_size = max(layer.total_size, entry.progress_detail.total)
|
|
||||||
return
|
|
||||||
|
|
||||||
# Handle "Verifying Checksum" - download is essentially complete
|
|
||||||
if status is LayerPullStatus.VERIFYING_CHECKSUM:
|
|
||||||
if layer.total_size > 0:
|
|
||||||
layer.download_current = layer.total_size
|
|
||||||
return
|
|
||||||
|
|
||||||
# Handle "Download complete" - download phase done
|
|
||||||
if status is LayerPullStatus.DOWNLOAD_COMPLETE:
|
|
||||||
layer.download_complete = True
|
|
||||||
if layer.total_size > 0:
|
|
||||||
layer.download_current = layer.total_size
|
|
||||||
elif layer.total_size == 0:
|
|
||||||
# Small layer that skipped downloading phase
|
|
||||||
# Set minimal size so it doesn't distort weighted average
|
|
||||||
layer.total_size = 1
|
|
||||||
layer.download_current = 1
|
|
||||||
return
|
|
||||||
|
|
||||||
# Handle "Extracting" - extraction in progress
|
|
||||||
if status is LayerPullStatus.EXTRACTING:
|
|
||||||
# For overlay2: progressDetail has {current, total} in bytes
|
|
||||||
# For containerd: progressDetail has {current, units: "s"} (time elapsed)
|
|
||||||
# We can only use byte-based progress (overlay2)
|
|
||||||
layer.download_complete = True
|
|
||||||
if layer.total_size > 0:
|
|
||||||
layer.download_current = layer.total_size
|
|
||||||
|
|
||||||
# Check if this is byte-based extraction progress (overlay2)
|
|
||||||
# Overlay2 has {current, total} in bytes, no units field
|
|
||||||
# Containerd has {current, units: "s"} which is useless for progress
|
|
||||||
if (
|
|
||||||
entry.progress_detail
|
|
||||||
and entry.progress_detail.current is not None
|
-                and entry.progress_detail.units is None
-            ):
-                # Use layer's total_size from downloading phase (doesn't change)
-                layer.extract_current = entry.progress_detail.current
-                _LOGGER.debug(
-                    "Layer %s extracting: %d/%d (%.1f%%)",
-                    layer.layer_id,
-                    layer.extract_current,
-                    layer.total_size,
-                    (layer.extract_current / layer.total_size * 100)
-                    if layer.total_size > 0
-                    else 0,
-                )
-            return
-
-        # Handle "Pull complete" - layer is fully done
-        if status is LayerPullStatus.PULL_COMPLETE:
-            layer.download_complete = True
-            layer.extract_complete = True
-            if layer.total_size > 0:
-                layer.download_current = layer.total_size
-            return
-
-        # Handle "Retrying in N seconds" - reset download progress
-        if status is LayerPullStatus.RETRYING:
-            layer.download_current = 0
-            layer.download_complete = False
-            return
-
-    def calculate_progress(self) -> float:
-        """Calculate overall progress 0-100.
-
-        Uses count-based progress where each layer that needs pulling contributes
-        equally. Layers that already exist locally are excluded from the calculation.
-
-        Returns 0 until we've seen the first "Downloading" event, since Docker
-        reports "Already exists" and "Pulling fs layer" events before we know
-        the complete layer count.
-        """
-        # Don't report progress until we've seen downloading start
-        # This ensures we know the full layer count before calculating progress
-        if not self._seen_downloading or not self.layers:
-            return 0.0
-
-        # Only count layers that need pulling (exclude already_exists)
-        layers_to_pull = [
-            layer for layer in self.layers.values() if not layer.already_exists
-        ]
-
-        if not layers_to_pull:
-            # All layers already exist, nothing to download
-            return 100.0
-
-        # Each layer contributes equally: sum of layer progresses / total layers
-        total_progress = sum(layer.calculate_progress() for layer in layers_to_pull)
-        return total_progress / len(layers_to_pull)
-
-    def get_stage(self) -> str | None:
-        """Get current stage based on layer states."""
-        if not self.layers:
-            return None
-
-        # Check if any layer is still downloading
-        for layer in self.layers.values():
-            if layer.already_exists:
-                continue
-            if not layer.download_complete:
-                return "Downloading"
-
-        # All downloads complete, check if extracting
-        for layer in self.layers.values():
-            if layer.already_exists:
-                continue
-            if not layer.extract_complete:
-                return "Extracting"
-
-        # All done
-        return "Pull complete"
-
-    def should_update_job(self, threshold: float = 1.0) -> tuple[bool, float]:
-        """Check if job should be updated based on progress change.
-
-        Returns (should_update, current_progress).
-        Updates are triggered when progress changes by at least threshold%.
-        Progress is guaranteed to only increase (monotonic).
-        """
-        current_progress = self.calculate_progress()
-
-        # Ensure monotonic progress - never report a decrease
-        # This can happen when new layers get size info and change the weighted average
-        if current_progress < self._last_reported_progress:
-            _LOGGER.debug(
-                "Progress decreased from %.1f%% to %.1f%%, keeping last reported",
-                self._last_reported_progress,
-                current_progress,
-            )
-            return False, self._last_reported_progress
-
-        if current_progress >= self._last_reported_progress + threshold:
-            _LOGGER.debug(
-                "Progress update: %.1f%% -> %.1f%% (delta: %.1f%%)",
-                self._last_reported_progress,
-                current_progress,
-                current_progress - self._last_reported_progress,
-            )
-            self._last_reported_progress = current_progress
-            return True, current_progress
-
-        return False, self._last_reported_progress
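The removed helpers above spread each layer's progress across a download phase and an extract phase and then average only the layers that actually need pulling. The snippet below is a minimal, self-contained sketch of that count-based scheme; the 70/30 split mirrors the DOWNLOAD_WEIGHT/EXTRACT_WEIGHT constants referenced by the removed tests further down, while the `Layer` record and function names here are illustrative stand-ins rather than the Supervisor's actual classes.

```python
from dataclasses import dataclass

DOWNLOAD_WEIGHT = 70.0  # share of a layer's progress spent downloading
EXTRACT_WEIGHT = 30.0   # remaining share spent extracting


@dataclass
class Layer:
    """Simplified stand-in for one image layer's pull state."""

    total_size: int = 0
    download_current: int = 0
    extract_current: int = 0
    already_exists: bool = False
    download_complete: bool = False
    extract_complete: bool = False

    def progress(self) -> float:
        if self.already_exists or self.extract_complete:
            return 100.0
        if self.total_size <= 0:
            return 0.0
        if not self.download_complete:
            return DOWNLOAD_WEIGHT * self.download_current / self.total_size
        return DOWNLOAD_WEIGHT + EXTRACT_WEIGHT * self.extract_current / self.total_size


def overall_progress(layers: list[Layer]) -> float:
    """Count-based aggregate: every layer that needs pulling weighs the same."""
    to_pull = [layer for layer in layers if not layer.already_exists]
    if not to_pull:
        return 100.0
    return sum(layer.progress() for layer in to_pull) / len(to_pull)


# A cached layer is ignored; a half-downloaded layer (35%) and a fully
# extracted layer (100%) average to 67.5, the same case exercised by the
# removed tests further down.
layers = [
    Layer(already_exists=True),
    Layer(total_size=1000, download_current=500),
    Layer(total_size=1000, download_current=1000,
          download_complete=True, extract_complete=True),
]
assert round(overall_progress(layers), 1) == 67.5
```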
@@ -632,6 +632,10 @@ class DockerNotFound(DockerError):
     """Docker object don't Exists."""


+class DockerLogOutOfOrder(DockerError):
+    """Raise when log from docker action was out of order."""
+
+
 class DockerNoSpaceOnDevice(DockerError):
     """Raise if a docker pull fails due to available space."""

@@ -1,6 +1,5 @@
 """A collection of tasks."""

-from contextlib import suppress
 from datetime import datetime, timedelta
 import logging
 from typing import cast
@@ -14,7 +13,6 @@ from ..exceptions import (
     BackupFileNotFoundError,
     HomeAssistantError,
     ObserverError,
-    SupervisorUpdateError,
 )
 from ..homeassistant.const import LANDINGPAGE, WSType
 from ..jobs.const import JobConcurrency
@@ -176,11 +174,7 @@ class Tasks(CoreSysAttributes):
                 "Found new Supervisor version %s, updating",
                 self.sys_supervisor.latest_version,
             )
-            # Errors are logged by the exceptions, we can't really do something
-            # if an update fails here.
-            with suppress(SupervisorUpdateError):
-                await self.sys_supervisor.update()
+            await self.sys_supervisor.update()

     async def _watchdog_homeassistant_api(self):
         """Create scheduler task for monitoring running state of API.
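The behavioural difference in the last hunk is whether a failed Supervisor update is swallowed at this call site or allowed to propagate to whatever schedules the task. A short, hedged illustration of `contextlib.suppress`, which the old code used; the coroutine here is a stand-in, not the real `sys_supervisor.update`.

```python
import asyncio
from contextlib import suppress


class SupervisorUpdateError(Exception):
    """Stand-in for supervisor.exceptions.SupervisorUpdateError."""


async def update() -> None:
    raise SupervisorUpdateError("update failed")


async def main() -> None:
    # Old variant: the exception is discarded here (logging happens elsewhere).
    with suppress(SupervisorUpdateError):
        await update()

    # New variant: the bare call lets the exception bubble up to the caller.
    try:
        await update()
    except SupervisorUpdateError as err:
        print(f"propagated: {err}")


asyncio.run(main())
```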
@@ -10,7 +10,7 @@ from awesomeversion import AwesomeVersion
 import pytest

 from supervisor.addons.addon import Addon
-from supervisor.arch import CpuArchManager
+from supervisor.arch import CpuArch
 from supervisor.config import CoreConfig
 from supervisor.const import AddonBoot, AddonStartup, AddonState, BusEvent
 from supervisor.coresys import CoreSys
@@ -54,9 +54,7 @@ async def fixture_mock_arch_disk() -> AsyncGenerator[None]:
     """Mock supported arch and disk space."""
     with (
         patch("shutil.disk_usage", return_value=(42, 42, 2 * (1024.0**3))),
-        patch.object(
-            CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
-        ),
+        patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
     ):
         yield

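Several hunks in this compare swap the same patching pattern: `supported` is a property, so the tests replace it with `unittest.mock.PropertyMock` rather than a plain return value. A generic sketch of that pattern follows; the `Arch` class here is a toy stand-in, not Supervisor's arch manager.

```python
from unittest.mock import PropertyMock, patch


class Arch:
    """Toy class with a read-only property, standing in for the arch manager."""

    @property
    def supported(self) -> list[str]:
        return ["aarch64"]


def test_supported_can_be_patched() -> None:
    # new= must be a PropertyMock so attribute *access* (not a call) is mocked.
    with patch.object(Arch, "supported", new=PropertyMock(return_value=["amd64"])):
        assert Arch().supported == ["amd64"]
    # Outside the context manager the original property is restored.
    assert Arch().supported == ["aarch64"]


test_supported_can_be_patched()
```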
@@ -9,7 +9,7 @@ import pytest

 from supervisor.addons.addon import Addon
 from supervisor.addons.build import AddonBuild
-from supervisor.arch import CpuArchManager
+from supervisor.arch import CpuArch
 from supervisor.const import AddonState
 from supervisor.coresys import CoreSys
 from supervisor.docker.addon import DockerAddon
@@ -236,9 +236,7 @@ async def test_api_addon_rebuild_healthcheck(
         patch.object(AddonBuild, "is_valid", return_value=True),
         patch.object(DockerAddon, "is_running", return_value=False),
         patch.object(Addon, "need_build", new=PropertyMock(return_value=True)),
-        patch.object(
-            CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
-        ),
+        patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
         patch.object(DockerAddon, "run", new=container_events_task),
         patch.object(
             coresys.docker,
@@ -310,9 +308,7 @@ async def test_api_addon_rebuild_force(
         patch.object(
             Addon, "need_build", new=PropertyMock(return_value=False)
         ),  # Image-based
-        patch.object(
-            CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
-        ),
+        patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
     ):
         resp = await api_client.post("/addons/local_ssh/rebuild")

@@ -330,9 +326,7 @@ async def test_api_addon_rebuild_force(
         patch.object(
             Addon, "need_build", new=PropertyMock(return_value=False)
         ),  # Image-based
-        patch.object(
-            CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
-        ),
+        patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
         patch.object(DockerAddon, "run", new=container_events_task),
         patch.object(
             coresys.docker,
@@ -4,11 +4,6 @@ from aiohttp.test_utils import TestClient
 import pytest

 from supervisor.coresys import CoreSys
-from supervisor.resolution.const import ContextType, IssueType, SuggestionType
-from supervisor.resolution.data import Issue, Suggestion
-
-from tests.dbus_service_mocks.agent_system import System as SystemService
-from tests.dbus_service_mocks.base import DBusServiceMock


 @pytest.mark.asyncio
@@ -89,79 +84,3 @@ async def test_registry_not_found(api_client: TestClient):
     assert resp.status == 404
     body = await resp.json()
     assert body["message"] == "Hostname bad does not exist in registries"
-
-
-@pytest.mark.parametrize("os_available", ["17.0.rc1"], indirect=True)
-async def test_api_migrate_docker_storage_driver(
-    api_client: TestClient,
-    coresys: CoreSys,
-    os_agent_services: dict[str, DBusServiceMock],
-    os_available,
-):
-    """Test Docker storage driver migration."""
-    system_service: SystemService = os_agent_services["agent_system"]
-    system_service.MigrateDockerStorageDriver.calls.clear()
-
-    resp = await api_client.post(
-        "/docker/migrate-storage-driver",
-        json={"storage_driver": "overlayfs"},
-    )
-    assert resp.status == 200
-
-    assert system_service.MigrateDockerStorageDriver.calls == [("overlayfs",)]
-    assert (
-        Issue(IssueType.REBOOT_REQUIRED, ContextType.SYSTEM)
-        in coresys.resolution.issues
-    )
-    assert (
-        Suggestion(SuggestionType.EXECUTE_REBOOT, ContextType.SYSTEM)
-        in coresys.resolution.suggestions
-    )
-
-    # Test migration back to overlay2 (graph driver)
-    system_service.MigrateDockerStorageDriver.calls.clear()
-    resp = await api_client.post(
-        "/docker/migrate-storage-driver",
-        json={"storage_driver": "overlay2"},
-    )
-    assert resp.status == 200
-    assert system_service.MigrateDockerStorageDriver.calls == [("overlay2",)]
-
-
-@pytest.mark.parametrize("os_available", ["17.0.rc1"], indirect=True)
-async def test_api_migrate_docker_storage_driver_invalid_backend(
-    api_client: TestClient,
-    os_available,
-):
-    """Test 400 is returned for invalid storage driver."""
-    resp = await api_client.post(
-        "/docker/migrate-storage-driver",
-        json={"storage_driver": "invalid"},
-    )
-    assert resp.status == 400
-
-
-async def test_api_migrate_docker_storage_driver_not_os(
-    api_client: TestClient,
-    coresys: CoreSys,
-):
-    """Test 404 is returned if not running on HAOS."""
-    resp = await api_client.post(
-        "/docker/migrate-storage-driver",
-        json={"storage_driver": "overlayfs"},
-    )
-    assert resp.status == 404
-
-
-@pytest.mark.parametrize("os_available", ["16.2"], indirect=True)
-async def test_api_migrate_docker_storage_driver_old_os(
-    api_client: TestClient,
-    coresys: CoreSys,
-    os_available,
-):
-    """Test 404 is returned if OS is older than 17.0."""
-    resp = await api_client.post(
-        "/docker/migrate-storage-driver",
-        json={"storage_driver": "overlayfs"},
-    )
-    assert resp.status == 404
@@ -305,8 +305,6 @@ async def test_api_progress_updates_home_assistant_update(
         and evt.args[0]["data"]["event"] == WSEvent.JOB
         and evt.args[0]["data"]["data"]["name"] == "home_assistant_core_update"
     ]
-    # Count-based progress: 2 layers need pulling (each worth 50%)
-    # Layers that already exist are excluded from progress calculation
     assert events[:5] == [
         {
             "stage": None,
@@ -320,34 +318,34 @@ async def test_api_progress_updates_home_assistant_update(
         },
         {
             "stage": None,
-            "progress": 9.2,
+            "progress": 0.1,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 25.6,
+            "progress": 1.2,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 35.4,
+            "progress": 2.8,
             "done": False,
         },
     ]
     assert events[-5:] == [
         {
             "stage": None,
-            "progress": 95.5,
+            "progress": 97.2,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 96.9,
+            "progress": 98.4,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 98.2,
+            "progress": 99.4,
             "done": False,
         },
         {
@@ -10,7 +10,7 @@ from awesomeversion import AwesomeVersion
 import pytest

 from supervisor.addons.addon import Addon
-from supervisor.arch import CpuArchManager
+from supervisor.arch import CpuArch
 from supervisor.backups.manager import BackupManager
 from supervisor.config import CoreConfig
 from supervisor.const import AddonState, CoreState
@@ -191,9 +191,7 @@ async def test_api_store_update_healthcheck(
         patch.object(DockerAddon, "run", new=container_events_task),
         patch.object(DockerInterface, "install"),
         patch.object(DockerAddon, "is_running", return_value=False),
-        patch.object(
-            CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
-        ),
+        patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
     ):
         resp = await api_client.post(f"/store/addons/{TEST_ADDON_SLUG}/update")

@@ -550,9 +548,7 @@ async def test_api_store_addons_addon_availability_arch_not_supported(
     coresys.addons.data.user[addon_obj.slug] = {"version": AwesomeVersion("0.0.1")}

     # Mock the system architecture to be different
-    with patch.object(
-        CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
-    ):
+    with patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])):
         resp = await api_client.request(
             api_method, f"/store/addons/{addon_obj.slug}/{api_action}"
         )
@@ -764,8 +760,6 @@ async def test_api_progress_updates_addon_install_update(
         and evt.args[0]["data"]["data"]["name"] == job_name
         and evt.args[0]["data"]["data"]["reference"] == addon_slug
     ]
-    # Count-based progress: 2 layers need pulling (each worth 50%)
-    # Layers that already exist are excluded from progress calculation
     assert events[:4] == [
         {
             "stage": None,
@@ -774,34 +768,34 @@ async def test_api_progress_updates_addon_install_update(
         },
         {
             "stage": None,
-            "progress": 9.2,
+            "progress": 0.1,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 25.6,
+            "progress": 1.2,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 35.4,
+            "progress": 2.8,
             "done": False,
         },
     ]
     assert events[-5:] == [
         {
             "stage": None,
-            "progress": 95.5,
+            "progress": 97.2,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 96.9,
+            "progress": 98.4,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 98.2,
+            "progress": 99.4,
             "done": False,
         },
         {
@@ -358,8 +358,6 @@ async def test_api_progress_updates_supervisor_update(
         and evt.args[0]["data"]["event"] == WSEvent.JOB
         and evt.args[0]["data"]["data"]["name"] == "supervisor_update"
     ]
-    # Count-based progress: 2 layers need pulling (each worth 50%)
-    # Layers that already exist are excluded from progress calculation
     assert events[:4] == [
         {
             "stage": None,
@@ -368,34 +366,34 @@ async def test_api_progress_updates_supervisor_update(
         },
         {
             "stage": None,
-            "progress": 9.2,
+            "progress": 0.1,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 25.6,
+            "progress": 1.2,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 35.4,
+            "progress": 2.8,
             "done": False,
         },
     ]
     assert events[-5:] == [
         {
             "stage": None,
-            "progress": 95.5,
+            "progress": 97.2,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 96.9,
+            "progress": 98.4,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 98.2,
+            "progress": 99.4,
             "done": False,
         },
         {
@@ -1,6 +1,6 @@
 """Mock of OS Agent System dbus service."""

-from dbus_fast import DBusError, ErrorType
+from dbus_fast import DBusError

 from .base import DBusServiceMock, dbus_method

@@ -21,7 +21,6 @@ class System(DBusServiceMock):
     object_path = "/io/hass/os/System"
     interface = "io.hass.os.System"
     response_schedule_wipe_device: bool | DBusError = True
-    response_migrate_docker_storage_driver: None | DBusError = None

     @dbus_method()
     def ScheduleWipeDevice(self) -> "b":
@@ -29,14 +28,3 @@ class System(DBusServiceMock):
         if isinstance(self.response_schedule_wipe_device, DBusError):
             raise self.response_schedule_wipe_device  # pylint: disable=raising-bad-type
         return self.response_schedule_wipe_device
-
-    @dbus_method()
-    def MigrateDockerStorageDriver(self, backend: "s") -> None:
-        """Migrate Docker storage driver."""
-        if isinstance(self.response_migrate_docker_storage_driver, DBusError):
-            raise self.response_migrate_docker_storage_driver  # pylint: disable=raising-bad-type
-        if backend not in ("overlayfs", "overlay2"):
-            raise DBusError(
-                ErrorType.FAILED,
-                f"unsupported driver: {backend} (only 'overlayfs' and 'overlay2' are supported)",
-            )
@@ -26,10 +26,7 @@ from supervisor.exceptions import (
     DockerNotFound,
     DockerRequestError,
 )
-from supervisor.homeassistant.const import WSEvent, WSType
-from supervisor.jobs import ChildJobSyncFilter, JobSchedulerOptions, SupervisorJob
-from supervisor.jobs.decorator import Job
-from supervisor.supervisor import Supervisor
+from supervisor.jobs import JobSchedulerOptions, SupervisorJob

 from tests.common import AsyncIterator, load_json_fixture

@@ -317,7 +314,7 @@ async def test_install_fires_progress_events(
         },
         {"status": "Already exists", "progressDetail": {}, "id": "6e771e15690e"},
         {"status": "Pulling fs layer", "progressDetail": {}, "id": "1578b14a573c"},
-        {"status": "Waiting", "progressDetail": {}, "id": "1578b14a573c"},
+        {"status": "Waiting", "progressDetail": {}, "id": "2488d0e401e1"},
         {
             "status": "Downloading",
             "progressDetail": {"current": 1378, "total": 1486},
@@ -387,7 +384,7 @@ async def test_install_fires_progress_events(
             job_id=ANY,
             status="Waiting",
             progress_detail=PullProgressDetail(),
-            id="1578b14a573c",
+            id="2488d0e401e1",
         ),
         PullLogEntry(
             job_id=ANY,
@@ -541,7 +538,6 @@ async def test_install_raises_on_pull_error(
             "status": "Pulling from home-assistant/odroid-n2-homeassistant",
             "id": "2025.7.2",
         },
-        {"status": "Pulling fs layer", "progressDetail": {}, "id": "1578b14a573c"},
         {
             "status": "Downloading",
             "progressDetail": {"current": 1378, "total": 1486},
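The raw dictionaries in these fixtures are the JSON objects Docker emits during a streamed image pull; the assertions compare them against `PullLogEntry`/`PullProgressDetail` objects carrying the same fields. A rough sketch of that correspondence is below, using simplified stand-in dataclasses and a hypothetical mapping helper rather than the real classes from `supervisor.docker.manager`.

```python
from dataclasses import dataclass, field
from typing import Any


@dataclass
class PullProgressDetail:
    """Stand-in for the progressDetail payload of a pull-log entry."""

    current: int | None = None
    total: int | None = None


@dataclass
class PullLogEntry:
    """Stand-in for one typed pull-log entry attributed to a job."""

    job_id: str
    status: str | None = None
    id: str | None = None
    progress_detail: PullProgressDetail = field(default_factory=PullProgressDetail)


def entry_from_docker_log(job_id: str, raw: dict[str, Any]) -> PullLogEntry:
    """Map one raw pull-log dict (as in the fixtures above) to a typed entry."""
    detail = raw.get("progressDetail") or {}
    return PullLogEntry(
        job_id=job_id,
        status=raw.get("status"),
        id=raw.get("id"),
        progress_detail=PullProgressDetail(
            current=detail.get("current"), total=detail.get("total")
        ),
    )


raw = {
    "status": "Downloading",
    "progressDetail": {"current": 1378, "total": 1486},
    "id": "1578b14a573c",
}
entry = entry_from_docker_log("test", raw)
assert entry.progress_detail.total == 1486
```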
@@ -596,39 +592,16 @@ async def test_install_progress_handles_download_restart(
     capture_exception.assert_not_called()


-@pytest.mark.parametrize(
-    "extract_log",
-    [
-        {
-            "status": "Extracting",
-            "progressDetail": {"current": 96, "total": 96},
-            "progress": "[==================================================>] 96B/96B",
-            "id": "02a6e69d8d00",
-        },
-        {
-            "status": "Extracting",
-            "progressDetail": {"current": 1, "units": "s"},
-            "progress": "1 s",
-            "id": "02a6e69d8d00",
-        },
-    ],
-    ids=["normal_extract_log", "containerd_snapshot_extract_log"],
-)
 async def test_install_progress_handles_layers_skipping_download(
     coresys: CoreSys,
     test_docker_interface: DockerInterface,
     capture_exception: Mock,
-    extract_log: dict[str, Any],
 ):
     """Test install handles small layers that skip downloading phase and go directly to download complete.

     Reproduces the real-world scenario from Supervisor issue #6286:
     - Small layer (02a6e69d8d00) completes Download complete at 10:14:08 without ever Downloading
     - Normal layer (3f4a84073184) starts Downloading at 10:14:09 with progress updates
-
-    Under containerd snapshotter this presumably can still occur and Supervisor will have even less info
-    since extract logs don't have a total. Supervisor should generally just ignore these and set progress
-    from the larger images that take all the time.
     """
     coresys.core.set_state(CoreState.RUNNING)

@@ -672,7 +645,12 @@ async def test_install_progress_handles_layers_skipping_download(
         },
         {"status": "Pull complete", "progressDetail": {}, "id": "3f4a84073184"},
         # Small layer finally extracts (10:14:58 in logs)
-        extract_log,
+        {
+            "status": "Extracting",
+            "progressDetail": {"current": 96, "total": 96},
+            "progress": "[==================================================>] 96B/96B",
+            "id": "02a6e69d8d00",
+        },
         {"status": "Pull complete", "progressDetail": {}, "id": "02a6e69d8d00"},
         {"status": "Digest: sha256:test"},
         {"status": "Status: Downloaded newer image for test/image:latest"},
@@ -709,18 +687,11 @@ async def test_install_progress_handles_layers_skipping_download(
     await install_task
     await event.wait()

-    # With the new progress calculation approach:
-    # - Progress is weighted by layer size
-    # - Small layers that skip downloading get minimal size (1 byte)
-    # - Progress should increase monotonically
-    assert len(install_job_snapshots) > 0
-
-    # Verify progress is monotonically increasing (or stable)
-    for i in range(1, len(install_job_snapshots)):
-        assert (
-            install_job_snapshots[i]["progress"]
-            >= install_job_snapshots[i - 1]["progress"]
-        )
+    # First update from layer download should have rather low progress ((260937/25445459) / 2 ~ 0.5%)
+    assert install_job_snapshots[0]["progress"] < 1
+
+    # Total 8 events should lead to a progress update on the install job
+    assert len(install_job_snapshots) == 8

     # Job should complete successfully
     assert job.done is True
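The expectation `install_job_snapshots[0]["progress"] < 1` follows from the numbers quoted in the comment: the first Downloading event reports 260937 of 25445459 bytes for one of two layers, so splitting progress evenly across the two layers puts the first update at roughly half a percent. A quick check of that arithmetic:

```python
# Values quoted in the test comment above; the even two-layer split is the
# assumption the comment itself spells out.
first_chunk = 260_937
layer_total = 25_445_459
layers = 2

first_update = (first_chunk / layer_total) / layers * 100
print(f"{first_update:.2f}%")  # ~0.51%, comfortably below the asserted 1%
assert first_update < 1
```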
@@ -787,88 +758,3 @@ async def test_missing_total_handled_gracefully(
|
|||||||
await event.wait()
|
await event.wait()
|
||||||
|
|
||||||
capture_exception.assert_not_called()
|
capture_exception.assert_not_called()
|
||||||
|
|
||||||
|
|
||||||
async def test_install_progress_containerd_snapshot(
|
|
||||||
coresys: CoreSys, ha_ws_client: AsyncMock
|
|
||||||
):
|
|
||||||
"""Test install handles docker progress events using containerd snapshotter."""
|
|
||||||
coresys.core.set_state(CoreState.RUNNING)
|
|
||||||
|
|
||||||
class TestDockerInterface(DockerInterface):
|
|
||||||
"""Test interface for events."""
|
|
||||||
|
|
||||||
@property
|
|
||||||
def name(self) -> str:
|
|
||||||
"""Name of test interface."""
|
|
||||||
return "test_interface"
|
|
||||||
|
|
||||||
@Job(
|
|
||||||
name="mock_docker_interface_install",
|
|
||||||
child_job_syncs=[
|
|
||||||
ChildJobSyncFilter("docker_interface_install", progress_allocation=1.0)
|
|
||||||
],
|
|
||||||
)
|
|
||||||
async def mock_install(self) -> None:
|
|
||||||
"""Mock install."""
|
|
||||||
await super().install(
|
|
||||||
AwesomeVersion("1.2.3"), image="test", arch=CpuArch.I386
|
|
||||||
)
|
|
||||||
|
|
||||||
# Fixture emulates log as received when using containerd snapshotter
|
|
||||||
# Should not error but progress gets choppier once extraction starts
|
|
||||||
logs = load_json_fixture("docker_pull_image_log_containerd_snapshot.json")
|
|
||||||
coresys.docker.images.pull.return_value = AsyncIterator(logs)
|
|
||||||
test_docker_interface = TestDockerInterface(coresys)
|
|
||||||
|
|
||||||
with patch.object(Supervisor, "arch", PropertyMock(return_value="i386")):
|
|
||||||
await test_docker_interface.mock_install()
|
|
||||||
coresys.docker.images.pull.assert_called_once_with(
|
|
||||||
"test", tag="1.2.3", platform="linux/386", auth=None, stream=True
|
|
||||||
)
|
|
||||||
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
|
|
||||||
|
|
||||||
await asyncio.sleep(1)
|
|
||||||
|
|
||||||
def job_event(progress: float, done: bool = False):
|
|
||||||
return {
|
|
||||||
"type": WSType.SUPERVISOR_EVENT,
|
|
||||||
"data": {
|
|
||||||
"event": WSEvent.JOB,
|
|
||||||
"data": {
|
|
||||||
"name": "mock_docker_interface_install",
|
|
||||||
"reference": "test_interface",
|
|
||||||
"uuid": ANY,
|
|
||||||
"progress": progress,
|
|
||||||
"stage": None,
|
|
||||||
"done": done,
|
|
||||||
"parent_id": None,
|
|
||||||
"errors": [],
|
|
||||||
"created": ANY,
|
|
||||||
"extra": None,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
assert [c.args[0] for c in ha_ws_client.async_send_command.call_args_list] == [
|
|
||||||
# Count-based progress: 2 layers, each = 50%. Download = 0-35%, Extract = 35-50%
|
|
||||||
job_event(0),
|
|
||||||
job_event(1.7),
|
|
||||||
job_event(3.4),
|
|
||||||
job_event(8.4),
|
|
||||||
job_event(10.2),
|
|
||||||
job_event(15.2),
|
|
||||||
job_event(18.7),
|
|
||||||
job_event(28.8),
|
|
||||||
job_event(35.7),
|
|
||||||
job_event(42.4),
|
|
||||||
job_event(49.3),
|
|
||||||
job_event(55.8),
|
|
||||||
job_event(62.7),
|
|
||||||
# Downloading phase is considered 70% of layer's progress.
|
|
||||||
# After download complete, extraction takes remaining 30% per layer.
|
|
||||||
job_event(70.0),
|
|
||||||
job_event(85.0),
|
|
||||||
job_event(100),
|
|
||||||
job_event(100, True),
|
|
||||||
]
|
|
||||||
|
|||||||
@@ -1,786 +0,0 @@
|
|||||||
"""Tests for image pull progress tracking."""
|
|
||||||
|
|
||||||
import pytest
|
|
||||||
|
|
||||||
from supervisor.docker.manager import PullLogEntry, PullProgressDetail
|
|
||||||
from supervisor.docker.pull_progress import (
|
|
||||||
DOWNLOAD_WEIGHT,
|
|
||||||
EXTRACT_WEIGHT,
|
|
||||||
ImagePullProgress,
|
|
||||||
LayerProgress,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class TestLayerProgress:
|
|
||||||
"""Tests for LayerProgress class."""
|
|
||||||
|
|
||||||
def test_already_exists_layer(self):
|
|
||||||
"""Test that already existing layer returns 100%."""
|
|
||||||
layer = LayerProgress(layer_id="abc123", already_exists=True)
|
|
||||||
assert layer.calculate_progress() == 100.0
|
|
||||||
|
|
||||||
def test_extract_complete_layer(self):
|
|
||||||
"""Test that extracted layer returns 100%."""
|
|
||||||
layer = LayerProgress(
|
|
||||||
layer_id="abc123",
|
|
||||||
total_size=1000,
|
|
||||||
download_current=1000,
|
|
||||||
download_complete=True,
|
|
||||||
extract_complete=True,
|
|
||||||
)
|
|
||||||
assert layer.calculate_progress() == 100.0
|
|
||||||
|
|
||||||
def test_download_complete_not_extracted(self):
|
|
||||||
"""Test layer that finished downloading but not extracting."""
|
|
||||||
layer = LayerProgress(
|
|
||||||
layer_id="abc123",
|
|
||||||
total_size=1000,
|
|
||||||
download_current=1000,
|
|
||||||
download_complete=True,
|
|
||||||
extract_complete=False,
|
|
||||||
)
|
|
||||||
assert layer.calculate_progress() == DOWNLOAD_WEIGHT # 70%
|
|
||||||
|
|
||||||
def test_extraction_progress_overlay2(self):
|
|
||||||
"""Test layer with byte-based extraction progress (overlay2)."""
|
|
||||||
layer = LayerProgress(
|
|
||||||
layer_id="abc123",
|
|
||||||
total_size=1000,
|
|
||||||
download_current=1000,
|
|
||||||
extract_current=500, # 50% extracted
|
|
||||||
download_complete=True,
|
|
||||||
extract_complete=False,
|
|
||||||
)
|
|
||||||
# 70% + (50% of 30%) = 70% + 15% = 85%
|
|
||||||
assert layer.calculate_progress() == DOWNLOAD_WEIGHT + (0.5 * EXTRACT_WEIGHT)
|
|
||||||
|
|
||||||
def test_downloading_progress(self):
|
|
||||||
"""Test layer during download phase."""
|
|
||||||
layer = LayerProgress(
|
|
||||||
layer_id="abc123",
|
|
||||||
total_size=1000,
|
|
||||||
download_current=500, # 50% downloaded
|
|
||||||
download_complete=False,
|
|
||||||
)
|
|
||||||
# 50% of 70% = 35%
|
|
||||||
assert layer.calculate_progress() == 35.0
|
|
||||||
|
|
||||||
def test_no_size_info_yet(self):
|
|
||||||
"""Test layer with no size information."""
|
|
||||||
layer = LayerProgress(layer_id="abc123")
|
|
||||||
assert layer.calculate_progress() == 0.0
|
|
||||||
|
|
||||||
|
|
||||||
class TestImagePullProgress:
|
|
||||||
"""Tests for ImagePullProgress class."""
|
|
||||||
|
|
||||||
def test_empty_progress(self):
|
|
||||||
"""Test progress with no layers."""
|
|
||||||
progress = ImagePullProgress()
|
|
||||||
assert progress.calculate_progress() == 0.0
|
|
||||||
|
|
||||||
def test_all_layers_already_exist(self):
|
|
||||||
"""Test when all layers already exist locally.
|
|
||||||
|
|
||||||
When an image is fully cached, there are no "Downloading" events.
|
|
||||||
Progress stays at 0 until the job completes and sets 100%.
|
|
||||||
"""
|
|
||||||
progress = ImagePullProgress()
|
|
||||||
|
|
||||||
# Simulate "Already exists" events
|
|
||||||
entry1 = PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Already exists",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
entry2 = PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer2",
|
|
||||||
status="Already exists",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
progress.process_event(entry1)
|
|
||||||
progress.process_event(entry2)
|
|
||||||
|
|
||||||
# No downloading events = no progress reported (job completion sets 100%)
|
|
||||||
assert progress.calculate_progress() == 0.0
|
|
||||||
|
|
||||||
def test_single_layer_download(self):
|
|
||||||
"""Test progress tracking for single layer download."""
|
|
||||||
progress = ImagePullProgress()
|
|
||||||
|
|
||||||
# Pull fs layer
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Pulling fs layer",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Start downloading
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=500, total=1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
# 50% of download phase = 35%
|
|
||||||
assert progress.calculate_progress() == pytest.approx(35.0)
|
|
||||||
|
|
||||||
# Download complete
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Download complete",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
assert progress.calculate_progress() == 70.0
|
|
||||||
|
|
||||||
# Pull complete
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Pull complete",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
assert progress.calculate_progress() == 100.0
|
|
||||||
|
|
||||||
def test_multiple_layers_equal_weight_progress(self):
|
|
||||||
"""Test count-based progress where each layer contributes equally."""
|
|
||||||
progress = ImagePullProgress()
|
|
||||||
|
|
||||||
# Two layers: sizes don't matter for weight, each layer = 50%
|
|
||||||
|
|
||||||
# Pulling fs layer for both
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="large",
|
|
||||||
status="Pulling fs layer",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="small",
|
|
||||||
status="Pulling fs layer",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Large layer: 50% downloaded = 35% layer progress (50% of 70%)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="large",
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=500, total=1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Small layer: 100% downloaded, waiting for extraction = 70% layer progress
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="small",
|
|
||||||
status="Download complete",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="small",
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=100, total=100),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Progress calculation (count-based, equal weight per layer):
|
|
||||||
# Large layer: 35% (50% of 70% download weight)
|
|
||||||
# Small layer: 70% (download complete)
|
|
||||||
# Each layer = 50% weight
|
|
||||||
# Total: (35 + 70) / 2 = 52.5%
|
|
||||||
assert progress.calculate_progress() == pytest.approx(52.5)
|
|
||||||
|
|
||||||
def test_download_retry(self):
|
|
||||||
"""Test that download retry resets progress."""
|
|
||||||
progress = ImagePullProgress()
|
|
||||||
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Pulling fs layer",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Download 50%
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=500, total=1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
assert progress.calculate_progress() == pytest.approx(35.0)
|
|
||||||
|
|
||||||
# Retry
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Retrying in 5 seconds",
|
|
||||||
)
|
|
||||||
)
|
|
||||||
assert progress.calculate_progress() == 0.0
|
|
||||||
|
|
||||||
def test_layer_skips_download(self):
|
|
||||||
"""Test small layer that goes straight to Download complete."""
|
|
||||||
progress = ImagePullProgress()
|
|
||||||
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="small",
|
|
||||||
status="Pulling fs layer",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Goes directly to Download complete (skipping Downloading events)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="small",
|
|
||||||
status="Download complete",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Should still work - sets minimal size
|
|
||||||
layer = progress.layers["small"]
|
|
||||||
assert layer.total_size == 1
|
|
||||||
assert layer.download_complete is True
|
|
||||||
|
|
||||||
def test_containerd_extract_progress(self):
|
|
||||||
"""Test extraction progress with containerd snapshotter (time-based)."""
|
|
||||||
progress = ImagePullProgress()
|
|
||||||
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Pulling fs layer",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Download complete
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=1000, total=1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Download complete",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Containerd extraction progress (time-based, not byte-based)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Extracting",
|
|
||||||
progress_detail=PullProgressDetail(current=5, units="s"),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Should be at 70% (download complete, time-based extraction not tracked)
|
|
||||||
assert progress.calculate_progress() == 70.0
|
|
||||||
|
|
||||||
# Pull complete
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Pull complete",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
assert progress.calculate_progress() == 100.0
|
|
||||||
|
|
||||||
def test_overlay2_extract_progress(self):
|
|
||||||
"""Test extraction progress with overlay2 (byte-based)."""
|
|
||||||
progress = ImagePullProgress()
|
|
||||||
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Pulling fs layer",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Download complete
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=1000, total=1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Download complete",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# At download complete, progress should be 70%
|
|
||||||
assert progress.calculate_progress() == 70.0
|
|
||||||
|
|
||||||
# Overlay2 extraction progress (byte-based, 50% extracted)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Extracting",
|
|
||||||
progress_detail=PullProgressDetail(current=500, total=1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Should be at 70% + (50% of 30%) = 85%
|
|
||||||
assert progress.calculate_progress() == pytest.approx(85.0)
|
|
||||||
|
|
||||||
# Extraction continues to 80%
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Extracting",
|
|
||||||
progress_detail=PullProgressDetail(current=800, total=1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Should be at 70% + (80% of 30%) = 94%
|
|
||||||
assert progress.calculate_progress() == pytest.approx(94.0)
|
|
||||||
|
|
||||||
# Pull complete
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Pull complete",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
assert progress.calculate_progress() == 100.0
|
|
||||||
|
|
||||||
def test_get_stage(self):
|
|
||||||
"""Test stage detection."""
|
|
||||||
progress = ImagePullProgress()
|
|
||||||
|
|
||||||
assert progress.get_stage() is None
|
|
||||||
|
|
||||||
# Add a layer that needs downloading
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Pulling fs layer",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=500, total=1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
assert progress.get_stage() == "Downloading"
|
|
||||||
|
|
||||||
# Download complete
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Download complete",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
assert progress.get_stage() == "Extracting"
|
|
||||||
|
|
||||||
# Pull complete
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Pull complete",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
assert progress.get_stage() == "Pull complete"
|
|
||||||
|
|
||||||
def test_should_update_job(self):
|
|
||||||
"""Test update threshold logic."""
|
|
||||||
progress = ImagePullProgress()
|
|
||||||
|
|
||||||
# Initial state - no updates
|
|
||||||
should_update, _ = progress.should_update_job()
|
|
||||||
assert not should_update
|
|
||||||
|
|
||||||
# Add a layer and start downloading
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Pulling fs layer",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Small progress - 1%
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=20, total=1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
# 2% of download = 1.4% total
|
|
||||||
should_update, current = progress.should_update_job()
|
|
||||||
assert should_update
|
|
||||||
assert current == pytest.approx(1.4)
|
|
||||||
|
|
||||||
# Tiny increment - shouldn't trigger update
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=25, total=1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
should_update, _ = progress.should_update_job()
|
|
||||||
assert not should_update
|
|
||||||
|
|
||||||
# Larger increment - should trigger
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=100, total=1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
should_update, _ = progress.should_update_job()
|
|
||||||
assert should_update
|
|
||||||
|
|
||||||
def test_verifying_checksum(self):
|
|
||||||
"""Test that Verifying Checksum marks download as nearly complete."""
|
|
||||||
progress = ImagePullProgress()
|
|
||||||
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Pulling fs layer",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=800, total=1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Verifying Checksum",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
layer = progress.layers["layer1"]
|
|
||||||
assert layer.download_current == 1000 # Should be set to total
|
|
||||||
|
|
||||||
def test_events_without_status_ignored(self):
|
|
||||||
"""Test that events without status are ignored."""
|
|
||||||
progress = ImagePullProgress()
|
|
||||||
|
|
||||||
# Event without status (just id field)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="abc123",
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Event without id
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
status="Digest: sha256:abc123",
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# They shouldn't create layers or cause errors
|
|
||||||
assert len(progress.layers) == 0
|
|
||||||
|
|
||||||
def test_mixed_already_exists_and_pull(self):
|
|
||||||
"""Test combination of cached and pulled layers."""
|
|
||||||
progress = ImagePullProgress()
|
|
||||||
|
|
||||||
# Layer 1 already exists
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="cached",
|
|
||||||
status="Already exists",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Layer 2 needs to be pulled
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="pulled",
|
|
||||||
status="Pulling fs layer",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="pulled",
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=500, total=1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Only 1 layer needs pulling (cached layer excluded)
|
|
||||||
# pulled: 35% (50% of 70% download weight)
|
|
||||||
assert progress.calculate_progress() == pytest.approx(35.0)
|
|
||||||
|
|
||||||
# Complete the pulled layer
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="pulled",
|
|
||||||
status="Download complete",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="pulled",
|
|
||||||
status="Pull complete",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
assert progress.calculate_progress() == 100.0
|
|
||||||
|
|
||||||
def test_pending_layers_prevent_premature_100(self):
|
|
||||||
"""Test that layers without size info scale down progress."""
|
|
||||||
progress = ImagePullProgress()
|
|
||||||
|
|
||||||
# First batch of layers - they complete
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Pulling fs layer",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer2",
|
|
||||||
status="Pulling fs layer",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Layer1 downloads and completes
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=1000, total=1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer1",
|
|
||||||
status="Pull complete",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Layer2 is still pending (no size info yet) - simulating Docker rate limiting
|
|
||||||
# Progress should NOT be 100% because layer2 hasn't started
|
|
||||||
|
|
||||||
# Layer1 is 100% complete, layer2 is 0%
|
|
||||||
# With scaling: 1 known layer at 100%, 1 pending layer
|
|
||||||
# Scale factor = 1/(1+1) = 0.5, so progress = 100 * 0.5 = 50%
|
|
||||||
assert progress.calculate_progress() == pytest.approx(50.0)
|
|
||||||
|
|
||||||
# Now layer2 starts downloading
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer2",
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=500, total=1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Now both layers have size info, no scaling needed
|
|
||||||
# Layer1: 100%, Layer2: 35% (50% of 70%)
|
|
||||||
# Weighted by equal size: (100 + 35) / 2 = 67.5%
|
|
||||||
assert progress.calculate_progress() == pytest.approx(67.5)
|
|
||||||
|
|
||||||
# Complete layer2
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="layer2",
|
|
||||||
status="Pull complete",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
assert progress.calculate_progress() == 100.0
|
|
||||||
|
|
||||||
def test_large_layers_appearing_late_dont_cause_regression(self):
|
|
||||||
"""Test that large layers discovered late don't cause progress to drop.
|
|
||||||
|
|
||||||
This simulates Docker's rate-limiting behavior where small layers complete
|
|
||||||
first, then large layers start downloading later.
|
|
||||||
"""
|
|
||||||
progress = ImagePullProgress()
|
|
||||||
|
|
||||||
# All layers announced upfront (Docker does this)
|
|
||||||
for layer_id in ["small1", "small2", "big1", "big2"]:
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id=layer_id,
|
|
||||||
status="Pulling fs layer",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Big layers are "Waiting" (rate limited)
|
|
||||||
for layer_id in ["big1", "big2"]:
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id=layer_id,
|
|
||||||
status="Waiting",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Small layers download quickly (1KB each)
|
|
||||||
for layer_id in ["small1", "small2"]:
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id=layer_id,
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=1000, total=1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id=layer_id,
|
|
||||||
status="Pull complete",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# At this point, 2 small layers are complete, 2 big layers are unknown size
|
|
||||||
progress_before_big = progress.calculate_progress()
|
|
||||||
|
|
||||||
# Now big layers start downloading - they're 100MB each!
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="big1",
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=1000000, total=100000000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
progress_after_big1 = progress.calculate_progress()
|
|
||||||
|
|
||||||
# Progress should NOT drop significantly when big layer appears
|
|
||||||
# The monotonic tracking in should_update_job will help, but the
|
|
||||||
# raw calculation should also not regress too badly
|
|
||||||
assert progress_after_big1 >= progress_before_big * 0.5, (
|
|
||||||
f"Progress dropped too much: {progress_before_big} -> {progress_after_big1}"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Second big layer appears
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id="big2",
|
|
||||||
status="Downloading",
|
|
||||||
progress_detail=PullProgressDetail(current=1000000, total=100000000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Should still make forward progress overall
|
|
||||||
# Complete all layers
|
|
||||||
for layer_id in ["big1", "big2"]:
|
|
||||||
progress.process_event(
|
|
||||||
PullLogEntry(
|
|
||||||
job_id="test",
|
|
||||||
id=layer_id,
|
|
||||||
status="Pull complete",
|
|
||||||
progress_detail=PullProgressDetail(),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
assert progress.calculate_progress() == 100.0
|
|
||||||
@@ -1,196 +0,0 @@
[
  {
    "status": "Pulling from home-assistant/home-assistant",
    "id": "2025.12.0.dev202511080235"
  },
  { "status": "Pulling fs layer", "progressDetail": {}, "id": "eafecc6b43cc" },
  { "status": "Pulling fs layer", "progressDetail": {}, "id": "333270549f95" },
  {
    "status": "Downloading",
    "progressDetail": { "current": 1048576, "total": 21863319 },
    "progress": "[==\u003e ] 1.049MB/21.86MB",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Downloading",
    "progressDetail": { "current": 1048576, "total": 21179924 },
    "progress": "[==\u003e ] 1.049MB/21.18MB",
    "id": "333270549f95"
  },
  {
    "status": "Downloading",
    "progressDetail": { "current": 4194304, "total": 21863319 },
    "progress": "[=========\u003e ] 4.194MB/21.86MB",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Downloading",
    "progressDetail": { "current": 2097152, "total": 21179924 },
    "progress": "[====\u003e ] 2.097MB/21.18MB",
    "id": "333270549f95"
  },
  {
    "status": "Downloading",
    "progressDetail": { "current": 7340032, "total": 21863319 },
    "progress": "[================\u003e ] 7.34MB/21.86MB",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Downloading",
    "progressDetail": { "current": 4194304, "total": 21179924 },
    "progress": "[=========\u003e ] 4.194MB/21.18MB",
    "id": "333270549f95"
  },
  {
    "status": "Downloading",
    "progressDetail": { "current": 13631488, "total": 21863319 },
    "progress": "[===============================\u003e ] 13.63MB/21.86MB",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Downloading",
    "progressDetail": { "current": 8388608, "total": 21179924 },
    "progress": "[===================\u003e ] 8.389MB/21.18MB",
    "id": "333270549f95"
  },
  {
    "status": "Downloading",
    "progressDetail": { "current": 17825792, "total": 21863319 },
    "progress": "[========================================\u003e ] 17.83MB/21.86MB",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Downloading",
    "progressDetail": { "current": 12582912, "total": 21179924 },
    "progress": "[=============================\u003e ] 12.58MB/21.18MB",
    "id": "333270549f95"
  },
  {
    "status": "Downloading",
    "progressDetail": { "current": 21863319, "total": 21863319 },
    "progress": "[==================================================\u003e] 21.86MB/21.86MB",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Downloading",
    "progressDetail": { "current": 16777216, "total": 21179924 },
    "progress": "[=======================================\u003e ] 16.78MB/21.18MB",
    "id": "333270549f95"
  },
  {
    "status": "Downloading",
    "progressDetail": { "current": 21179924, "total": 21179924 },
    "progress": "[==================================================\u003e] 21.18MB/21.18MB",
    "id": "333270549f95"
  },
  {
    "status": "Download complete",
    "progressDetail": { "hidecounts": true },
    "id": "eafecc6b43cc"
  },
  {
    "status": "Download complete",
    "progressDetail": { "hidecounts": true },
    "id": "333270549f95"
  },
  {
    "status": "Extracting",
    "progressDetail": { "current": 1, "units": "s" },
    "progress": "1 s",
    "id": "333270549f95"
  },
  {
    "status": "Extracting",
    "progressDetail": { "current": 1, "units": "s" },
    "progress": "1 s",
    "id": "333270549f95"
  },
  {
    "status": "Pull complete",
    "progressDetail": { "hidecounts": true },
    "id": "333270549f95"
  },
  {
    "status": "Extracting",
    "progressDetail": { "current": 1, "units": "s" },
    "progress": "1 s",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Extracting",
    "progressDetail": { "current": 1, "units": "s" },
    "progress": "1 s",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Extracting",
    "progressDetail": { "current": 2, "units": "s" },
    "progress": "2 s",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Extracting",
    "progressDetail": { "current": 2, "units": "s" },
    "progress": "2 s",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Extracting",
    "progressDetail": { "current": 3, "units": "s" },
    "progress": "3 s",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Extracting",
    "progressDetail": { "current": 3, "units": "s" },
    "progress": "3 s",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Extracting",
    "progressDetail": { "current": 4, "units": "s" },
    "progress": "4 s",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Extracting",
    "progressDetail": { "current": 4, "units": "s" },
    "progress": "4 s",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Extracting",
    "progressDetail": { "current": 5, "units": "s" },
    "progress": "5 s",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Extracting",
    "progressDetail": { "current": 5, "units": "s" },
    "progress": "5 s",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Extracting",
    "progressDetail": { "current": 6, "units": "s" },
    "progress": "6 s",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Extracting",
    "progressDetail": { "current": 6, "units": "s" },
    "progress": "6 s",
    "id": "eafecc6b43cc"
  },
  {
    "status": "Pull complete",
    "progressDetail": { "hidecounts": true },
    "id": "eafecc6b43cc"
  },
  {
    "status": "Digest: sha256:bfc9efc13552c0c228f3d9d35987331cce68b43c9bc79c80a57eeadadd44cccf"
  },
  {
    "status": "Status: Downloaded newer image for ghcr.io/home-assistant/home-assistant:2025.12.0.dev202511080235"
  }
]
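The removed fixture above is a raw Docker pull status stream: each entry carries a status, usually a layer id, and a progressDetail with current/total byte counts while downloading. As a standalone reference only (stdlib-only sketch; summarize_pull_log and the file path are hypothetical, not part of the Supervisor test suite), such a log can be reduced to final per-layer byte counts like this:

import json


def summarize_pull_log(path: str) -> dict[str, tuple[int, int]]:
    """Return {layer_id: (last_current, total)} from a recorded pull log."""
    layers: dict[str, tuple[int, int]] = {}
    with open(path, encoding="utf-8") as fixture:
        for event in json.load(fixture):
            if event.get("status") != "Downloading":
                continue
            detail = event.get("progressDetail", {})
            if "id" in event and "total" in detail:
                layers[event["id"]] = (detail.get("current", 0), detail["total"])
    return layers


# For the log above this ends with both layers fully downloaded:
# {"eafecc6b43cc": (21863319, 21863319), "333270549f95": (21179924, 21179924)}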
@@ -9,7 +9,7 @@ from awesomeversion import AwesomeVersion
 import pytest

 from supervisor.addons.addon import Addon
-from supervisor.arch import CpuArchManager
+from supervisor.arch import CpuArch
 from supervisor.backups.manager import BackupManager
 from supervisor.coresys import CoreSys
 from supervisor.exceptions import AddonNotSupportedError, StoreJobError
@@ -163,9 +163,7 @@ async def test_update_unavailable_addon(
     with (
         patch.object(BackupManager, "do_backup_partial") as backup,
         patch.object(AddonStore, "data", new=PropertyMock(return_value=addon_config)),
-        patch.object(
-            CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
-        ),
+        patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
         patch.object(CoreSys, "machine", new=PropertyMock(return_value="qemux86-64")),
         patch.object(
             HomeAssistant,
@@ -221,9 +219,7 @@ async def test_install_unavailable_addon(

     with (
         patch.object(AddonStore, "data", new=PropertyMock(return_value=addon_config)),
-        patch.object(
-            CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
-        ),
+        patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
         patch.object(CoreSys, "machine", new=PropertyMock(return_value="qemux86-64")),
         patch.object(
             HomeAssistant,