Mirror of https://github.com/home-assistant/supervisor.git, synced 2025-07-20 15:46:29 +00:00
Reduce executor code for docker (#4438)
* Reduce executor code for docker
* Fix pylint errors and move import/export image
* Fix test and a couple other risky executor calls
* Fix dataclass and return
* Fix test case and add one for corrupt docker
* Add some coverage
* Undo changes to docker manager startup
parent 1f940a04fd
commit 1f92ab42ca
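The change applies one pattern throughout the diff: methods that used to push an entire blocking body onto the executor become coroutines, and only the individual Docker SDK calls are dispatched to the executor. A minimal before/after sketch of that shape (illustrative only, simplified names, not taken verbatim from any single file):

    # Before: the whole private method runs on an executor thread
    def run(self) -> Awaitable[None]:
        return self.sys_run_in_executor(self._run)

    def _run(self) -> None:
        if self._is_running():            # blocking Docker SDK call
            return
        self._stop()                      # more blocking calls
        self.sys_docker.run(self.image)

    # After: the method is a coroutine; only the true blocking call leaves the event loop
    def run(self) -> Awaitable[None]:
        return self._run()

    async def _run(self) -> None:
        if await self.is_running():
            return
        await self._stop()
        await self.sys_run_in_executor(self.sys_docker.run, self.image)
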
@@ -434,6 +434,7 @@ class AddonManager(CoreSysAttributes):
async def sync_dns(self) -> None:
"""Sync add-ons DNS names."""
# Update hosts
add_host_coros: list[Awaitable[None]] = []
for addon in self.installed:
try:
if not await addon.instance.is_running():
@@ -448,9 +449,13 @@ class AddonManager(CoreSysAttributes):
)
capture_exception(err)
else:
add_host_coros.append(
self.sys_plugins.dns.add_host(
ipv4=addon.ip_address, names=[addon.hostname], write=False
)
)

await asyncio.gather(*add_host_coros)

# Write hosts files
with suppress(CoreDNSError):

@@ -421,7 +421,7 @@ class Backup(CoreSysAttributes):
async def store_folders(self, folder_list: list[str]):
"""Backup Supervisor data into backup."""

def _folder_save(name: str):
async def _folder_save(name: str):
"""Take backup of a folder."""
slug_name = name.replace("/", "_")
tar_name = Path(
@@ -434,6 +434,7 @@ class Backup(CoreSysAttributes):
_LOGGER.warning("Can't find backup folder %s", name)
return

def _save() -> None:
# Take backup
_LOGGER.info("Backing up folder %s", name)
with SecureTarFile(
@@ -451,13 +452,15 @@ class Backup(CoreSysAttributes):
)

_LOGGER.info("Backup folder %s done", name)

await self.sys_run_in_executor(_save)
self._data[ATTR_FOLDERS].append(name)

# Save folder sequential
# avoid issue on slow IO
for folder in folder_list:
try:
await self.sys_run_in_executor(_folder_save, folder)
await _folder_save(folder)
except (tarfile.TarError, OSError) as err:
raise BackupError(
f"Can't backup folder {folder}: {str(err)}", _LOGGER.error

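The backup change follows the same split with a nested helper: the coroutine does the checks and bookkeeping on the event loop, and only the blocking tar write goes to the executor. Roughly (condensed sketch of the hunk above, details elided):

    async def _folder_save(name: str) -> None:
        def _save() -> None:
            # blocking tar I/O stays on the executor thread
            with SecureTarFile(tar_name, "w") as tar_file:
                ...  # add the folder contents to the archive

        await self.sys_run_in_executor(_save)
        # bookkeeping happens back on the event loop
        self._data[ATTR_FOLDERS].append(name)
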
@@ -130,7 +130,7 @@ class Core(CoreSysAttributes):
self._adjust_system_datetime(),
# Load mounts
self.sys_mounts.load(),
# Start docker monitoring
# Load Docker manager
self.sys_docker.load(),
# Load Plugins container
self.sys_plugins.load(),

@@ -4,6 +4,7 @@ from __future__ import annotations
import asyncio
from collections.abc import Callable, Coroutine
from datetime import datetime
from functools import partial
import logging
import os
from types import MappingProxyType
@@ -520,9 +521,12 @@ class CoreSys:
return datetime.now(get_time_zone(self.timezone) or UTC)

def run_in_executor(
self, funct: Callable[..., T], *args: Any
self, funct: Callable[..., T], *args: tuple[Any], **kwargs: dict[str, Any]
) -> Coroutine[Any, Any, T]:
"""Add an job to the executor pool."""
if kwargs:
funct = partial(funct, **kwargs)

return self.loop.run_in_executor(None, funct, *args)

def create_task(self, coroutine: Coroutine) -> asyncio.Task:
@@ -700,10 +704,10 @@ class CoreSysAttributes:
return self.coresys.now()

def sys_run_in_executor(
self, funct: Callable[..., T], *args: Any
self, funct: Callable[..., T], *args: tuple[Any], **kwargs: dict[str, Any]
) -> Coroutine[Any, Any, T]:
"""Add an job to the executor pool."""
return self.coresys.run_in_executor(funct, *args)
return self.coresys.run_in_executor(funct, *args, **kwargs)

def sys_create_task(self, coroutine: Coroutine) -> asyncio.Task:
"""Create an async task."""

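run_in_executor now accepts keyword arguments by binding them with functools.partial before dispatch, so call sites can forward keyword-only options directly instead of building the partial themselves. A call site in the style of the ones later in this diff:

    docker_container = await self.sys_run_in_executor(
        self.sys_docker.run,                   # blocking callable
        self.image,                            # positional args pass straight through
        tag=str(self.sys_plugins.dns.version),
        init=False,                            # keyword args are bound via functools.partial
    )
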
@@ -1,7 +1,6 @@
"""Init file for Supervisor add-on Docker object."""
from __future__ import annotations

import asyncio
from collections.abc import Awaitable
from contextlib import suppress
from ipaddress import IPv4Address, ip_address
@@ -494,12 +493,9 @@ class DockerAddon(DockerInterface):

return mounts

def _run(self) -> None:
"""Run Docker image.

Need run inside executor.
"""
if self._is_running():
async def _run(self) -> None:
"""Run Docker image."""
if await self.is_running():
return

# Security check
@@ -507,14 +503,15 @@ class DockerAddon(DockerInterface):
_LOGGER.warning("%s running with disabled protected mode!", self.addon.name)

# Cleanup
self._stop()
await self._stop()

# Don't set a hostname if no separate UTS namespace is used
hostname = None if self.uts_mode else self.addon.hostname

# Create & Run container
try:
docker_container = self.sys_docker.run(
docker_container = await self.sys_run_in_executor(
self.sys_docker.run,
self.image,
tag=str(self.addon.version),
name=self.name,
@@ -553,7 +550,7 @@ class DockerAddon(DockerInterface):

# Write data to DNS server
try:
self.sys_plugins.dns.add_host(
await self.sys_plugins.dns.add_host(
ipv4=self.ip_address, names=[self.addon.hostname]
)
except CoreDNSError as err:
@@ -566,13 +563,10 @@ class DockerAddon(DockerInterface):
BusEvent.HARDWARE_NEW_DEVICE, self._hardware_events
)

def _update(
async def _update(
self, version: AwesomeVersion, image: str | None = None, latest: bool = False
) -> None:
"""Update a docker image.

Need run inside executor.
"""
"""Update a docker image."""
image = image or self.image

_LOGGER.info(
@@ -580,15 +574,15 @@ class DockerAddon(DockerInterface):
)

# Update docker image
self._install(
await self._install(
version, image=image, latest=latest, need_build=self.addon.latest_need_build
)

# Stop container & cleanup
with suppress(DockerError):
self._stop()
await self._stop()

def _install(
async def _install(
self,
version: AwesomeVersion,
image: str | None = None,
@@ -597,20 +591,14 @@ class DockerAddon(DockerInterface):
*,
need_build: bool | None = None,
) -> None:
"""Pull Docker image or build it.

Need run inside executor.
"""
"""Pull Docker image or build it."""
if need_build is None and self.addon.need_build or need_build:
self._build(version)
await self._build(version)
else:
super()._install(version, image, latest, arch)
await super()._install(version, image, latest, arch)

def _build(self, version: AwesomeVersion) -> None:
"""Build a Docker container.

Need run inside executor.
"""
async def _build(self, version: AwesomeVersion) -> None:
"""Build a Docker container."""
build_env = AddonBuild(self.coresys, self.addon)
if not build_env.is_valid:
_LOGGER.error("Invalid build environment, can't build this add-on!")
@@ -618,8 +606,10 @@ class DockerAddon(DockerInterface):

_LOGGER.info("Starting build for %s:%s", self.image, version)
try:
image, log = self.sys_docker.images.build(
use_config_proxy=False, **build_env.get_docker_args(version)
image, log = await self.sys_run_in_executor(
self.sys_docker.images.build,
use_config_proxy=False,
**build_env.get_docker_args(version),
)

_LOGGER.debug("Build %s:%s done: %s", self.image, version, log)
@@ -645,74 +635,36 @@ class DockerAddon(DockerInterface):
@process_lock
def export_image(self, tar_file: Path) -> Awaitable[None]:
"""Export current images into a tar file."""
return self.sys_run_in_executor(self._export_image, tar_file)

def _export_image(self, tar_file: Path) -> None:
"""Export current images into a tar file.

Need run inside executor.
"""
try:
image = self.sys_docker.api.get_image(f"{self.image}:{self.version}")
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't fetch image %s: %s", self.image, err)
raise DockerError() from err

_LOGGER.info("Export image %s to %s", self.image, tar_file)
try:
with tar_file.open("wb") as write_tar:
for chunk in image:
write_tar.write(chunk)
except (OSError, requests.RequestException) as err:
_LOGGER.error("Can't write tar file %s: %s", tar_file, err)
raise DockerError() from err

_LOGGER.info("Export image %s done", self.image)
return self.sys_run_in_executor(
self.sys_docker.export_image, self.image, self.version, tar_file
)

@process_lock
def import_image(self, tar_file: Path) -> Awaitable[None]:
async def import_image(self, tar_file: Path) -> None:
"""Import a tar file as image."""
return self.sys_run_in_executor(self._import_image, tar_file)

def _import_image(self, tar_file: Path) -> None:
"""Import a tar file as image.

Need run inside executor.
"""
try:
with tar_file.open("rb") as read_tar:
docker_image_list = self.sys_docker.images.load(read_tar)

if len(docker_image_list) != 1:
_LOGGER.warning(
"Unexpected image count %d while importing image from tar",
len(docker_image_list),
docker_image = await self.sys_run_in_executor(
self.sys_docker.import_image, tar_file
)
return
docker_image = docker_image_list[0]
except (docker.errors.DockerException, OSError) as err:
_LOGGER.error("Can't import image %s: %s", self.image, err)
raise DockerError() from err

if docker_image:
self._meta = docker_image.attrs
_LOGGER.info("Importing image %s and version %s", tar_file, self.version)

with suppress(DockerError):
self._cleanup()
await self._cleanup()

@process_lock
def write_stdin(self, data: bytes) -> Awaitable[None]:
async def write_stdin(self, data: bytes) -> None:
"""Write to add-on stdin."""
return self.sys_run_in_executor(self._write_stdin, data)
if not await self.is_running():
raise DockerError()

await self.sys_run_in_executor(self._write_stdin, data)

def _write_stdin(self, data: bytes) -> None:
"""Write to add-on stdin.

Need run inside executor.
"""
if not self._is_running():
raise DockerError()

try:
# Load needed docker objects
container = self.sys_docker.containers.get(self.name)
@@ -730,15 +682,12 @@ class DockerAddon(DockerInterface):
_LOGGER.error("Can't write to %s stdin: %s", self.name, err)
raise DockerError() from err

def _stop(self, remove_container=True) -> None:
"""Stop/remove Docker container.

Need run inside executor.
"""
async def _stop(self, remove_container: bool = True) -> None:
"""Stop/remove Docker container."""
# DNS
if self.ip_address != NO_ADDDRESS:
try:
self.sys_plugins.dns.delete_host(self.addon.hostname)
await self.sys_plugins.dns.delete_host(self.addon.hostname)
except CoreDNSError as err:
_LOGGER.warning("Can't update DNS for %s", self.name)
capture_exception(err)
@@ -748,9 +697,9 @@ class DockerAddon(DockerInterface):
self.sys_bus.remove_listener(self._hw_listener)
self._hw_listener = None

super()._stop(remove_container)
await super()._stop(remove_container)

def _validate_trust(
async def _validate_trust(
self, image_id: str, image: str, version: AwesomeVersion
) -> None:
"""Validate trust of content."""
@@ -758,11 +707,7 @@ class DockerAddon(DockerInterface):
return

checksum = image_id.partition(":")[2]
job = asyncio.run_coroutine_threadsafe(
self.sys_security.verify_content(self.addon.codenotary, checksum),
self.sys_loop,
)
job.result()
return await self.sys_security.verify_content(self.addon.codenotary, checksum)

@Job(conditions=[JobCondition.OS_AGENT], limit=JobExecutionLimit.SINGLE_WAIT)
async def _hardware_events(self, device: Device) -> None:

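The _validate_trust change above also removes a thread hop: when the method ran on an executor thread it had to schedule the coroutine back onto the event loop with run_coroutine_threadsafe and block on the result; as a coroutine it simply awaits the check. Side by side (condensed from the hunk):

    # Old: executing on an executor thread, so hop back to the loop and block
    job = asyncio.run_coroutine_threadsafe(
        self.sys_security.verify_content(self.addon.codenotary, checksum),
        self.sys_loop,
    )
    job.result()

    # New: already running on the event loop, so await directly
    await self.sys_security.verify_content(self.addon.codenotary, checksum)
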
@@ -82,19 +82,17 @@ class DockerAudio(DockerInterface, CoreSysAttributes):
return None
return DOCKER_CPU_RUNTIME_ALLOCATION

def _run(self) -> None:
"""Run Docker image.

Need run inside executor.
"""
if self._is_running():
async def _run(self) -> None:
"""Run Docker image."""
if await self.is_running():
return

# Cleanup
self._stop()
await self._stop()

# Create & Run container
docker_container = self.sys_docker.run(
docker_container = await self.sys_run_in_executor(
self.sys_docker.run,
self.image,
tag=str(self.sys_plugins.audio.version),
init=False,

@@ -23,19 +23,17 @@ class DockerCli(DockerInterface, CoreSysAttributes):
"""Return name of Docker container."""
return CLI_DOCKER_NAME

def _run(self) -> None:
"""Run Docker image.

Need run inside executor.
"""
if self._is_running():
async def _run(self) -> None:
"""Run Docker image."""
if await self.is_running():
return

# Cleanup
self._stop()
await self._stop()

# Create & Run container
docker_container = self.sys_docker.run(
docker_container = await self.sys_run_in_executor(
self.sys_docker.run,
self.image,
entrypoint=["/init"],
tag=str(self.sys_plugins.cli.version),

@@ -25,19 +25,17 @@ class DockerDNS(DockerInterface, CoreSysAttributes):
"""Return name of Docker container."""
return DNS_DOCKER_NAME

def _run(self) -> None:
"""Run Docker image.

Need run inside executor.
"""
if self._is_running():
async def _run(self) -> None:
"""Run Docker image."""
if await self.is_running():
return

# Cleanup
self._stop()
await self._stop()

# Create & Run container
docker_container = self.sys_docker.run(
docker_container = await self.sys_run_in_executor(
self.sys_docker.run,
self.image,
tag=str(self.sys_plugins.dns.version),
init=False,

@@ -4,14 +4,12 @@ from ipaddress import IPv4Address
import logging

from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
import docker
from docker.types import Mount
import requests

from ..const import LABEL_MACHINE, MACHINE_ID
from ..exceptions import DockerError
from ..hardware.const import PolicyGroup
from ..homeassistant.const import LANDINGPAGE
from ..utils import process_lock
from .const import (
ENV_TIME,
ENV_TOKEN,
@@ -133,19 +131,17 @@ class DockerHomeAssistant(DockerInterface):

return mounts

def _run(self) -> None:
"""Run Docker image.

Need run inside executor.
"""
if self._is_running():
async def _run(self) -> None:
"""Run Docker image."""
if await self.is_running():
return

# Cleanup
self._stop()
await self._stop()

# Create & Run container
docker_container = self.sys_docker.run(
docker_container = await self.sys_run_in_executor(
self.sys_docker.run,
self.image,
tag=(self.sys_homeassistant.version),
name=self.name,
@@ -177,12 +173,11 @@ class DockerHomeAssistant(DockerInterface):
"Starting Home Assistant %s with version %s", self.image, self.version
)

def _execute_command(self, command: str) -> CommandReturn:
"""Create a temporary container and run command.

Need run inside executor.
"""
return self.sys_docker.run_command(
@process_lock
async def execute_command(self, command: str) -> CommandReturn:
"""Create a temporary container and run command."""
return await self.sys_run_in_executor(
self.sys_docker.run_command,
self.image,
version=self.sys_homeassistant.version,
command=command,
@@ -217,34 +212,14 @@ class DockerHomeAssistant(DockerInterface):

def is_initialize(self) -> Awaitable[bool]:
"""Return True if Docker container exists."""
return self.sys_run_in_executor(self._is_initialize)

def _is_initialize(self) -> bool:
"""Return True if docker container exists.

Need run inside executor.
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
docker_image = self.sys_docker.images.get(
f"{self.image}:{self.sys_homeassistant.version}"
return self.sys_run_in_executor(
self.sys_docker.container_is_initialized,
self.name,
self.image,
self.sys_homeassistant.version,
)
except docker.errors.NotFound:
return False
except (docker.errors.DockerException, requests.RequestException):
return DockerError()

# we run on an old image, stop and start it
if docker_container.image.id != docker_image.id:
return False

# Check of correct state
if docker_container.status not in ("exited", "running", "created"):
return False

return True

def _validate_trust(
async def _validate_trust(
self, image_id: str, image: str, version: AwesomeVersion
) -> None:
"""Validate trust of content."""
@@ -254,4 +229,4 @@ class DockerHomeAssistant(DockerInterface):
except AwesomeVersionCompareException:
return

super()._validate_trust(image_id, image, version)
await super()._validate_trust(image_id, image, version)

@@ -193,7 +193,7 @@ class DockerInterface(CoreSysAttributes):

return credentials

def _docker_login(self, image: str) -> None:
async def _docker_login(self, image: str) -> None:
"""Try to log in to the registry if there are credentials available."""
if not self.sys_docker.config.registries:
return
@@ -202,7 +202,7 @@ class DockerInterface(CoreSysAttributes):
if not credentials:
return

self.sys_docker.docker.login(**credentials)
await self.sys_run_in_executor(self.sys_docker.docker.login, **credentials)

@process_lock
def install(
@@ -211,21 +211,18 @@ class DockerInterface(CoreSysAttributes):
image: str | None = None,
latest: bool = False,
arch: CpuArch | None = None,
):
) -> Awaitable[None]:
"""Pull docker image."""
return self.sys_run_in_executor(self._install, version, image, latest, arch)
return self._install(version, image, latest, arch)

def _install(
async def _install(
self,
version: AwesomeVersion,
image: str | None = None,
latest: bool = False,
arch: CpuArch | None = None,
) -> None:
"""Pull Docker image.

Need run inside executor.
"""
"""Pull Docker image."""
image = image or self.image
arch = arch or self.sys_arch.supervisor

@@ -233,21 +230,24 @@ class DockerInterface(CoreSysAttributes):
try:
if self.sys_docker.config.registries:
# Try login if we have defined credentials
self._docker_login(image)
await self._docker_login(image)

# Pull new image
docker_image = self.sys_docker.images.pull(
docker_image = await self.sys_run_in_executor(
self.sys_docker.images.pull,
f"{image}:{version!s}",
platform=MAP_ARCH[arch],
)

# Validate content
try:
self._validate_trust(docker_image.id, image, version)
await self._validate_trust(docker_image.id, image, version)
except CodeNotaryError:
with suppress(docker.errors.DockerException):
self.sys_docker.images.remove(
image=f"{image}:{version!s}", force=True
await self.sys_run_in_executor(
self.sys_docker.images.remove,
image=f"{image}:{version!s}",
force=True,
)
raise

@@ -256,7 +256,7 @@ class DockerInterface(CoreSysAttributes):
_LOGGER.info(
"Tagging image %s with version %s as latest", image, version
)
docker_image.tag(image, tag="latest")
await self.sys_run_in_executor(docker_image.tag, image, tag="latest")
except docker.errors.APIError as err:
if err.status_code == 429:
self.sys_resolution.create_issue(
@@ -289,34 +289,21 @@ class DockerInterface(CoreSysAttributes):

self._meta = docker_image.attrs

def exists(self) -> Awaitable[bool]:
async def exists(self) -> bool:
"""Return True if Docker image exists in local repository."""
return self.sys_run_in_executor(self._exists)

def _exists(self) -> bool:
"""Return True if Docker image exists in local repository.

Need run inside executor.
"""
with suppress(docker.errors.DockerException, requests.RequestException):
self.sys_docker.images.get(f"{self.image}:{self.version!s}")
await self.sys_run_in_executor(
self.sys_docker.images.get, f"{self.image}:{self.version!s}"
)
return True
return False

def is_running(self) -> Awaitable[bool]:
"""Return True if Docker is running.

Return a Future.
"""
return self.sys_run_in_executor(self._is_running)

def _is_running(self) -> bool:
"""Return True if Docker is running.

Need run inside executor.
"""
async def is_running(self) -> bool:
"""Return True if Docker is running."""
try:
docker_container = self.sys_docker.containers.get(self.name)
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
return False
except docker.errors.DockerException as err:
@@ -326,20 +313,12 @@ class DockerInterface(CoreSysAttributes):

return docker_container.status == "running"

def current_state(self) -> Awaitable[ContainerState]:
"""Return current state of container.

Return a Future.
"""
return self.sys_run_in_executor(self._current_state)

def _current_state(self) -> ContainerState:
"""Return current state of container.

Need run inside executor.
"""
async def current_state(self) -> ContainerState:
"""Return current state of container."""
try:
docker_container = self.sys_docker.containers.get(self.name)
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
return ContainerState.UNKNOWN
except docker.errors.DockerException as err:
@@ -354,17 +333,16 @@ class DockerInterface(CoreSysAttributes):
self, version: AwesomeVersion, *, skip_state_event_if_down: bool = False
) -> Awaitable[None]:
"""Attach to running Docker container."""
return self.sys_run_in_executor(self._attach, version, skip_state_event_if_down)
return self._attach(version, skip_state_event_if_down)

def _attach(
async def _attach(
self, version: AwesomeVersion, skip_state_event_if_down: bool = False
) -> None:
"""Attach to running docker container.

Need run inside executor.
"""
"""Attach to running docker container."""
with suppress(docker.errors.DockerException, requests.RequestException):
docker_container = self.sys_docker.containers.get(self.name)
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
self._meta = docker_container.attrs
self.sys_docker.monitor.watch_container(docker_container)

@@ -374,8 +352,7 @@ class DockerInterface(CoreSysAttributes):
and state in [ContainerState.STOPPED, ContainerState.FAILED]
):
# Fire event with current state of container
self.sys_loop.call_soon_threadsafe(
self.sys_bus.fire_event,
self.sys_bus.fire_event(
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
DockerContainerStateEvent(
self.name, state, docker_container.id, int(time())
@@ -396,95 +373,42 @@ class DockerInterface(CoreSysAttributes):
@process_lock
def run(self) -> Awaitable[None]:
"""Run Docker image."""
return self.sys_run_in_executor(self._run)
return self._run()

def _run(self) -> None:
"""Run Docker image.

Need run inside executor.
"""
async def _run(self) -> None:
"""Run Docker image."""
raise NotImplementedError()

@process_lock
def stop(self, remove_container=True) -> Awaitable[None]:
def stop(self, remove_container: bool = True) -> Awaitable[None]:
"""Stop/remove Docker container."""
return self.sys_run_in_executor(self._stop, remove_container)
return self._stop(remove_container)

def _stop(self, remove_container=True) -> None:
"""Stop/remove Docker container.

Need run inside executor.
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
except docker.errors.NotFound:
return
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err

if docker_container.status == "running":
_LOGGER.info("Stopping %s application", self.name)
with suppress(docker.errors.DockerException, requests.RequestException):
docker_container.stop(timeout=self.timeout)

if remove_container:
with suppress(docker.errors.DockerException, requests.RequestException):
_LOGGER.info("Cleaning %s application", self.name)
docker_container.remove(force=True)
async def _stop(self, remove_container: bool = True) -> None:
"""Stop/remove Docker container."""
with suppress(DockerNotFound):
await self.sys_run_in_executor(
self.sys_docker.stop_container,
self.name,
self.timeout,
remove_container,
)

@process_lock
def start(self) -> Awaitable[None]:
"""Start Docker container."""
return self.sys_run_in_executor(self._start)

def _start(self) -> None:
"""Start docker container.

Need run inside executor.
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"{self.name} not found for starting up", _LOGGER.error
) from err

_LOGGER.info("Starting %s", self.name)
try:
docker_container.start()
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(f"Can't start {self.name}: {err}", _LOGGER.error) from err
return self.sys_run_in_executor(self.sys_docker.start_container, self.name)

@process_lock
def remove(self) -> Awaitable[None]:
async def remove(self) -> None:
"""Remove Docker images."""
return self.sys_run_in_executor(self._remove)

def _remove(self) -> None:
"""Remove docker images.

Needs run inside executor.
"""
# Cleanup container
with suppress(DockerError):
self._stop()
await self._stop()

_LOGGER.info("Removing image %s with latest and %s", self.image, self.version)

try:
with suppress(docker.errors.ImageNotFound):
self.sys_docker.images.remove(image=f"{self.image}:latest", force=True)

with suppress(docker.errors.ImageNotFound):
self.sys_docker.images.remove(
image=f"{self.image}:{self.version!s}", force=True
await self.sys_run_in_executor(
self.sys_docker.remove_image, self.image, self.version
)

except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't remove image {self.image}: {err}", _LOGGER.warning
) from err

self._meta = None

@process_lock
@@ -492,15 +416,12 @@ class DockerInterface(CoreSysAttributes):
self, version: AwesomeVersion, image: str | None = None, latest: bool = False
) -> Awaitable[None]:
"""Update a Docker image."""
return self.sys_run_in_executor(self._update, version, image, latest)
return self._update(version, image, latest)

def _update(
async def _update(
self, version: AwesomeVersion, image: str | None = None, latest: bool = False
) -> None:
"""Update a docker image.

Need run inside executor.
"""
"""Update a docker image."""
image = image or self.image

_LOGGER.info(
@@ -508,163 +429,60 @@ class DockerInterface(CoreSysAttributes):
)

# Update docker image
self._install(version, image=image, latest=latest)
await self._install(version, image=image, latest=latest)

# Stop container & cleanup
with suppress(DockerError):
self._stop()
await self._stop()

def logs(self) -> Awaitable[bytes]:
"""Return Docker logs of container.

Return a Future.
"""
return self.sys_run_in_executor(self._logs)

def _logs(self) -> bytes:
"""Return Docker logs of container.

Need run inside executor.
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
except (docker.errors.DockerException, requests.RequestException):
return b""

try:
return docker_container.logs(tail=100, stdout=True, stderr=True)
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.warning("Can't grep logs from %s: %s", self.image, err)
async def logs(self) -> bytes:
"""Return Docker logs of container."""
with suppress(DockerError):
return await self.sys_run_in_executor(
self.sys_docker.container_logs, self.name
)

return b""

@process_lock
def cleanup(self, old_image: str | None = None) -> Awaitable[None]:
"""Check if old version exists and cleanup."""
return self.sys_run_in_executor(self._cleanup, old_image)
return self._cleanup(old_image)

def _cleanup(self, old_image: str | None = None) -> None:
"""Check if old version exists and cleanup.

Need run inside executor.
"""
try:
origin = self.sys_docker.images.get(f"{self.image}:{self.version!s}")
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't find {self.image} for cleanup", _LOGGER.warning
) from err

# Cleanup Current
try:
images_list = self.sys_docker.images.list(name=self.image)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Corrupt docker overlayfs found: {err}", _LOGGER.warning
) from err

for image in images_list:
if origin.id == image.id:
continue

with suppress(docker.errors.DockerException, requests.RequestException):
_LOGGER.info("Cleanup images: %s", image.tags)
self.sys_docker.images.remove(image.id, force=True)

# Cleanup Old
if not old_image or self.image == old_image:
return

try:
images_list = self.sys_docker.images.list(name=old_image)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Corrupt docker overlayfs found: {err}", _LOGGER.warning
) from err

for image in images_list:
if origin.id == image.id:
continue

with suppress(docker.errors.DockerException, requests.RequestException):
_LOGGER.info("Cleanup images: %s", image.tags)
self.sys_docker.images.remove(image.id, force=True)
def _cleanup(self, old_image: str | None = None) -> Awaitable[None]:
"""Check if old version exists and cleanup."""
return self.sys_run_in_executor(
self.sys_docker.cleanup_old_images,
self.image,
self.version,
{old_image} if old_image else None,
)

@process_lock
def restart(self) -> Awaitable[None]:
"""Restart docker container."""
return self.sys_loop.run_in_executor(None, self._restart)

def _restart(self) -> None:
"""Restart docker container.

Need run inside executor.
"""
try:
container = self.sys_docker.containers.get(self.name)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err

_LOGGER.info("Restarting %s", self.image)
try:
container.restart(timeout=self.timeout)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't restart {self.image}: {err}", _LOGGER.warning
) from err
return self.sys_run_in_executor(
self.sys_docker.restart_container, self.name, self.timeout
)

@process_lock
def execute_command(self, command: str) -> Awaitable[CommandReturn]:
async def execute_command(self, command: str) -> CommandReturn:
"""Create a temporary container and run command."""
return self.sys_run_in_executor(self._execute_command, command)

def _execute_command(self, command: str) -> CommandReturn:
"""Create a temporary container and run command.

Need run inside executor.
"""
raise NotImplementedError()

def stats(self) -> Awaitable[DockerStats]:
async def stats(self) -> DockerStats:
"""Read and return stats from container."""
return self.sys_run_in_executor(self._stats)

def _stats(self) -> DockerStats:
"""Create a temporary container and run command.

Need run inside executor.
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err

# container is not running
if docker_container.status != "running":
raise DockerError(f"Container {self.name} is not running", _LOGGER.error)

try:
stats = docker_container.stats(stream=False)
stats = await self.sys_run_in_executor(
self.sys_docker.container_stats, self.name
)
return DockerStats(stats)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't read stats from {self.name}: {err}", _LOGGER.error
) from err

def is_failed(self) -> Awaitable[bool]:
"""Return True if Docker is failing state.

Return a Future.
"""
return self.sys_run_in_executor(self._is_failed)

def _is_failed(self) -> bool:
"""Return True if Docker is failing state.

Need run inside executor.
"""
async def is_failed(self) -> bool:
"""Return True if Docker is failing state."""
try:
docker_container = self.sys_docker.containers.get(self.name)
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
return False
except (docker.errors.DockerException, requests.RequestException) as err:
@@ -677,18 +495,13 @@ class DockerInterface(CoreSysAttributes):
# Check return value
return int(docker_container.attrs["State"]["ExitCode"]) != 0

def get_latest_version(self) -> Awaitable[AwesomeVersion]:
async def get_latest_version(self) -> AwesomeVersion:
"""Return latest version of local image."""
return self.sys_run_in_executor(self._get_latest_version)

def _get_latest_version(self) -> AwesomeVersion:
"""Return latest version of local image.

Need run inside executor.
"""
available_version: list[AwesomeVersion] = []
try:
for image in self.sys_docker.images.list(self.image):
for image in await self.sys_run_in_executor(
self.sys_docker.images.list, self.image
):
for tag in image.tags:
version = AwesomeVersion(tag.partition(":")[2])
if version.strategy == AwesomeVersionStrategy.UNKNOWN:
@@ -716,48 +529,25 @@ class DockerInterface(CoreSysAttributes):
@process_lock
def run_inside(self, command: str) -> Awaitable[CommandReturn]:
"""Execute a command inside Docker container."""
return self.sys_run_in_executor(self._run_inside, command)
return self.sys_run_in_executor(
self.sys_docker.container_run_inside, self.name, command
)

def _run_inside(self, command: str) -> CommandReturn:
"""Execute a command inside Docker container.

Need run inside executor.
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
except docker.errors.NotFound:
raise DockerNotFound() from None
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err

# Execute
try:
code, output = docker_container.exec_run(command)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err

return CommandReturn(code, output)

def _validate_trust(
async def _validate_trust(
self, image_id: str, image: str, version: AwesomeVersion
) -> None:
"""Validate trust of content."""
checksum = image_id.partition(":")[2]
job = asyncio.run_coroutine_threadsafe(
self.sys_security.verify_own_content(checksum), self.sys_loop
)
job.result()
return await self.sys_security.verify_own_content(checksum)

@process_lock
def check_trust(self) -> Awaitable[None]:
async def check_trust(self) -> None:
"""Check trust of exists Docker image."""
return self.sys_run_in_executor(self._check_trust)

def _check_trust(self) -> None:
"""Check trust of current image."""
try:
image = self.sys_docker.images.get(f"{self.image}:{self.version!s}")
image = await self.sys_run_in_executor(
self.sys_docker.images.get, f"{self.image}:{self.version!s}"
)
except (docker.errors.DockerException, requests.RequestException):
return

self._validate_trust(image.id, self.image, self.version)
await self._validate_trust(image.id, self.image, self.version)

@@ -11,8 +11,9 @@ from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
from docker import errors as docker_errors
from docker.api.client import APIClient
from docker.client import DockerClient
from docker.errors import DockerException, ImageNotFound, NotFound
from docker.models.containers import Container, ContainerCollection
from docker.models.images import ImageCollection
from docker.models.images import Image, ImageCollection
from docker.models.networks import Network
from docker.types.daemon import CancellableStream
import requests
@@ -351,3 +352,224 @@ class DockerAPI:

with suppress(docker_errors.DockerException, requests.RequestException):
network.disconnect(data.get("Name", cid), force=True)

def container_is_initialized(
self, name: str, image: str, version: AwesomeVersion
) -> bool:
"""Return True if docker container exists in good state and is built from expected image."""
try:
docker_container = self.containers.get(name)
docker_image = self.images.get(f"{image}:{version}")
except NotFound:
return False
except (DockerException, requests.RequestException) as err:
raise DockerError() from err

# Check the image is correct and state is good
return (
docker_container.image.id == docker_image.id
and docker_container.status in ("exited", "running", "created")
)

def stop_container(
self, name: str, timeout: int, remove_container: bool = True
) -> None:
"""Stop/remove Docker container."""
try:
docker_container: Container = self.containers.get(name)
except NotFound:
raise DockerNotFound() from None
except (DockerException, requests.RequestException) as err:
raise DockerError() from err

if docker_container.status == "running":
_LOGGER.info("Stopping %s application", name)
with suppress(DockerException, requests.RequestException):
docker_container.stop(timeout=timeout)

if remove_container:
with suppress(DockerException, requests.RequestException):
_LOGGER.info("Cleaning %s application", name)
docker_container.remove(force=True)

def start_container(self, name: str) -> None:
"""Start Docker container."""
try:
docker_container: Container = self.containers.get(name)
except NotFound:
raise DockerNotFound(
f"{name} not found for starting up", _LOGGER.error
) from None
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get {name} for starting up", _LOGGER.error
) from err

_LOGGER.info("Starting %s", name)
try:
docker_container.start()
except (DockerException, requests.RequestException) as err:
raise DockerError(f"Can't start {name}: {err}", _LOGGER.error) from err

def restart_container(self, name: str, timeout: int) -> None:
"""Restart docker container."""
try:
container: Container = self.containers.get(name)
except NotFound:
raise DockerNotFound() from None
except (DockerException, requests.RequestException) as err:
raise DockerError() from err

_LOGGER.info("Restarting %s", name)
try:
container.restart(timeout=timeout)
except (DockerException, requests.RequestException) as err:
raise DockerError(f"Can't restart {name}: {err}", _LOGGER.warning) from err

def container_logs(self, name: str, tail: int = 100) -> bytes:
"""Return Docker logs of container."""
try:
docker_container: Container = self.containers.get(name)
except NotFound:
raise DockerNotFound() from None
except (DockerException, requests.RequestException) as err:
raise DockerError() from err

try:
return docker_container.logs(tail=tail, stdout=True, stderr=True)
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't grep logs from {name}: {err}", _LOGGER.warning
) from err

def container_stats(self, name: str) -> dict[str, Any]:
"""Read and return stats from container."""
try:
docker_container: Container = self.containers.get(name)
except NotFound:
raise DockerNotFound() from None
except (DockerException, requests.RequestException) as err:
raise DockerError() from err

# container is not running
if docker_container.status != "running":
raise DockerError(f"Container {name} is not running", _LOGGER.error)

try:
return docker_container.stats(stream=False)
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't read stats from {name}: {err}", _LOGGER.error
) from err

def container_run_inside(self, name: str, command: str) -> CommandReturn:
"""Execute a command inside Docker container."""
try:
docker_container: Container = self.containers.get(name)
except NotFound:
raise DockerNotFound() from None
except (DockerException, requests.RequestException) as err:
raise DockerError() from err

# Execute
try:
code, output = docker_container.exec_run(command)
except (DockerException, requests.RequestException) as err:
raise DockerError() from err

return CommandReturn(code, output)

def remove_image(
self, image: str, version: AwesomeVersion, latest: bool = True
) -> None:
"""Remove a Docker image by version and latest."""
try:
if latest:
_LOGGER.info("Removing image %s with latest", image)
with suppress(ImageNotFound):
self.images.remove(image=f"{image}:latest", force=True)

_LOGGER.info("Removing image %s with %s", image, version)
with suppress(ImageNotFound):
self.images.remove(image=f"{image}:{version!s}", force=True)

except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't remove image {image}: {err}", _LOGGER.warning
) from err

def import_image(self, tar_file: Path) -> Image | None:
"""Import a tar file as image."""
try:
with tar_file.open("rb") as read_tar:
docker_image_list: list[Image] = self.images.load(read_tar)

if len(docker_image_list) != 1:
_LOGGER.warning(
"Unexpected image count %d while importing image from tar",
len(docker_image_list),
)
return None
return docker_image_list[0]
except (DockerException, OSError) as err:
raise DockerError(
f"Can't import image from tar: {err}", _LOGGER.error
) from err

def export_image(self, image: str, version: AwesomeVersion, tar_file: Path) -> None:
"""Export current images into a tar file."""
try:
image = self.api.get_image(f"{image}:{version}")
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't fetch image {image}: {err}", _LOGGER.error
) from err

_LOGGER.info("Export image %s to %s", image, tar_file)
try:
with tar_file.open("wb") as write_tar:
for chunk in image:
write_tar.write(chunk)
except (OSError, requests.RequestException) as err:
raise DockerError(
f"Can't write tar file {tar_file}: {err}", _LOGGER.error
) from err

_LOGGER.info("Export image %s done", image)

def cleanup_old_images(
self,
current_image: str,
current_version: AwesomeVersion,
old_images: set[str] | None = None,
) -> None:
"""Clean up old versions of an image."""
try:
current: Image = self.images.get(f"{current_image}:{current_version!s}")
except ImageNotFound:
raise DockerNotFound(
f"{current_image} not found for cleanup", _LOGGER.warning
) from None
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't get {current_image} for cleanup", _LOGGER.warning
) from err

# Cleanup old and current
image_names = list(
old_images | {current_image} if old_images else {current_image}
)
try:
images_list = self.images.list(name=image_names)
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Corrupt docker overlayfs found: {err}", _LOGGER.warning
) from err

for image in images_list:
if current.id == image.id:
continue

with suppress(DockerException, requests.RequestException):
_LOGGER.info("Cleanup images: %s", image.tags)
self.images.remove(image.id, force=True)

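The new DockerAPI helpers above are intentionally synchronous: they bundle the Docker SDK calls plus the error translation into DockerError/DockerNotFound, and the async DockerInterface layer reaches them with a single executor hop, as in the stats path earlier in this diff:

    # async caller in DockerInterface
    stats = await self.sys_run_in_executor(self.sys_docker.container_stats, self.name)
    return DockerStats(stats)
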
@@ -28,19 +28,17 @@ class DockerMulticast(DockerInterface, CoreSysAttributes):
"""Generate needed capabilities."""
return [Capabilities.NET_ADMIN.value]

def _run(self) -> None:
"""Run Docker image.

Need run inside executor.
"""
if self._is_running():
async def _run(self) -> None:
"""Run Docker image."""
if await self.is_running():
return

# Cleanup
self._stop()
await self._stop()

# Create & Run container
docker_container = self.sys_docker.run(
docker_container = await self.sys_run_in_executor(
self.sys_docker.run,
self.image,
tag=str(self.sys_plugins.multicast.version),
init=False,

@@ -21,7 +21,7 @@ class DockerNetwork:
def __init__(self, docker_client: docker.DockerClient):
"""Initialize internal Supervisor network."""
self.docker: docker.DockerClient = docker_client
self.network: docker.models.networks.Network = self._get_network()
self._network: docker.models.networks.Network = self._get_network()

@property
def name(self) -> str:
@@ -29,18 +29,14 @@ class DockerNetwork:
return DOCKER_NETWORK

@property
def containers(self) -> list[docker.models.containers.Container]:
"""Return of connected containers from network."""
containers: list[docker.models.containers.Container] = []
for cid, _ in self.network.attrs.get("Containers", {}).items():
try:
containers.append(self.docker.containers.get(cid))
except docker.errors.NotFound:
_LOGGER.warning("Docker network is corrupt! %s", cid)
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Unknown error with container lookup %s", err)
def network(self) -> docker.models.networks.Network:
"""Return docker network."""
return self._network

return containers
@property
def containers(self) -> list[str]:
"""Return of connected containers from network."""
return list(self.network.attrs.get("Containers", {}).keys())

@property
def gateway(self) -> IPv4Address:

@@ -25,19 +25,17 @@ class DockerObserver(DockerInterface, CoreSysAttributes):
"""Return name of Docker container."""
return OBSERVER_DOCKER_NAME

def _run(self) -> None:
"""Run Docker image.

Need run inside executor.
"""
if self._is_running():
async def _run(self) -> None:
"""Run Docker image."""
if await self.is_running():
return

# Cleanup
self._stop()
await self._stop()

# Create & Run container
docker_container = self.sys_docker.run(
docker_container = await self.sys_run_in_executor(
self.sys_docker.run,
self.image,
tag=str(self.sys_plugins.observer.version),
init=False,

@@ -43,15 +43,14 @@ class DockerSupervisor(DockerInterface, CoreSysAttributes):
if mount.get("Destination") == "/data"
)

def _attach(
async def _attach(
self, version: AwesomeVersion, skip_state_event_if_down: bool = False
) -> None:
"""Attach to running docker container.

Need run inside executor.
"""
"""Attach to running docker container."""
try:
docker_container = self.sys_docker.containers.get(self.name)
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err

@@ -63,12 +62,13 @@ class DockerSupervisor(DockerInterface, CoreSysAttributes):
)

# If already attach
if docker_container in self.sys_docker.network.containers:
if docker_container.id in self.sys_docker.network.containers:
return

# Attach to network
_LOGGER.info("Connecting Supervisor to hassio-network")
self.sys_docker.network.attach_container(
await self.sys_run_in_executor(
self.sys_docker.network.attach_container,
docker_container,
alias=["supervisor"],
ipv4=self.sys_docker.network.supervisor,

@@ -318,10 +318,7 @@ class HomeAssistantCore(CoreSysAttributes):

@process_lock
async def stop(self) -> None:
"""Stop Home Assistant Docker.

Return a coroutine.
"""
"""Stop Home Assistant Docker."""
try:
return await self.instance.stop(remove_container=False)
except DockerError as err:

@ -1,9 +1,9 @@
"""Pulse host control."""
from dataclasses import dataclass, field
from datetime import timedelta
from enum import Enum
from enum import StrEnum
import logging

import attr
from pulsectl import Pulse, PulseError, PulseIndexError, PulseOperationFailed

from ..coresys import CoreSys, CoreSysAttributes

@ -16,57 +16,67 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
PULSE_NAME = "supervisor"

class StreamType(str, Enum):
class StreamType(StrEnum):
"""INPUT/OUTPUT type of source."""

INPUT = "input"
OUTPUT = "output"

@attr.s(frozen=True)
@dataclass(slots=True, frozen=True)
class AudioApplication:
"""Represent a application on the stream."""

name: str = attr.ib()
index: int = attr.ib()
stream_index: str = attr.ib()
stream_type: StreamType = attr.ib()
volume: float = attr.ib()
mute: bool = attr.ib()
addon: str = attr.ib()
name: str
index: int
stream_index: str
stream_type: StreamType
volume: float
mute: bool
addon: str

@attr.s(frozen=True)
@dataclass(slots=True, frozen=True)
class AudioStream:
"""Represent a input/output stream."""

name: str = attr.ib()
index: int = attr.ib()
description: str = attr.ib()
volume: float = attr.ib()
mute: bool = attr.ib()
default: bool = attr.ib()
card: int | None = attr.ib()
applications: list[AudioApplication] = attr.ib()
name: str
index: int
description: str
volume: float
mute: bool
default: bool
card: int | None
applications: list[AudioApplication]

@attr.s(frozen=True)
@dataclass(slots=True, frozen=True)
class SoundProfile:
"""Represent a Sound Card profile."""

name: str = attr.ib()
description: str = attr.ib()
active: bool = attr.ib()
name: str
description: str
active: bool

@attr.s(frozen=True)
@dataclass(slots=True, frozen=True)
class SoundCard:
"""Represent a Sound Card."""

name: str = attr.ib()
index: int = attr.ib()
driver: str = attr.ib()
profiles: list[SoundProfile] = attr.ib()
name: str
index: int
driver: str
profiles: list[SoundProfile]

@dataclass(slots=True)
class PulseData:
"""Represent pulse data we care about."""

cards: list[SoundCard] = field(default_factory=list)
inputs: list[AudioStream] = field(default_factory=list)
outputs: list[AudioStream] = field(default_factory=list)
applications: list[AudioApplication] = field(default_factory=list)

class SoundControl(CoreSysAttributes):
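Note: the hunk above swaps the attrs-based records for standard-library dataclasses. A small sketch of the equivalence, using a simplified SoundProfile-like record (field names match the diff; the usage below is illustrative only, and dataclass(slots=True) requires Python 3.10+):

from dataclasses import dataclass


@dataclass(slots=True, frozen=True)
class SoundProfile:
    """Represent a Sound Card profile."""

    name: str
    description: str
    active: bool


profile = SoundProfile("analog-stereo", "Analog Stereo Output", True)
# frozen=True makes instances immutable, slots=True drops the per-instance
# __dict__, and field order defines the positional constructor, mirroring
# the old attr.s(frozen=True) behaviour without the third-party dependency.
assert profile.active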
@ -227,15 +237,15 @@ class SoundControl(CoreSysAttributes):
"""Update properties over dbus."""
_LOGGER.info("Updating PulseAudio information")

def _update():
def _get_pulse_data() -> PulseData:
data = PulseData()

try:
with Pulse(PULSE_NAME) as pulse:
server = pulse.server_info()

# Update applications
self._applications.clear()
for application in pulse.sink_input_list():
self._applications.append(
data.applications = [
AudioApplication(
application.proplist.get(
"application.name", application.name

@ -249,9 +259,9 @@ class SoundControl(CoreSysAttributes):
"application.process.machine_id", ""
).replace("-", "_"),
)
)
for application in pulse.source_output_list():
self._applications.append(
for application in pulse.sink_input_list()
]
data.applications.extend(
AudioApplication(
application.proplist.get(
"application.name", application.name

@ -265,12 +275,11 @@ class SoundControl(CoreSysAttributes):
"application.process.machine_id", ""
).replace("-", "_"),
)
for application in pulse.source_output_list()
)

# Update output
self._outputs.clear()
for sink in pulse.sink_list():
self._outputs.append(
data.outputs = [
AudioStream(
sink.name,
sink.index,

@ -281,20 +290,16 @@ class SoundControl(CoreSysAttributes):
sink.card if sink.card != 0xFFFFFFFF else None,
[
application
for application in self._applications
for application in data.applications
if application.stream_index == sink.index
and application.stream_type == StreamType.OUTPUT
],
)
)
for sink in pulse.sink_list()
]

# Update input
self._inputs.clear()
for source in pulse.source_list():
# Filter monitor devices out because we did not use it now
if source.name.endswith(".monitor"):
continue
self._inputs.append(
data.inputs = [
AudioStream(
source.name,
source.index,

@ -305,35 +310,34 @@ class SoundControl(CoreSysAttributes):
source.card if source.card != 0xFFFFFFFF else None,
[
application
for application in self._applications
for application in data.applications
if application.stream_index == source.index
and application.stream_type == StreamType.INPUT
],
)
)
for source in pulse.source_list()
# Filter monitor devices out because we did not use it now
if not source.name.endswith(".monitor")
]

# Update Sound Card
self._cards.clear()
for card in pulse.card_list():
sound_profiles: list[SoundProfile] = []

# Generate profiles
for profile in card.profile_list:
if not profile.available:
continue
sound_profiles.append(
data.cards = [
SoundCard(
card.name,
card.index,
card.driver,
[
SoundProfile(
profile.name,
profile.description,
profile.name == card.profile_active.name,
)
for profile in card.profile_list
if profile.available
],
)

self._cards.append(
SoundCard(
card.name, card.index, card.driver, sound_profiles
)
)
for card in pulse.card_list()
]

except PulseOperationFailed as err:
raise PulseAudioError(

@ -342,5 +346,11 @@ class SoundControl(CoreSysAttributes):
except PulseError as err:
_LOGGER.debug("Can't update PulseAudio data: %s", err)

# Run update from pulse server
await self.sys_run_in_executor(_update)
return data

# Update data from pulse server
data: PulseData = await self.sys_run_in_executor(_get_pulse_data)
self._applications = data.applications
self._cards = data.cards
self._inputs = data.inputs
self._outputs = data.outputs
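Note: the refactor above changes the executor job from mutating self._applications/_cards/_inputs/_outputs inside a worker thread to returning one PulseData value that is assigned on the event loop. A condensed sketch of that shape, with a stand-in for the blocking Pulse query (the pulsectl details are elided):

import asyncio
from dataclasses import dataclass, field


@dataclass(slots=True)
class PulseData:
    """Container for everything gathered from the sound server."""

    cards: list = field(default_factory=list)
    inputs: list = field(default_factory=list)
    outputs: list = field(default_factory=list)
    applications: list = field(default_factory=list)


class SoundControl:
    def __init__(self) -> None:
        self._cards: list = []
        self._inputs: list = []
        self._outputs: list = []
        self._applications: list = []

    def _get_pulse_data(self) -> PulseData:
        """Blocking query; runs in a worker thread and only builds local state."""
        data = PulseData()
        data.outputs = ["dummy-sink"]  # placeholder for the real pulsectl calls
        return data

    async def update(self) -> None:
        """Collect in the executor, then assign attributes on the event loop."""
        loop = asyncio.get_running_loop()
        data = await loop.run_in_executor(None, self._get_pulse_data)
        # Assignments happen here, so readers on the event loop never observe
        # a half-cleared list while the worker thread is still filling it.
        self._applications = data.applications
        self._cards = data.cards
        self._inputs = data.inputs
        self._outputs = data.outputs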
@ -154,7 +154,7 @@ class PluginDns(PluginBase):
except OSError as err:
_LOGGER.error("Can't read hosts.tmpl: %s", err)

self._init_hosts()
await self._init_hosts()
await super().load()

# Update supervisor

@ -184,7 +184,7 @@ class PluginDns(PluginBase):
self.save_data()

# Init Hosts
self.write_hosts()
await self.write_hosts()

@Job(
conditions=PLUGIN_UPDATE_CONDITIONS,

@ -254,7 +254,7 @@ class PluginDns(PluginBase):
# Resets hosts
with suppress(OSError):
self.hosts.unlink()
self._init_hosts()
await self._init_hosts()

# Reset loop protection
self._loop = False

@ -333,32 +333,40 @@ class PluginDns(PluginBase):
f"Can't update coredns config: {err}", _LOGGER.error
) from err

def _init_hosts(self) -> None:
async def _init_hosts(self) -> None:
"""Import hosts entry."""
# Generate Default
self.add_host(IPv4Address("127.0.0.1"), ["localhost"], write=False)
await asyncio.gather(
self.add_host(IPv4Address("127.0.0.1"), ["localhost"], write=False),
self.add_host(
self.sys_docker.network.supervisor, ["hassio", "supervisor"], write=False
)
self.sys_docker.network.supervisor,
["hassio", "supervisor"],
write=False,
),
self.add_host(
self.sys_docker.network.gateway,
["homeassistant", "home-assistant"],
write=False,
),
self.add_host(self.sys_docker.network.dns, ["dns"], write=False),
self.add_host(self.sys_docker.network.observer, ["observer"], write=False),
)
self.add_host(self.sys_docker.network.dns, ["dns"], write=False)
self.add_host(self.sys_docker.network.observer, ["observer"], write=False)

def write_hosts(self) -> None:
async def write_hosts(self) -> None:
"""Write hosts from memory to file."""
# Generate config file
data = self.hosts_template.render(entries=self._hosts)

try:
self.hosts.write_text(data, encoding="utf-8")
await self.sys_run_in_executor(
self.hosts.write_text, data, encoding="utf-8"
)
except OSError as err:
raise CoreDNSError(f"Can't update hosts: {err}", _LOGGER.error) from err

def add_host(self, ipv4: IPv4Address, names: list[str], write: bool = True) -> None:
async def add_host(
self, ipv4: IPv4Address, names: list[str], write: bool = True
) -> None:
"""Add a new host entry."""
if not ipv4 or ipv4 == IPv4Address("0.0.0.0"):
return

@ -381,9 +389,9 @@ class PluginDns(PluginBase):
# Update hosts file
if write:
self.write_hosts()
await self.write_hosts()

def delete_host(self, host: str, write: bool = True) -> None:
async def delete_host(self, host: str, write: bool = True) -> None:
"""Remove a entry from hosts."""
entry = self._search_host([host])
if not entry:

@ -394,7 +402,7 @@ class PluginDns(PluginBase):
# Update hosts file
if write:
self.write_hosts()
await self.write_hosts()

def _search_host(self, names: list[str]) -> HostEntry | None:
"""Search a host entry."""
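Note: with add_host and write_hosts now coroutines, the default entries above can be registered concurrently and the file write stays off the event loop. A reduced sketch of the same flow, using a throwaway Hosts class and arbitrary example addresses in place of the plugin's template rendering and Docker network lookups:

import asyncio
from ipaddress import IPv4Address
from pathlib import Path


class Hosts:
    def __init__(self, path: Path) -> None:
        self._path = path
        self._entries: list[tuple[IPv4Address, list[str]]] = []

    async def add_host(
        self, ipv4: IPv4Address, names: list[str], write: bool = True
    ) -> None:
        """Record an entry and optionally flush the hosts file."""
        self._entries.append((ipv4, names))
        if write:
            await self.write_hosts()

    async def write_hosts(self) -> None:
        """Render and write the file in the default executor."""
        data = "\n".join(f"{ip} {' '.join(names)}" for ip, names in self._entries)
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(None, self._path.write_text, data)

    async def init_hosts(self) -> None:
        """Add the default entries concurrently, then write once."""
        await asyncio.gather(
            self.add_host(IPv4Address("127.0.0.1"), ["localhost"], write=False),
            # 192.0.2.1 is a documentation address, standing in for the
            # supervisor's network address.
            self.add_host(IPv4Address("192.0.2.1"), ["supervisor"], write=False),
        )
        await self.write_hosts()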
@ -64,12 +64,29 @@ class EvaluateContainer(EvaluateBase):
*(addon.image for addon in self.sys_addons.installed),
}

async def evaluate(self) -> None:
async def evaluate(self) -> bool:
"""Run evaluation."""
self.sys_resolution.evaluate.cached_images.clear()
self._images.clear()

for image in await self.sys_run_in_executor(self._get_images):
try:
containers = await self.sys_run_in_executor(self.sys_docker.containers.list)
except (DockerException, RequestException) as err:
_LOGGER.error("Corrupt docker overlayfs detect: %s", err)
self.sys_resolution.create_issue(
IssueType.CORRUPT_DOCKER,
ContextType.SYSTEM,
suggestions=[SuggestionType.EXECUTE_REPAIR],
)
return False

images = {
image
for container in containers
if (config := container.attrs.get("Config")) is not None
and (image := config.get("Image")) is not None
}
for image in images:
self.sys_resolution.evaluate.cached_images.add(image)

image_name = image.partition(":")[0]

@ -86,22 +103,3 @@ class EvaluateContainer(EvaluateBase):
self.sys_resolution.unhealthy = UnhealthyReason.DOCKER

return len(self._images) != 0

def _get_images(self) -> set[str]:
"""Return a set of images."""
try:
return {
image
for container in self.sys_docker.containers.list()
if (config := container.attrs.get("Config")) is not None
and (image := config.get("Image")) is not None
}
except (DockerException, RequestException) as err:
_LOGGER.error("Corrupt docker overlayfs detect: %s", err)
self.sys_resolution.create_issue(
IssueType.CORRUPT_DOCKER,
ContextType.SYSTEM,
suggestions=[SuggestionType.EXECUTE_REPAIR],
)

return {}
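Note: the evaluation above now lists containers once in the executor and extracts image names inline. A small stand-alone sketch of the extraction step, using plain SimpleNamespace objects in place of docker-py Container objects:

from types import SimpleNamespace

# Stand-ins for docker-py Container objects; only .attrs is used here.
containers = [
    SimpleNamespace(attrs={"Config": {"Image": "containrrr/watchtower:1.1"}}),
    SimpleNamespace(attrs={"Config": {"Image": "pyouroboros/ouroboros:1.4.3"}}),
    SimpleNamespace(attrs={}),  # corrupt/partial metadata is skipped, not fatal
]

# The walrus operator keeps each lookup to a single pass: missing "Config"
# or "Image" keys simply drop that container from the result set.
images = {
    image
    for container in containers
    if (config := container.attrs.get("Config")) is not None
    and (image := config.get("Image")) is not None
}

assert images == {"containrrr/watchtower:1.1", "pyouroboros/ouroboros:1.4.3"}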
@ -4,7 +4,7 @@ import asyncio
|
||||
from datetime import timedelta
|
||||
from unittest.mock import MagicMock, PropertyMock, patch
|
||||
|
||||
from docker.errors import DockerException
|
||||
from docker.errors import DockerException, NotFound
|
||||
import pytest
|
||||
from securetar import SecureTarFile
|
||||
|
||||
@ -17,7 +17,7 @@ from supervisor.coresys import CoreSys
|
||||
from supervisor.docker.addon import DockerAddon
|
||||
from supervisor.docker.const import ContainerState
|
||||
from supervisor.docker.monitor import DockerContainerStateEvent
|
||||
from supervisor.exceptions import AddonsJobError, AudioUpdateError
|
||||
from supervisor.exceptions import AddonsError, AddonsJobError, AudioUpdateError
|
||||
from supervisor.store.repository import Repository
|
||||
from supervisor.utils.dt import utcnow
|
||||
|
||||
@ -38,11 +38,6 @@ def _fire_test_event(coresys: CoreSys, name: str, state: ContainerState):
|
||||
)
|
||||
|
||||
|
||||
async def mock_current_state(state: ContainerState) -> ContainerState:
|
||||
"""Mock for current state method."""
|
||||
return state
|
||||
|
||||
|
||||
async def mock_stop() -> None:
|
||||
"""Mock for stop method."""
|
||||
|
||||
@ -142,7 +137,7 @@ async def test_addon_watchdog(coresys: CoreSys, install_addon_ssh: Addon) -> Non
|
||||
Addon, "start"
|
||||
) as start, patch.object(DockerAddon, "current_state") as current_state:
|
||||
# Restart if it becomes unhealthy
|
||||
current_state.return_value = mock_current_state(ContainerState.UNHEALTHY)
|
||||
current_state.return_value = ContainerState.UNHEALTHY
|
||||
_fire_test_event(coresys, f"addon_{TEST_ADDON_SLUG}", ContainerState.UNHEALTHY)
|
||||
await asyncio.sleep(0)
|
||||
restart.assert_called_once()
|
||||
@ -151,7 +146,7 @@ async def test_addon_watchdog(coresys: CoreSys, install_addon_ssh: Addon) -> Non
|
||||
restart.reset_mock()
|
||||
|
||||
# Rebuild if it failed
|
||||
current_state.return_value = mock_current_state(ContainerState.FAILED)
|
||||
current_state.return_value = ContainerState.FAILED
|
||||
with patch.object(DockerAddon, "stop", return_value=mock_stop()) as stop:
|
||||
_fire_test_event(coresys, f"addon_{TEST_ADDON_SLUG}", ContainerState.FAILED)
|
||||
await asyncio.sleep(0)
|
||||
@ -162,14 +157,14 @@ async def test_addon_watchdog(coresys: CoreSys, install_addon_ssh: Addon) -> Non
|
||||
start.reset_mock()
|
||||
|
||||
# Do not process event if container state has changed since fired
|
||||
current_state.return_value = mock_current_state(ContainerState.HEALTHY)
|
||||
current_state.return_value = ContainerState.HEALTHY
|
||||
_fire_test_event(coresys, f"addon_{TEST_ADDON_SLUG}", ContainerState.FAILED)
|
||||
await asyncio.sleep(0)
|
||||
restart.assert_not_called()
|
||||
start.assert_not_called()
|
||||
|
||||
# Other addons ignored
|
||||
current_state.return_value = mock_current_state(ContainerState.UNHEALTHY)
|
||||
current_state.return_value = ContainerState.UNHEALTHY
|
||||
_fire_test_event(coresys, "addon_local_non_installed", ContainerState.UNHEALTHY)
|
||||
await asyncio.sleep(0)
|
||||
restart.assert_not_called()
|
||||
@ -186,7 +181,7 @@ async def test_watchdog_on_stop(coresys: CoreSys, install_addon_ssh: Addon) -> N
|
||||
with patch.object(Addon, "restart") as restart, patch.object(
|
||||
DockerAddon,
|
||||
"current_state",
|
||||
return_value=mock_current_state(ContainerState.STOPPED),
|
||||
return_value=ContainerState.STOPPED,
|
||||
), patch.object(DockerAddon, "stop", return_value=mock_stop()):
|
||||
# Do not restart when addon stopped by user
|
||||
_fire_test_event(coresys, f"addon_{TEST_ADDON_SLUG}", ContainerState.RUNNING)
|
||||
@ -248,7 +243,7 @@ async def test_watchdog_during_attach(
|
||||
), patch.object(DockerAddon, "attach"), patch.object(
|
||||
DockerAddon,
|
||||
"current_state",
|
||||
return_value=mock_current_state(ContainerState.STOPPED),
|
||||
return_value=ContainerState.STOPPED,
|
||||
):
|
||||
coresys.config.last_boot = coresys.hardware.helper.last_boot + boot_timedelta
|
||||
addon = Addon(coresys, store.slug)
|
||||
@ -322,6 +317,7 @@ async def test_start(
|
||||
"""Test starting an addon without healthcheck."""
|
||||
install_addon_ssh.path_data.mkdir()
|
||||
await install_addon_ssh.load()
|
||||
await asyncio.sleep(0)
|
||||
assert install_addon_ssh.state == AddonState.STOPPED
|
||||
|
||||
start_task = await install_addon_ssh.start()
|
||||
@ -345,6 +341,7 @@ async def test_start_wait_healthcheck(
|
||||
install_addon_ssh.path_data.mkdir()
|
||||
container.attrs["Config"] = {"Healthcheck": "exists"}
|
||||
await install_addon_ssh.load()
|
||||
await asyncio.sleep(0)
|
||||
assert install_addon_ssh.state == AddonState.STOPPED
|
||||
|
||||
start_task = asyncio.create_task(await install_addon_ssh.start())
|
||||
@ -374,6 +371,7 @@ async def test_start_timeout(
|
||||
"""Test starting an addon times out while waiting."""
|
||||
install_addon_ssh.path_data.mkdir()
|
||||
await install_addon_ssh.load()
|
||||
await asyncio.sleep(0)
|
||||
assert install_addon_ssh.state == AddonState.STOPPED
|
||||
|
||||
start_task = await install_addon_ssh.start()
|
||||
@ -398,6 +396,7 @@ async def test_restart(
|
||||
"""Test restarting an addon."""
|
||||
install_addon_ssh.path_data.mkdir()
|
||||
await install_addon_ssh.load()
|
||||
await asyncio.sleep(0)
|
||||
assert install_addon_ssh.state == AddonState.STOPPED
|
||||
|
||||
start_task = await install_addon_ssh.restart()
|
||||
@ -426,6 +425,69 @@ async def test_backup(
|
||||
assert await install_addon_ssh.backup(tarfile) is None
|
||||
|
||||
|
||||
async def test_backup_with_pre_post_command(
|
||||
coresys: CoreSys,
|
||||
install_addon_ssh: Addon,
|
||||
container: MagicMock,
|
||||
tmp_supervisor_data,
|
||||
path_extern,
|
||||
) -> None:
|
||||
"""Test backing up an addon with pre and post command."""
|
||||
container.status = "running"
|
||||
container.exec_run.return_value = (0, None)
|
||||
install_addon_ssh.path_data.mkdir()
|
||||
await install_addon_ssh.load()
|
||||
|
||||
tarfile = SecureTarFile(coresys.config.path_tmp / "test.tar.gz", "w")
|
||||
with patch.object(
|
||||
Addon, "backup_pre", new=PropertyMock(return_value="backup_pre")
|
||||
), patch.object(Addon, "backup_post", new=PropertyMock(return_value="backup_post")):
|
||||
assert await install_addon_ssh.backup(tarfile) is None
|
||||
|
||||
assert container.exec_run.call_count == 2
|
||||
assert container.exec_run.call_args_list[0].args[0] == "backup_pre"
|
||||
assert container.exec_run.call_args_list[1].args[0] == "backup_post"
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"get_error,exception_on_exec",
|
||||
[
|
||||
(NotFound("missing"), False),
|
||||
(DockerException(), False),
|
||||
(None, True),
|
||||
(None, False),
|
||||
],
|
||||
)
|
||||
async def test_backup_with_pre_command_error(
|
||||
coresys: CoreSys,
|
||||
install_addon_ssh: Addon,
|
||||
container: MagicMock,
|
||||
get_error: DockerException | None,
|
||||
exception_on_exec: bool,
|
||||
tmp_supervisor_data,
|
||||
path_extern,
|
||||
) -> None:
|
||||
"""Test backing up an addon with error running pre command."""
|
||||
if get_error:
|
||||
coresys.docker.containers.get.side_effect = get_error
|
||||
|
||||
if exception_on_exec:
|
||||
container.exec_run.side_effect = DockerException()
|
||||
else:
|
||||
container.exec_run.return_value = (1, None)
|
||||
|
||||
install_addon_ssh.path_data.mkdir()
|
||||
await install_addon_ssh.load()
|
||||
|
||||
tarfile = SecureTarFile(coresys.config.path_tmp / "test.tar.gz", "w")
|
||||
with patch.object(DockerAddon, "is_running", return_value=True), patch.object(
|
||||
Addon, "backup_pre", new=PropertyMock(return_value="backup_pre")
|
||||
), pytest.raises(AddonsError):
|
||||
assert await install_addon_ssh.backup(tarfile) is None
|
||||
|
||||
assert not tarfile.path.exists()
|
||||
|
||||
|
||||
@pytest.mark.parametrize("status", ["running", "stopped"])
|
||||
async def test_backup_cold_mode(
|
||||
coresys: CoreSys,
|
||||
@ -444,7 +506,7 @@ async def test_backup_cold_mode(
|
||||
with patch.object(
|
||||
AddonModel, "backup_mode", new=PropertyMock(return_value=AddonBackupMode.COLD)
|
||||
), patch.object(
|
||||
DockerAddon, "_is_running", side_effect=[status == "running", False, False]
|
||||
DockerAddon, "is_running", side_effect=[status == "running", False, False]
|
||||
):
|
||||
start_task = await install_addon_ssh.backup(tarfile)
|
||||
|
||||
@ -465,7 +527,7 @@ async def test_restore(
|
||||
await install_addon_ssh.load()
|
||||
|
||||
tarfile = SecureTarFile(get_fixture_path(f"backup_local_ssh_{status}.tar.gz"), "r")
|
||||
with patch.object(DockerAddon, "_is_running", return_value=False), patch.object(
|
||||
with patch.object(DockerAddon, "is_running", return_value=False), patch.object(
|
||||
CpuArch, "supported", new=PropertyMock(return_value=["aarch64"])
|
||||
):
|
||||
start_task = await coresys.addons.restore(TEST_ADDON_SLUG, tarfile)
|
||||
@ -482,6 +544,7 @@ async def test_start_when_running(
|
||||
"""Test starting an addon without healthcheck."""
|
||||
container.status = "running"
|
||||
await install_addon_ssh.load()
|
||||
await asyncio.sleep(0)
|
||||
assert install_addon_ssh.state == AddonState.STARTED
|
||||
|
||||
caplog.clear()
|
||||
|
@ -202,6 +202,7 @@ async def test_boot_waits_for_addons(
|
||||
"""Test addon manager boot waits for addons."""
|
||||
install_addon_ssh.path_data.mkdir()
|
||||
await install_addon_ssh.load()
|
||||
await asyncio.sleep(0)
|
||||
assert install_addon_ssh.state == AddonState.STOPPED
|
||||
|
||||
addon_state: AddonState | None = None
|
||||
@ -249,7 +250,7 @@ async def test_update(
|
||||
assert install_addon_ssh.need_update is True
|
||||
|
||||
with patch.object(DockerInterface, "_install"), patch.object(
|
||||
DockerAddon, "_is_running", return_value=False
|
||||
DockerAddon, "is_running", return_value=False
|
||||
):
|
||||
start_task = await coresys.addons.update(TEST_ADDON_SLUG)
|
||||
|
||||
@ -271,7 +272,7 @@ async def test_rebuild(
|
||||
await install_addon_ssh.load()
|
||||
|
||||
with patch.object(DockerAddon, "_build"), patch.object(
|
||||
DockerAddon, "_is_running", return_value=False
|
||||
DockerAddon, "is_running", return_value=False
|
||||
), patch.object(Addon, "need_build", new=PropertyMock(return_value=True)):
|
||||
start_task = await coresys.addons.rebuild(TEST_ADDON_SLUG)
|
||||
|
||||
|
@ -92,6 +92,7 @@ async def test_api_addon_start_healthcheck(
|
||||
install_addon_ssh.path_data.mkdir()
|
||||
container.attrs["Config"] = {"Healthcheck": "exists"}
|
||||
await install_addon_ssh.load()
|
||||
await asyncio.sleep(0)
|
||||
assert install_addon_ssh.state == AddonState.STOPPED
|
||||
|
||||
state_changes: list[AddonState] = []
|
||||
@ -132,6 +133,7 @@ async def test_api_addon_restart_healthcheck(
|
||||
install_addon_ssh.path_data.mkdir()
|
||||
container.attrs["Config"] = {"Healthcheck": "exists"}
|
||||
await install_addon_ssh.load()
|
||||
await asyncio.sleep(0)
|
||||
assert install_addon_ssh.state == AddonState.STOPPED
|
||||
|
||||
state_changes: list[AddonState] = []
|
||||
@ -173,6 +175,7 @@ async def test_api_addon_rebuild_healthcheck(
|
||||
install_addon_ssh.path_data.mkdir()
|
||||
container.attrs["Config"] = {"Healthcheck": "exists"}
|
||||
await install_addon_ssh.load()
|
||||
await asyncio.sleep(0)
|
||||
assert install_addon_ssh.state == AddonState.STARTUP
|
||||
|
||||
state_changes: list[AddonState] = []
|
||||
@ -200,7 +203,7 @@ async def test_api_addon_rebuild_healthcheck(
|
||||
|
||||
with patch.object(
|
||||
AddonBuild, "is_valid", new=PropertyMock(return_value=True)
|
||||
), patch.object(DockerAddon, "_is_running", return_value=False), patch.object(
|
||||
), patch.object(DockerAddon, "is_running", return_value=False), patch.object(
|
||||
Addon, "need_build", new=PropertyMock(return_value=True)
|
||||
), patch.object(
|
||||
CpuArch, "supported", new=PropertyMock(return_value=["amd64"])
|
||||
|
@ -5,6 +5,10 @@ from unittest.mock import MagicMock
|
||||
from aiohttp.test_utils import TestClient
|
||||
import pytest
|
||||
|
||||
from supervisor.coresys import CoreSys
|
||||
|
||||
from tests.common import load_json_fixture
|
||||
|
||||
|
||||
@pytest.mark.parametrize("legacy_route", [True, False])
|
||||
async def test_api_core_logs(
|
||||
@ -19,3 +23,19 @@ async def test_api_core_logs(
|
||||
"\x1b[36m22-10-11 14:04:23 DEBUG (MainThread) [supervisor.utils.dbus] D-Bus call - org.freedesktop.DBus.Properties.call_get_all on /io/hass/os\x1b[0m",
|
||||
"\x1b[36m22-10-11 14:04:23 DEBUG (MainThread) [supervisor.utils.dbus] D-Bus call - org.freedesktop.DBus.Properties.call_get_all on /io/hass/os/AppArmor\x1b[0m",
|
||||
]
|
||||
|
||||
|
||||
async def test_api_stats(api_client: TestClient, coresys: CoreSys):
|
||||
"""Test stats."""
|
||||
coresys.docker.containers.get.return_value.status = "running"
|
||||
coresys.docker.containers.get.return_value.stats.return_value = load_json_fixture(
|
||||
"container_stats.json"
|
||||
)
|
||||
|
||||
resp = await api_client.get("/homeassistant/stats")
|
||||
assert resp.status == 200
|
||||
result = await resp.json()
|
||||
assert result["data"]["cpu_percent"] == 90.0
|
||||
assert result["data"]["memory_usage"] == 59700000
|
||||
assert result["data"]["memory_limit"] == 4000000000
|
||||
assert result["data"]["memory_percent"] == 1.49
|
||||
|
@ -177,7 +177,7 @@ async def test_api_store_update_healthcheck(
|
||||
|
||||
with patch.object(DockerAddon, "run", new=container_events_task), patch.object(
|
||||
DockerInterface, "_install"
|
||||
), patch.object(DockerAddon, "_is_running", return_value=False), patch.object(
|
||||
), patch.object(DockerAddon, "is_running", return_value=False), patch.object(
|
||||
CpuArch, "supported", new=PropertyMock(return_value=["amd64"])
|
||||
):
|
||||
resp = await api_client.post(f"/store/addons/{TEST_ADDON_SLUG}/update")
|
||||
|
@ -18,9 +18,9 @@ from supervisor.const import FOLDER_HOMEASSISTANT, FOLDER_SHARE, AddonState, Cor
|
||||
from supervisor.coresys import CoreSys
|
||||
from supervisor.docker.addon import DockerAddon
|
||||
from supervisor.docker.const import ContainerState
|
||||
from supervisor.docker.homeassistant import DockerHomeAssistant
|
||||
from supervisor.docker.monitor import DockerContainerStateEvent
|
||||
from supervisor.exceptions import AddonsError, DockerError
|
||||
from supervisor.homeassistant.core import HomeAssistantCore
|
||||
from supervisor.homeassistant.module import HomeAssistant
|
||||
from supervisor.mounts.mount import Mount
|
||||
|
||||
@ -423,10 +423,7 @@ async def test_backup_media_with_mounts(
|
||||
coresys.config.path_media.mkdir()
|
||||
|
||||
# Restore the backup and check that only the test files we made returned
|
||||
async def mock_async_true(*args, **kwargs):
|
||||
return True
|
||||
|
||||
with patch.object(HomeAssistantCore, "is_running", new=mock_async_true):
|
||||
with patch.object(DockerHomeAssistant, "is_running", return_value=True):
|
||||
await coresys.backups.do_restore_partial(backup, folders=["media"])
|
||||
|
||||
assert test_file_1.exists()
|
||||
@ -485,10 +482,7 @@ async def test_backup_share_with_mounts(
|
||||
coresys.config.path_share.mkdir()
|
||||
|
||||
# Restore the backup and check that only the test files we made returned
|
||||
async def mock_async_true(*args, **kwargs):
|
||||
return True
|
||||
|
||||
with patch.object(HomeAssistantCore, "is_running", new=mock_async_true):
|
||||
with patch.object(DockerHomeAssistant, "is_running", return_value=True):
|
||||
await coresys.backups.do_restore_partial(backup, folders=["share"])
|
||||
|
||||
assert test_file_1.exists()
|
||||
@ -532,10 +526,7 @@ async def test_full_backup_to_mount(
|
||||
# Remove marker file and restore. Confirm it comes back
|
||||
marker.unlink()
|
||||
|
||||
async def mock_async_true(*args, **kwargs):
|
||||
return True
|
||||
|
||||
with patch.object(HomeAssistantCore, "is_running", new=mock_async_true):
|
||||
with patch.object(DockerHomeAssistant, "is_running", return_value=True):
|
||||
await coresys.backups.do_restore_full(backup)
|
||||
|
||||
assert marker.exists()
|
||||
@ -588,10 +579,7 @@ async def test_partial_backup_to_mount(
|
||||
# Remove marker file and restore. Confirm it comes back
|
||||
marker.unlink()
|
||||
|
||||
async def mock_async_true(*args, **kwargs):
|
||||
return True
|
||||
|
||||
with patch.object(HomeAssistantCore, "is_running", new=mock_async_true):
|
||||
with patch.object(DockerHomeAssistant, "is_running", return_value=True):
|
||||
await coresys.backups.do_restore_partial(backup, homeassistant=True)
|
||||
|
||||
assert marker.exists()
|
||||
@ -718,6 +706,7 @@ async def test_backup_with_healthcheck(
|
||||
coresys.core.state = CoreState.RUNNING
|
||||
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
|
||||
await install_addon_ssh.load()
|
||||
await asyncio.sleep(0)
|
||||
assert install_addon_ssh.state == AddonState.STARTUP
|
||||
|
||||
state_changes: list[AddonState] = []
|
||||
@ -760,7 +749,7 @@ async def test_backup_with_healthcheck(
|
||||
|
||||
with patch.object(DockerAddon, "run", new=container_events_task), patch.object(
|
||||
AddonModel, "backup_mode", new=PropertyMock(return_value=AddonBackupMode.COLD)
|
||||
), patch.object(DockerAddon, "_is_running", side_effect=[True, False, False]):
|
||||
), patch.object(DockerAddon, "is_running", side_effect=[True, False, False]):
|
||||
backup = await coresys.backups.do_backup_partial(
|
||||
homeassistant=False, addons=["local_ssh"]
|
||||
)
|
||||
@ -785,6 +774,7 @@ async def test_restore_with_healthcheck(
|
||||
coresys.core.state = CoreState.RUNNING
|
||||
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
|
||||
await install_addon_ssh.load()
|
||||
await asyncio.sleep(0)
|
||||
assert install_addon_ssh.state == AddonState.STARTUP
|
||||
|
||||
backup = await coresys.backups.do_backup_partial(
|
||||
@ -828,7 +818,7 @@ async def test_restore_with_healthcheck(
|
||||
asyncio.create_task(container_events())
|
||||
|
||||
with patch.object(DockerAddon, "run", new=container_events_task), patch.object(
|
||||
DockerAddon, "_is_running", return_value=False
|
||||
DockerAddon, "is_running", return_value=False
|
||||
), patch.object(AddonModel, "_validate_availability"), patch.object(
|
||||
Addon, "with_ingress", new=PropertyMock(return_value=False)
|
||||
):
|
||||
|
@ -63,11 +63,6 @@ from .dbus_service_mocks.network_manager import NetworkManager as NetworkManager
# pylint: disable=redefined-outer-name, protected-access

async def mock_async_return_true(*args, **kwargs) -> bool:
"""Mock methods to return True."""
return True

@pytest.fixture
async def path_extern() -> None:
"""Set external path env for tests."""

@ -76,7 +71,7 @@ async def path_extern() -> None:
@pytest.fixture
def docker() -> DockerAPI:
async def docker() -> DockerAPI:
"""Mock DockerAPI."""
images = [MagicMock(tags=["ghcr.io/home-assistant/amd64-hassio-supervisor:latest"])]

@ -96,12 +91,13 @@ def docker() -> DockerAPI:
), patch(
"supervisor.docker.manager.DockerConfig",
return_value=MagicMock(),
), patch(
"supervisor.docker.manager.DockerAPI.load"
), patch(
"supervisor.docker.manager.DockerAPI.unload"
):
docker_obj = DockerAPI(MagicMock())
with patch("supervisor.docker.monitor.DockerMonitor.load"):
await docker_obj.load()

docker_obj.info.logging = "journald"
docker_obj.info.storage = "overlay2"
docker_obj.info.version = "1.0.0"

@ -344,7 +340,7 @@ async def coresys(
)

# WebSocket
coresys_obj.homeassistant.api.check_api_state = mock_async_return_true
coresys_obj.homeassistant.api.check_api_state = AsyncMock(return_value=True)
coresys_obj.homeassistant._websocket._client = AsyncMock(
ha_version=AwesomeVersion("2021.2.4")
)
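Note: the fixtures and tests in this commit drop the handwritten coroutine helpers (mock_async_return_true, mock_current_state, mock_is_running) because unittest.mock can produce awaitable mocks directly: AsyncMock(return_value=...) is itself awaitable, and on Python 3.8+ patch.object installs an AsyncMock automatically when the patched attribute is an async function. A minimal illustration against a throwaway class, not Supervisor internals:

import asyncio
from unittest.mock import AsyncMock, patch


class Service:
    async def check_api_state(self) -> bool:
        return False


async def main() -> None:
    svc = Service()

    # Direct replacement: AsyncMock(return_value=...) is already awaitable.
    svc.check_api_state = AsyncMock(return_value=True)
    assert await svc.check_api_state()

    # patch.object detects that the target is an async function and uses an
    # AsyncMock, so return_value can be the plain value instead of a wrapper
    # coroutine helper.
    with patch.object(Service, "check_api_state", return_value=True):
        assert await Service().check_api_state()


asyncio.run(main())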
@ -144,16 +144,16 @@ async def test_handling_bad_devices(
|
||||
|
||||
with patch.object(DBus, "init_proxy", side_effect=DBusFatalError()):
|
||||
await network_manager.update(
|
||||
{"Devices": ["/org/freedesktop/NetworkManager/Devices/100"]}
|
||||
{"Devices": [device := "/org/freedesktop/NetworkManager/Devices/100"]}
|
||||
)
|
||||
assert not caplog.text
|
||||
assert f"Can't process {device}" not in caplog.text
|
||||
|
||||
await network_manager.update()
|
||||
with patch.object(DBus, "properties", new=PropertyMock(return_value=None)):
|
||||
await network_manager.update(
|
||||
{"Devices": ["/org/freedesktop/NetworkManager/Devices/101"]}
|
||||
{"Devices": [device := "/org/freedesktop/NetworkManager/Devices/101"]}
|
||||
)
|
||||
assert not caplog.text
|
||||
assert f"Can't process {device}" not in caplog.text
|
||||
|
||||
# Unparseable introspections shouldn't happen, this one is logged and captured
|
||||
await network_manager.update()
|
||||
|
@ -1,4 +1,5 @@
|
||||
"""Test Docker interface."""
|
||||
import asyncio
|
||||
from typing import Any
|
||||
from unittest.mock import MagicMock, Mock, PropertyMock, call, patch
|
||||
|
||||
@ -151,6 +152,7 @@ async def test_attach_existing_container(
|
||||
"supervisor.docker.interface.time", return_value=1
|
||||
):
|
||||
await coresys.homeassistant.core.instance.attach(AwesomeVersion("2022.7.3"))
|
||||
await asyncio.sleep(0)
|
||||
fire_event.assert_called_once_with(
|
||||
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
|
||||
DockerContainerStateEvent("homeassistant", expected, "abc123", 1),
|
||||
@ -160,6 +162,7 @@ async def test_attach_existing_container(
|
||||
await coresys.homeassistant.core.instance.attach(
|
||||
AwesomeVersion("2022.7.3"), skip_state_event_if_down=True
|
||||
)
|
||||
await asyncio.sleep(0)
|
||||
if fired_when_skip_down:
|
||||
fire_event.assert_called_once_with(
|
||||
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
|
||||
|
@ -1,19 +1,24 @@
|
||||
"""Test Home Assistant core."""
|
||||
from unittest.mock import MagicMock, Mock, PropertyMock, patch
|
||||
|
||||
from awesomeversion import AwesomeVersion
|
||||
from docker.errors import DockerException, ImageNotFound, NotFound
|
||||
import pytest
|
||||
|
||||
from supervisor.const import CpuArch
|
||||
from supervisor.coresys import CoreSys
|
||||
from supervisor.docker.homeassistant import DockerHomeAssistant
|
||||
from supervisor.docker.interface import DockerInterface
|
||||
from supervisor.docker.manager import DockerAPI
|
||||
from supervisor.exceptions import (
|
||||
AudioUpdateError,
|
||||
CodeNotaryError,
|
||||
DockerError,
|
||||
HomeAssistantError,
|
||||
HomeAssistantJobError,
|
||||
)
|
||||
from supervisor.homeassistant.core import HomeAssistantCore
|
||||
from supervisor.homeassistant.module import HomeAssistant
|
||||
from supervisor.updater import Updater
|
||||
|
||||
|
||||
@ -130,3 +135,131 @@ async def test_install_other_error(
|
||||
|
||||
assert "Error on Home Assistant installation. Retry in 30sec" in caplog.text
|
||||
capture_exception.assert_called_once_with(err)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"container_exists,image_exists", [(False, True), (True, False), (True, True)]
|
||||
)
|
||||
async def test_start(
|
||||
coresys: CoreSys, container_exists: bool, image_exists: bool, path_extern
|
||||
):
|
||||
"""Test starting Home Assistant."""
|
||||
if image_exists:
|
||||
coresys.docker.images.get.return_value.id = "123"
|
||||
else:
|
||||
coresys.docker.images.get.side_effect = ImageNotFound("missing")
|
||||
|
||||
if container_exists:
|
||||
coresys.docker.containers.get.return_value.image.id = "123"
|
||||
else:
|
||||
coresys.docker.containers.get.side_effect = NotFound("missing")
|
||||
|
||||
with patch.object(
|
||||
HomeAssistant,
|
||||
"version",
|
||||
new=PropertyMock(return_value=AwesomeVersion("2023.7.0")),
|
||||
), patch.object(DockerAPI, "run") as run, patch.object(
|
||||
HomeAssistantCore, "_block_till_run"
|
||||
) as block_till_run:
|
||||
await coresys.homeassistant.core.start()
|
||||
|
||||
block_till_run.assert_called_once()
|
||||
run.assert_called_once()
|
||||
assert (
|
||||
run.call_args.args[0] == "ghcr.io/home-assistant/qemux86-64-homeassistant"
|
||||
)
|
||||
assert run.call_args.kwargs["tag"] == AwesomeVersion("2023.7.0")
|
||||
assert run.call_args.kwargs["name"] == "homeassistant"
|
||||
assert run.call_args.kwargs["hostname"] == "homeassistant"
|
||||
|
||||
coresys.docker.containers.get.return_value.stop.assert_not_called()
|
||||
if container_exists:
|
||||
coresys.docker.containers.get.return_value.remove.assert_called_once_with(
|
||||
force=True
|
||||
)
|
||||
else:
|
||||
coresys.docker.containers.get.return_value.remove.assert_not_called()
|
||||
|
||||
|
||||
async def test_start_existing_container(coresys: CoreSys, path_extern):
|
||||
"""Test starting Home Assistant when container exists and is viable."""
|
||||
coresys.docker.images.get.return_value.id = "123"
|
||||
coresys.docker.containers.get.return_value.image.id = "123"
|
||||
coresys.docker.containers.get.return_value.status = "exited"
|
||||
|
||||
with patch.object(
|
||||
HomeAssistant,
|
||||
"version",
|
||||
new=PropertyMock(return_value=AwesomeVersion("2023.7.0")),
|
||||
), patch.object(HomeAssistantCore, "_block_till_run") as block_till_run:
|
||||
await coresys.homeassistant.core.start()
|
||||
block_till_run.assert_called_once()
|
||||
|
||||
coresys.docker.containers.get.return_value.start.assert_called_once()
|
||||
coresys.docker.containers.get.return_value.stop.assert_not_called()
|
||||
coresys.docker.containers.get.return_value.remove.assert_not_called()
|
||||
coresys.docker.containers.get.return_value.run.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.parametrize("exists", [True, False])
|
||||
async def test_stop(coresys: CoreSys, exists: bool):
|
||||
"""Test stoppping Home Assistant."""
|
||||
if exists:
|
||||
coresys.docker.containers.get.return_value.status = "running"
|
||||
else:
|
||||
coresys.docker.containers.get.side_effect = NotFound("missing")
|
||||
|
||||
await coresys.homeassistant.core.stop()
|
||||
|
||||
coresys.docker.containers.get.return_value.remove.assert_not_called()
|
||||
if exists:
|
||||
coresys.docker.containers.get.return_value.stop.assert_called_once_with(
|
||||
timeout=240
|
||||
)
|
||||
else:
|
||||
coresys.docker.containers.get.return_value.stop.assert_not_called()
|
||||
|
||||
|
||||
async def test_restart(coresys: CoreSys):
|
||||
"""Test restarting Home Assistant."""
|
||||
with patch.object(HomeAssistantCore, "_block_till_run") as block_till_run:
|
||||
await coresys.homeassistant.core.restart()
|
||||
block_till_run.assert_called_once()
|
||||
|
||||
coresys.docker.containers.get.return_value.restart.assert_called_once_with(
|
||||
timeout=240
|
||||
)
|
||||
coresys.docker.containers.get.return_value.stop.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.parametrize("get_error", [NotFound("missing"), DockerException(), None])
|
||||
async def test_restart_failures(coresys: CoreSys, get_error: DockerException | None):
|
||||
"""Test restart fails when container missing or can't be restarted."""
|
||||
coresys.docker.containers.get.return_value.restart.side_effect = DockerException()
|
||||
if get_error:
|
||||
coresys.docker.containers.get.side_effect = get_error
|
||||
|
||||
with pytest.raises(HomeAssistantError):
|
||||
await coresys.homeassistant.core.restart()
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"get_error,status",
|
||||
[
|
||||
(NotFound("missing"), ""),
|
||||
(DockerException(), ""),
|
||||
(None, "stopped"),
|
||||
(None, "running"),
|
||||
],
|
||||
)
|
||||
async def test_stats_failures(
|
||||
coresys: CoreSys, get_error: DockerException | None, status: str
|
||||
):
|
||||
"""Test errors when getting stats."""
|
||||
coresys.docker.containers.get.return_value.status = status
|
||||
coresys.docker.containers.get.return_value.stats.side_effect = DockerException()
|
||||
if get_error:
|
||||
coresys.docker.containers.get.side_effect = get_error
|
||||
|
||||
with pytest.raises(HomeAssistantError):
|
||||
await coresys.homeassistant.core.stats()
|
||||
|
@ -11,11 +11,6 @@ from supervisor.docker.monitor import DockerContainerStateEvent
|
||||
from supervisor.exceptions import HomeAssistantError
|
||||
|
||||
|
||||
async def mock_current_state(state: ContainerState) -> ContainerState:
|
||||
"""Mock for current state method."""
|
||||
return state
|
||||
|
||||
|
||||
async def test_home_assistant_watchdog(coresys: CoreSys) -> None:
|
||||
"""Test homeassistant watchdog works correctly."""
|
||||
coresys.homeassistant.version = AwesomeVersion("2022.7.3")
|
||||
@ -34,7 +29,7 @@ async def test_home_assistant_watchdog(coresys: CoreSys) -> None:
|
||||
) as start, patch.object(
|
||||
type(coresys.homeassistant.core.instance), "current_state"
|
||||
) as current_state:
|
||||
current_state.return_value = mock_current_state(ContainerState.UNHEALTHY)
|
||||
current_state.return_value = ContainerState.UNHEALTHY
|
||||
coresys.bus.fire_event(
|
||||
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
|
||||
DockerContainerStateEvent(
|
||||
@ -49,7 +44,7 @@ async def test_home_assistant_watchdog(coresys: CoreSys) -> None:
|
||||
start.assert_not_called()
|
||||
|
||||
restart.reset_mock()
|
||||
current_state.return_value = mock_current_state(ContainerState.FAILED)
|
||||
current_state.return_value = ContainerState.FAILED
|
||||
coresys.bus.fire_event(
|
||||
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
|
||||
DockerContainerStateEvent(
|
||||
@ -65,7 +60,7 @@ async def test_home_assistant_watchdog(coresys: CoreSys) -> None:
|
||||
|
||||
start.reset_mock()
|
||||
# Do not process event if container state has changed since fired
|
||||
current_state.return_value = mock_current_state(ContainerState.HEALTHY)
|
||||
current_state.return_value = ContainerState.HEALTHY
|
||||
coresys.bus.fire_event(
|
||||
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
|
||||
DockerContainerStateEvent(
|
||||
@ -126,7 +121,7 @@ async def test_home_assistant_watchdog_rebuild_on_failure(coresys: CoreSys) -> N
|
||||
) as rebuild, patch.object(
|
||||
type(coresys.homeassistant.core.instance),
|
||||
"current_state",
|
||||
return_value=mock_current_state(ContainerState.FAILED),
|
||||
return_value=ContainerState.FAILED,
|
||||
):
|
||||
coresys.bus.fire_event(
|
||||
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
|
||||
|
@ -15,8 +15,6 @@ from supervisor.plugins.dns import HostEntry
|
||||
from supervisor.resolution.const import ContextType, IssueType, SuggestionType
|
||||
from supervisor.resolution.data import Issue, Suggestion
|
||||
|
||||
from tests.plugins.test_plugin_base import mock_current_state, mock_is_running
|
||||
|
||||
|
||||
@pytest.fixture(name="docker_interface")
|
||||
async def fixture_docker_interface() -> tuple[AsyncMock, AsyncMock]:
|
||||
@ -131,11 +129,6 @@ async def test_reset(coresys: CoreSys):
|
||||
]
|
||||
|
||||
|
||||
async def mock_logs(logs: bytes) -> bytes:
|
||||
"""Mock for logs method."""
|
||||
return logs
|
||||
|
||||
|
||||
async def test_loop_detection_on_failure(coresys: CoreSys):
|
||||
"""Test loop detection when coredns fails."""
|
||||
assert len(coresys.resolution.issues) == 0
|
||||
@ -144,7 +137,7 @@ async def test_loop_detection_on_failure(coresys: CoreSys):
|
||||
with patch.object(type(coresys.plugins.dns.instance), "attach"), patch.object(
|
||||
type(coresys.plugins.dns.instance),
|
||||
"is_running",
|
||||
return_value=mock_is_running(True),
|
||||
return_value=True,
|
||||
):
|
||||
await coresys.plugins.dns.load()
|
||||
|
||||
@ -152,11 +145,11 @@ async def test_loop_detection_on_failure(coresys: CoreSys):
|
||||
type(coresys.plugins.dns.instance),
|
||||
"current_state",
|
||||
side_effect=[
|
||||
mock_current_state(ContainerState.FAILED),
|
||||
mock_current_state(ContainerState.FAILED),
|
||||
ContainerState.FAILED,
|
||||
ContainerState.FAILED,
|
||||
],
|
||||
), patch.object(type(coresys.plugins.dns.instance), "logs") as logs:
|
||||
logs.return_value = mock_logs(b"")
|
||||
logs.return_value = b""
|
||||
coresys.bus.fire_event(
|
||||
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
|
||||
DockerContainerStateEvent(
|
||||
@ -172,7 +165,7 @@ async def test_loop_detection_on_failure(coresys: CoreSys):
|
||||
rebuild.assert_called_once()
|
||||
|
||||
rebuild.reset_mock()
|
||||
logs.return_value = mock_logs(b"plugin/loop: Loop")
|
||||
logs.return_value = b"plugin/loop: Loop"
|
||||
coresys.bus.fire_event(
|
||||
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
|
||||
DockerContainerStateEvent(
|
||||
|
@ -53,21 +53,6 @@ async def fixture_plugin(
|
||||
yield coresys.plugins.observer
|
||||
|
||||
|
||||
async def mock_current_state(state: ContainerState) -> ContainerState:
|
||||
"""Mock for current state method."""
|
||||
return state
|
||||
|
||||
|
||||
async def mock_is_running(running: bool) -> bool:
|
||||
"""Mock for is running method."""
|
||||
return running
|
||||
|
||||
|
||||
async def mock_get_latest_version(version: AwesomeVersion) -> AwesomeVersion:
|
||||
"""Mock for get latest version method."""
|
||||
return version
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"plugin",
|
||||
[PluginAudio, PluginCli, PluginDns, PluginMulticast, PluginObserver],
|
||||
@ -76,14 +61,14 @@ async def mock_get_latest_version(version: AwesomeVersion) -> AwesomeVersion:
|
||||
async def test_plugin_watchdog(coresys: CoreSys, plugin: PluginBase) -> None:
|
||||
"""Test plugin watchdog works correctly."""
|
||||
with patch.object(type(plugin.instance), "attach"), patch.object(
|
||||
type(plugin.instance), "is_running", return_value=mock_is_running(True)
|
||||
type(plugin.instance), "is_running", return_value=True
|
||||
):
|
||||
await plugin.load()
|
||||
|
||||
with patch.object(type(plugin), "rebuild") as rebuild, patch.object(
|
||||
type(plugin), "start"
|
||||
) as start, patch.object(type(plugin.instance), "current_state") as current_state:
|
||||
current_state.return_value = mock_current_state(ContainerState.UNHEALTHY)
|
||||
current_state.return_value = ContainerState.UNHEALTHY
|
||||
coresys.bus.fire_event(
|
||||
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
|
||||
DockerContainerStateEvent(
|
||||
@ -98,7 +83,7 @@ async def test_plugin_watchdog(coresys: CoreSys, plugin: PluginBase) -> None:
|
||||
start.assert_not_called()
|
||||
|
||||
rebuild.reset_mock()
|
||||
current_state.return_value = mock_current_state(ContainerState.FAILED)
|
||||
current_state.return_value = ContainerState.FAILED
|
||||
coresys.bus.fire_event(
|
||||
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
|
||||
DockerContainerStateEvent(
|
||||
@ -114,7 +99,7 @@ async def test_plugin_watchdog(coresys: CoreSys, plugin: PluginBase) -> None:
|
||||
|
||||
rebuild.reset_mock()
|
||||
# Plugins are restarted anytime they stop, not just on failure
|
||||
current_state.return_value = mock_current_state(ContainerState.STOPPED)
|
||||
current_state.return_value = ContainerState.STOPPED
|
||||
coresys.bus.fire_event(
|
||||
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
|
||||
DockerContainerStateEvent(
|
||||
@ -130,7 +115,7 @@ async def test_plugin_watchdog(coresys: CoreSys, plugin: PluginBase) -> None:
|
||||
|
||||
start.reset_mock()
|
||||
# Do not process event if container state has changed since fired
|
||||
current_state.return_value = mock_current_state(ContainerState.HEALTHY)
|
||||
current_state.return_value = ContainerState.HEALTHY
|
||||
coresys.bus.fire_event(
|
||||
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
|
||||
DockerContainerStateEvent(
|
||||
@ -175,7 +160,7 @@ async def test_plugin_watchdog_rebuild_on_failure(
|
||||
) -> None:
|
||||
"""Test plugin watchdog rebuilds if start fails."""
|
||||
with patch.object(type(plugin.instance), "attach"), patch.object(
|
||||
type(plugin.instance), "is_running", return_value=mock_is_running(True)
|
||||
type(plugin.instance), "is_running", return_value=True
|
||||
):
|
||||
await plugin.load()
|
||||
|
||||
@ -187,8 +172,8 @@ async def test_plugin_watchdog_rebuild_on_failure(
|
||||
type(plugin.instance),
|
||||
"current_state",
|
||||
side_effect=[
|
||||
mock_current_state(ContainerState.STOPPED),
|
||||
mock_current_state(ContainerState.STOPPED),
|
||||
ContainerState.STOPPED,
|
||||
ContainerState.STOPPED,
|
||||
],
|
||||
):
|
||||
coresys.bus.fire_event(
|
||||
@ -228,9 +213,9 @@ async def test_plugin_load_running_container(
|
||||
) as start, patch.object(
|
||||
type(plugin.instance),
|
||||
"get_latest_version",
|
||||
return_value=mock_get_latest_version(test_version),
|
||||
return_value=test_version,
|
||||
), patch.object(
|
||||
type(plugin.instance), "is_running", return_value=mock_is_running(True)
|
||||
type(plugin.instance), "is_running", return_value=True
|
||||
):
|
||||
await plugin.load()
|
||||
register_event.assert_any_call(
|
||||
@ -264,9 +249,9 @@ async def test_plugin_load_stopped_container(
|
||||
) as start, patch.object(
|
||||
type(plugin.instance),
|
||||
"get_latest_version",
|
||||
return_value=mock_get_latest_version(test_version),
|
||||
return_value=test_version,
|
||||
), patch.object(
|
||||
type(plugin.instance), "is_running", return_value=mock_is_running(False)
|
||||
type(plugin.instance), "is_running", return_value=False
|
||||
):
|
||||
await plugin.load()
|
||||
register_event.assert_any_call(
|
||||
@ -300,9 +285,9 @@ async def test_plugin_load_missing_container(
|
||||
) as start, patch.object(
|
||||
type(plugin.instance),
|
||||
"get_latest_version",
|
||||
return_value=mock_get_latest_version(test_version),
|
||||
return_value=test_version,
|
||||
), patch.object(
|
||||
type(plugin.instance), "is_running", return_value=mock_is_running(False)
|
||||
type(plugin.instance), "is_running", return_value=False
|
||||
):
|
||||
await plugin.load()
|
||||
register_event.assert_any_call(
|
||||
@ -347,9 +332,7 @@ async def test_repair_failed(
|
||||
coresys: CoreSys, capture_exception: Mock, plugin: PluginBase
|
||||
):
|
||||
"""Test repair failed."""
|
||||
with patch.object(
|
||||
DockerInterface, "exists", return_value=mock_is_running(False)
|
||||
), patch.object(
|
||||
with patch.object(DockerInterface, "exists", return_value=False), patch.object(
|
||||
DockerInterface, "arch", new=PropertyMock(return_value=CpuArch.AMD64)
|
||||
), patch(
|
||||
"supervisor.security.module.cas_validate", side_effect=CodeNotaryUntrusted
|
||||
|
@ -13,8 +13,6 @@ from supervisor.resolution.checks.docker_config import CheckDockerConfig
|
||||
from supervisor.resolution.const import ContextType, IssueType, SuggestionType
|
||||
from supervisor.resolution.data import Issue, Suggestion
|
||||
|
||||
from tests.conftest import mock_async_return_true
|
||||
|
||||
|
||||
def _make_mock_container_get(bad_config_names: list[str], folder: str = "media"):
|
||||
"""Make mock of container get."""
|
||||
@ -54,7 +52,7 @@ async def test_check(
|
||||
docker.containers.get = _make_mock_container_get(
|
||||
["homeassistant", "hassio_audio", "addon_local_ssh"], folder
|
||||
)
|
||||
with patch.object(DockerInterface, "is_running", new=mock_async_return_true):
|
||||
with patch.object(DockerInterface, "is_running", return_value=True):
|
||||
await coresys.plugins.load()
|
||||
await coresys.homeassistant.load()
|
||||
await coresys.addons.load()
|
||||
@ -107,7 +105,7 @@ async def test_check(
|
||||
|
||||
# IF config issue is resolved, all issues are removed except the main one. Which will be removed if check isn't approved
|
||||
docker.containers.get = _make_mock_container_get([])
|
||||
with patch.object(DockerInterface, "is_running", new=mock_async_return_true):
|
||||
with patch.object(DockerInterface, "is_running", return_value=True):
|
||||
await coresys.plugins.load()
|
||||
await coresys.homeassistant.load()
|
||||
await coresys.addons.load()
|
||||
|
@ -6,25 +6,19 @@ from docker.errors import DockerException
from supervisor.const import CoreState
from supervisor.coresys import CoreSys
from supervisor.resolution.const import UnhealthyReason
from supervisor.resolution.const import ContextType, IssueType, UnhealthyReason
from supervisor.resolution.data import Issue
from supervisor.resolution.evaluations.container import EvaluateContainer

def test_get_images(coresys: CoreSys):
"""Test getting images form docker."""
container = EvaluateContainer(coresys)
with patch(
"supervisor.resolution.evaluations.container.EvaluateContainer._get_images",
return_value=[MagicMock(tags=["test"])],
):
images = container._get_images()
assert images[0].tags[0] == "test"

with patch(
"supervisor.docker.manager.DockerAPI.images.list", side_effect=DockerException
):
images = container._get_images()
assert not images
def _make_image_attr(image: str) -> MagicMock:
out = MagicMock()
out.attrs = {
"Config": {
"Image": image,
},
}
return out

async def test_evaluation(coresys: CoreSys):

@ -35,15 +29,12 @@ async def test_evaluation(coresys: CoreSys):
assert container.reason not in coresys.resolution.unsupported
assert UnhealthyReason.DOCKER not in coresys.resolution.unhealthy

with patch(
"supervisor.resolution.evaluations.container.EvaluateContainer._get_images",
return_value=[
"armhfbuild/watchtower:latest",
"concerco/watchtowerv6:10.0.2",
"containrrr/watchtower:1.1",
"pyouroboros/ouroboros:1.4.3",
],
):
coresys.docker.containers.list.return_value = [
_make_image_attr("armhfbuild/watchtower:latest"),
_make_image_attr("concerco/watchtowerv6:10.0.2"),
_make_image_attr("containrrr/watchtower:1.1"),
_make_image_attr("pyouroboros/ouroboros:1.4.3"),
]
await container()
assert container.reason in coresys.resolution.unsupported
assert UnhealthyReason.DOCKER in coresys.resolution.unhealthy

@ -55,16 +46,26 @@ async def test_evaluation(coresys: CoreSys):
"pyouroboros/ouroboros:1.4.3",
}

with patch(
"supervisor.resolution.evaluations.container.EvaluateContainer._get_images",
return_value=[],
):
coresys.docker.containers.list.return_value = []
await container()
assert container.reason not in coresys.resolution.unsupported

assert coresys.resolution.evaluate.cached_images == set()
async def test_corrupt_docker(coresys: CoreSys):
"""Test corrupt docker issue."""
container = EvaluateContainer(coresys)
coresys.core.state = CoreState.RUNNING

corrupt_docker = Issue(IssueType.CORRUPT_DOCKER, ContextType.SYSTEM)
assert corrupt_docker not in coresys.resolution.issues

coresys.docker.containers.list.side_effect = DockerException
await container()
assert corrupt_docker in coresys.resolution.issues
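Note: test_corrupt_docker drives the new error path by making containers.list raise. The general MagicMock pattern, shown here against a throwaway client object rather than the coresys fixture:

from unittest.mock import MagicMock

from docker.errors import DockerException

client = MagicMock()

# return_value models the happy path ...
client.containers.list.return_value = []
assert client.containers.list() == []

# ... while side_effect models the corrupt-overlayfs case that the
# evaluation now reports as a CORRUPT_DOCKER issue.
client.containers.list.side_effect = DockerException("overlayfs broken")
try:
    client.containers.list()
except DockerException:
    pass
else:
    raise AssertionError("expected DockerException")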
async def test_did_run(coresys: CoreSys):
"""Test that the evaluation ran as expected."""
container = EvaluateContainer(coresys)