Backup and restore track progress in job (#4503)

* Backup and restore track progress in job

* Change to stage only updates and fix tests

* Leave HA alone if it wasn't restored

* skip check HA stage message when we don't check

* Change to helper to get current job

* Fix tests

* Mark jobs as internal to skip notifying HA
This commit is contained in:
Mike Degatano 2023-08-30 16:01:03 -04:00 committed by GitHub
parent de06361cb0
commit f93b753c03
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
27 changed files with 888 additions and 211 deletions

View File

@ -158,8 +158,7 @@ class AddonManager(CoreSysAttributes):
)
async def install(self, slug: str) -> None:
"""Install an add-on."""
if job := self.sys_jobs.get_job():
job.reference = slug
self.sys_jobs.current.reference = slug
if slug in self.local:
raise AddonsError(f"Add-on {slug} is already installed", _LOGGER.warning)
@ -263,8 +262,7 @@ class AddonManager(CoreSysAttributes):
Returns a coroutine that completes when addon has state 'started' (see addon.start)
if addon is started after update. Else nothing is returned.
"""
if job := self.sys_jobs.get_job():
job.reference = slug
self.sys_jobs.current.reference = slug
if slug not in self.local:
raise AddonsError(f"Add-on {slug} is not installed", _LOGGER.error)
@ -329,8 +327,7 @@ class AddonManager(CoreSysAttributes):
Returns a coroutine that completes when addon has state 'started' (see addon.start)
if addon is started after rebuild. Else nothing is returned.
"""
if job := self.sys_jobs.get_job():
job.reference = slug
self.sys_jobs.current.reference = slug
if slug not in self.local:
raise AddonsError(f"Add-on {slug} is not installed", _LOGGER.error)
@ -387,8 +384,7 @@ class AddonManager(CoreSysAttributes):
Returns a coroutine that completes when addon has state 'started' (see addon.start)
if addon is started after restore. Else nothing is returned.
"""
if job := self.sys_jobs.get_job():
job.reference = slug
self.sys_jobs.current.reference = slug
if slug not in self.local:
_LOGGER.debug("Add-on %s is not local available for restore", slug)

View File

@ -1,11 +1,37 @@
"""Backup consts."""
from enum import Enum
from enum import StrEnum
BUF_SIZE = 2**20 * 4 # 4MB
class BackupType(str, Enum):
class BackupType(StrEnum):
    """Backup type enum."""

    # "full" captures everything; "partial" is a user-selected subset of
    # add-ons/folders (see do_backup_partial's addons/folders parameters).
    FULL = "full"
    PARTIAL = "partial"
class BackupJobStage(StrEnum):
    """Backup job stage enum.

    Set on the current job via BackupManager._change_stage as a backup
    progresses, so listeners of job events can track progress.
    """

    ADDON_REPOSITORIES = "addon_repositories"
    ADDONS = "addons"
    DOCKER_CONFIG = "docker_config"
    FINISHING_FILE = "finishing_file"
    FOLDERS = "folders"
    HOME_ASSISTANT = "home_assistant"
    AWAIT_ADDON_RESTARTS = "await_addon_restarts"
class RestoreJobStage(StrEnum):
    """Restore job stage enum.

    Set on the current job via BackupManager._change_stage as a restore
    progresses, so listeners of job events can track progress.
    """

    ADDON_REPOSITORIES = "addon_repositories"
    ADDONS = "addons"
    AWAIT_ADDON_RESTARTS = "await_addon_restarts"
    AWAIT_HOME_ASSISTANT_RESTART = "await_home_assistant_restart"
    CHECK_HOME_ASSISTANT = "check_home_assistant"
    DOCKER_CONFIG = "docker_config"
    FOLDERS = "folders"
    HOME_ASSISTANT = "home_assistant"
    REMOVE_DELTA_ADDONS = "remove_delta_addons"

View File

@ -23,7 +23,7 @@ from ..utils.dt import utcnow
from ..utils.sentinel import DEFAULT
from ..utils.sentry import capture_exception
from .backup import Backup
from .const import BackupType
from .const import BackupJobStage, BackupType, RestoreJobStage
from .utils import create_slug
from .validate import ALL_FOLDERS, SCHEMA_BACKUPS_CONFIG
@ -49,7 +49,7 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
"""Initialize a backup manager."""
super().__init__(FILE_HASSIO_BACKUPS, SCHEMA_BACKUPS_CONFIG)
self.coresys = coresys
self._backups = {}
self._backups: dict[str, Backup] = {}
self.lock = asyncio.Lock()
@property
@ -76,7 +76,7 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
if mount.state == UnitActiveState.ACTIVE
]
def get(self, slug):
def get(self, slug: str) -> Backup:
"""Return backup object."""
return self._backups.get(slug)
@ -90,6 +90,24 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
return self.sys_config.path_backup
def _change_stage(
    self,
    backup: Backup,
    stage: BackupJobStage | RestoreJobStage,
    restore: bool = False,
) -> None:
    """Change the stage of the current job during backup/restore.

    Must be called from an existing backup/restore job.

    :param backup: backup being processed (only its slug is used, for logging)
    :param stage: new stage to record on the current job
    :param restore: True when this is a restore operation (affects log wording only)
    """
    # Log before updating the job so the log line precedes any job-change
    # event that setting the stage may emit.
    _LOGGER.info(
        "%s %s starting stage %s",
        "Restore" if restore else "Backup",
        backup.slug,
        stage,
    )
    self.sys_jobs.current.stage = stage
def _create_backup(
self,
name: str,
@ -98,7 +116,10 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
compressed: bool = True,
location: Mount | type[DEFAULT] | None = DEFAULT,
) -> Backup:
"""Initialize a new backup object from name."""
"""Initialize a new backup object from name.
Must be called from an existing backup job.
"""
date_str = utcnow().isoformat()
slug = create_slug(name, date_str)
tar_file = Path(self._get_base_path(location), f"{slug}.tar")
@ -107,23 +128,24 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
backup = Backup(self.coresys, tar_file)
backup.new(slug, name, date_str, sys_type, password, compressed)
backup.store_repositories()
backup.store_dockerconfig()
# Add backup ID to job
if job := self.sys_jobs.get_job():
job.reference = backup.slug
self.sys_jobs.current.reference = backup.slug
self._change_stage(backup, BackupJobStage.ADDON_REPOSITORIES)
backup.store_repositories()
self._change_stage(backup, BackupJobStage.DOCKER_CONFIG)
backup.store_dockerconfig()
return backup
def load(self):
def load(self) -> Awaitable[None]:
    """Load existing backups data.

    Return a coroutine.
    """
    return self.reload()
async def reload(self):
async def reload(self) -> None:
"""Load exists backups."""
self._backups = {}
@ -143,7 +165,7 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
if tasks:
await asyncio.wait(tasks)
def remove(self, backup):
def remove(self, backup: Backup) -> bool:
"""Remove a backup."""
try:
backup.tarfile.unlink()
@ -156,7 +178,7 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
return True
async def import_backup(self, tar_file):
async def import_backup(self, tar_file: Path) -> Backup | None:
"""Check backup tarfile and import it."""
backup = Backup(self.coresys, tar_file)
@ -193,26 +215,34 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
addon_list: list[Addon],
folder_list: list[str],
homeassistant: bool,
):
) -> Backup | None:
"""Create a backup.
Must be called from an existing backup job.
"""
addon_start_tasks: list[Awaitable[None]] | None = None
try:
self.sys_core.state = CoreState.FREEZE
async with backup:
# Backup add-ons
if addon_list:
_LOGGER.info("Backing up %s store Add-ons", backup.slug)
self._change_stage(backup, BackupJobStage.ADDONS)
addon_start_tasks = await backup.store_addons(addon_list)
# HomeAssistant Folder is for v1
if homeassistant:
self._change_stage(backup, BackupJobStage.HOME_ASSISTANT)
await backup.store_homeassistant()
# Backup folders
if folder_list:
_LOGGER.info("Backing up %s store folders", backup.slug)
self._change_stage(backup, BackupJobStage.FOLDERS)
await backup.store_folders(folder_list)
self._change_stage(backup, BackupJobStage.FINISHING_FILE)
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception("Backup %s error", backup.slug)
capture_exception(err)
@ -221,6 +251,7 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
self._backups[backup.slug] = backup
if addon_start_tasks:
self._change_stage(backup, BackupJobStage.AWAIT_ADDON_RESTARTS)
# Ignore exceptions from waiting for addon startup, addon errors handled elsewhere
await asyncio.gather(*addon_start_tasks, return_exceptions=True)
@ -234,11 +265,11 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
)
async def do_backup_full(
self,
name="",
password=None,
compressed=True,
name: str = "",
password: str | None = None,
compressed: bool = True,
location: Mount | type[DEFAULT] | None = DEFAULT,
):
) -> Backup | None:
"""Create a full backup."""
if self.lock.locked():
_LOGGER.error("A backup/restore process is already running")
@ -270,7 +301,7 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
homeassistant: bool = False,
compressed: bool = True,
location: Mount | type[DEFAULT] | None = DEFAULT,
):
) -> Backup | None:
"""Create a partial backup."""
if self.lock.locked():
_LOGGER.error("A backup/restore process is already running")
@ -315,28 +346,37 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
folder_list: list[str],
homeassistant: bool,
replace: bool,
):
) -> bool:
"""Restore from a backup.
Must be called from an existing restore job.
"""
addon_start_tasks: list[Awaitable[None]] | None = None
try:
task_hass: asyncio.Task | None = None
async with backup:
# Restore docker config
_LOGGER.info("Restoring %s Docker config", backup.slug)
self._change_stage(backup, RestoreJobStage.DOCKER_CONFIG, restore=True)
backup.restore_dockerconfig(replace)
# Process folders
if folder_list:
_LOGGER.info("Restoring %s folders", backup.slug)
self._change_stage(backup, RestoreJobStage.FOLDERS, restore=True)
await backup.restore_folders(folder_list)
# Process Home-Assistant
if homeassistant:
_LOGGER.info("Restoring %s Home Assistant Core", backup.slug)
self._change_stage(
backup, RestoreJobStage.HOME_ASSISTANT, restore=True
)
task_hass = await backup.restore_homeassistant()
# Delete delta add-ons
if replace:
_LOGGER.info("Removing Add-ons not in the backup %s", backup.slug)
self._change_stage(
backup, RestoreJobStage.REMOVE_DELTA_ADDONS, restore=True
)
for addon in self.sys_addons.installed:
if addon.slug in backup.addon_list:
continue
@ -349,15 +389,21 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
_LOGGER.warning("Can't uninstall Add-on %s", addon.slug)
if addon_list:
_LOGGER.info("Restoring %s Repositories", backup.slug)
self._change_stage(
backup, RestoreJobStage.ADDON_REPOSITORIES, restore=True
)
await backup.restore_repositories(replace)
_LOGGER.info("Restoring %s Add-ons", backup.slug)
self._change_stage(backup, RestoreJobStage.ADDONS, restore=True)
addon_start_tasks = await backup.restore_addons(addon_list)
# Wait for Home Assistant Core update/downgrade
if task_hass:
_LOGGER.info("Restore %s wait for Home-Assistant", backup.slug)
self._change_stage(
backup,
RestoreJobStage.AWAIT_HOME_ASSISTANT_RESTART,
restore=True,
)
await task_hass
except Exception as err: # pylint: disable=broad-except
@ -366,19 +412,28 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
return False
else:
if addon_start_tasks:
self._change_stage(
backup, RestoreJobStage.AWAIT_ADDON_RESTARTS, restore=True
)
# Ignore exceptions from waiting for addon startup, addon errors handled elsewhere
await asyncio.gather(*addon_start_tasks, return_exceptions=True)
return True
finally:
# Do we need start Home Assistant Core?
if not await self.sys_homeassistant.core.is_running():
await self.sys_homeassistant.core.start()
# Leave Home Assistant alone if it wasn't part of the restore
if homeassistant:
self._change_stage(
backup, RestoreJobStage.CHECK_HOME_ASSISTANT, restore=True
)
# Check If we can access to API / otherwise restart
if not await self.sys_homeassistant.api.check_api_state():
_LOGGER.warning("Need restart HomeAssistant for API")
await self.sys_homeassistant.core.restart()
# Do we need start Home Assistant Core?
if not await self.sys_homeassistant.core.is_running():
await self.sys_homeassistant.core.start()
# Check If we can access to API / otherwise restart
if not await self.sys_homeassistant.api.check_api_state():
_LOGGER.warning("Need restart HomeAssistant for API")
await self.sys_homeassistant.core.restart()
@Job(
name="backup_manager_full_restore",
@ -390,11 +445,12 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
JobCondition.RUNNING,
],
)
async def do_restore_full(self, backup: Backup, password=None):
async def do_restore_full(
self, backup: Backup, password: str | None = None
) -> bool:
"""Restore a backup."""
# Add backup ID to job
if job := self.sys_jobs.get_job():
job.reference = backup.slug
self.sys_jobs.current.reference = backup.slug
if self.lock.locked():
_LOGGER.error("A backup/restore process is already running")
@ -431,6 +487,7 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
if success:
_LOGGER.info("Full-Restore %s done", backup.slug)
return success
@Job(
name="backup_manager_partial_restore",
@ -449,11 +506,10 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
addons: list[str] | None = None,
folders: list[Path] | None = None,
password: str | None = None,
):
) -> bool:
"""Restore a backup."""
# Add backup ID to job
if job := self.sys_jobs.get_job():
job.reference = backup.slug
self.sys_jobs.current.reference = backup.slug
if self.lock.locked():
_LOGGER.error("A backup/restore process is already running")
@ -495,3 +551,4 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
if success:
_LOGGER.info("Partial-Restore %s done", backup.slug)
return success

View File

@ -746,6 +746,7 @@ class DockerAddon(DockerInterface):
name="docker_addon_hardware_events",
conditions=[JobCondition.OS_AGENT],
limit=JobExecutionLimit.SINGLE_WAIT,
internal=True,
)
async def _hardware_events(self, device: Device) -> None:
"""Process Hardware events for adjust device access."""

View File

@ -44,6 +44,10 @@ class JobNotFound(JobException):
"""Exception for job not found."""
class JobInvalidUpdate(JobException):
"""Exception for invalid update to a job."""
class JobGroupExecutionLimitExceeded(JobException):
"""Exception when job group execution limit exceeded."""

View File

@ -35,6 +35,7 @@ class HomeAssistantAPI(CoreSysAttributes):
@Job(
name="home_assistant_api_ensure_access_token",
limit=JobExecutionLimit.SINGLE_WAIT,
internal=True,
)
async def ensure_access_token(self) -> None:
"""Ensure there is an access token."""

View File

@ -35,5 +35,6 @@ class WSEvent(str, Enum):
HEALTH_CHANGED = "health_changed"
ISSUE_CHANGED = "issue_changed"
ISSUE_REMOVED = "issue_removed"
JOB = "job"
SUPERVISOR_UPDATE = "supervisor_update"
SUPPORTED_CHANGED = "supported_changed"

View File

@ -439,6 +439,7 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
name="home_assistant_get_users",
limit=JobExecutionLimit.THROTTLE_WAIT,
throttle_period=timedelta(minutes=5),
internal=True,
)
async def get_users(self) -> list[IngressSessionDataUser]:
"""Get list of all configured users."""

View File

@ -44,6 +44,7 @@ class HomeAssistantSecrets(CoreSysAttributes):
name="home_assistant_secrets_read",
limit=JobExecutionLimit.THROTTLE_WAIT,
throttle_period=timedelta(seconds=60),
internal=True,
)
async def _read_secrets(self):
"""Read secrets.yaml into memory."""

View File

@ -107,7 +107,11 @@ class NetworkManager(CoreSysAttributes):
return Interface.from_dbus_interface(self.sys_dbus.network.get(inet_name))
@Job(name="network_manager_load", conditions=[JobCondition.HOST_NETWORK])
@Job(
name="network_manager_load",
conditions=[JobCondition.HOST_NETWORK],
internal=True,
)
async def load(self):
"""Load network information and reapply defaults over dbus."""
# Apply current settings on each interface so OS can update any out of date defaults

View File

@ -3,15 +3,18 @@ from collections.abc import Callable
from contextlib import contextmanager
from contextvars import ContextVar, Token
import logging
from typing import Any
from uuid import UUID, uuid4
from attrs import define, field
from attrs.setters import frozen
from attrs import Attribute, define, field
from attrs.setters import convert as attr_convert, frozen, validate as attr_validate
from attrs.validators import ge, le
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import JobNotFound, JobStartException
from ..homeassistant.const import WSEvent
from ..utils.common import FileConfiguration
from ..utils.sentry import capture_exception
from .const import ATTR_IGNORE_CONDITIONS, FILE_CONFIG_JOBS, JobCondition
from .validate import SCHEMA_JOBS_CONFIG
@ -24,19 +27,57 @@ _CURRENT_JOB: ContextVar[UUID] = ContextVar("current_job")
_LOGGER: logging.Logger = logging.getLogger(__name__)
def _invalid_if_done(instance: "SupervisorJob", *_) -> None:
"""Validate that job is not done."""
if instance.done:
raise ValueError("Cannot update a job that is done")
def _on_change(instance: "SupervisorJob", attribute: Attribute, value: Any) -> Any:
    """Forward a change to a field on to the listener if defined."""
    # Run attrs' own converter and validators first so the listener only
    # ever sees values that were accepted for the field.
    value = attr_convert(instance, attribute, value)
    value = attr_validate(instance, attribute, value)
    if instance.on_change:
        instance.on_change(instance, attribute, value)
    # on_setattr hooks must return the value that will be stored.
    return value
@define
class SupervisorJob:
"""Representation of a job running in supervisor."""
name: str = field(on_setattr=frozen)
reference: str | None = None
progress: int = field(default=0, validator=[ge(0), le(100)])
stage: str | None = None
reference: str | None = field(default=None, on_setattr=_on_change)
progress: float = field(
default=0,
validator=[ge(0), le(100), _invalid_if_done],
on_setattr=_on_change,
converter=lambda val: round(val, 1),
)
stage: str | None = field(
default=None, validator=[_invalid_if_done], on_setattr=_on_change
)
uuid: UUID = field(init=False, factory=lambda: uuid4().hex, on_setattr=frozen)
parent_id: UUID = field(
parent_id: UUID | None = field(
init=False, factory=lambda: _CURRENT_JOB.get(None), on_setattr=frozen
)
done: bool = field(init=False, default=False)
done: bool | None = field(init=False, default=None, on_setattr=_on_change)
on_change: Callable[["SupervisorJob", Attribute, Any], None] | None = field(
default=None, on_setattr=frozen
)
internal: bool = field(default=False, on_setattr=frozen)
def as_dict(self) -> dict[str, Any]:
    """Return dictionary representation.

    This snapshot is the payload shape used when reporting job changes
    (see JobManager._notify_on_job_change in this file).
    """
    return {
        "name": self.name,
        "reference": self.reference,
        "uuid": self.uuid,
        "progress": self.progress,
        "stage": self.stage,
        "done": self.done,
        "parent_id": self.parent_id,
    }
@contextmanager
def start(self, *, on_done: Callable[["SupervisorJob"], None] | None = None):
@ -46,11 +87,12 @@ class SupervisorJob:
This is to ensure that each asyncio task can only be doing one job at a time as that
determines what resources it can and cannot access.
"""
if self.done:
raise JobStartException("Job is already complete")
if self.done is not None:
raise JobStartException("Job has already been started")
if _CURRENT_JOB.get(None) != self.parent_id:
raise JobStartException("Job has a different parent from current job")
self.done = False
token: Token[UUID] | None = None
try:
token = _CURRENT_JOB.set(self.uuid)
@ -87,23 +129,54 @@ class JobManager(FileConfiguration, CoreSysAttributes):
"""Set a list of ignored condition."""
self._data[ATTR_IGNORE_CONDITIONS] = value
@property
def current(self) -> SupervisorJob:
    """Return current job of the asyncio task.

    Must be called from within a job. Raises RuntimeError if there is no current job.
    """
    try:
        return self.get_job(_CURRENT_JOB.get())
    except (LookupError, JobNotFound) as err:
        # Missing current job indicates a programming error: report it to
        # Sentry, then surface a RuntimeError without exception chaining.
        capture_exception(err)
        raise RuntimeError("No job for the current asyncio task!") from None
@property
def is_job(self) -> bool:
    """Return true if there is an active job for the current asyncio task."""
    current_uuid = _CURRENT_JOB.get(None)
    return bool(current_uuid)
def _notify_on_job_change(
    self, job: SupervisorJob, attribute: Attribute, value: Any
) -> None:
    """Notify Home Assistant of a change to a job."""
    # Merge the changed field over the as_dict snapshot: this runs from the
    # attrs on_setattr hook (_on_change), i.e. before the new value has been
    # stored on the job itself.
    self.sys_homeassistant.websocket.supervisor_event(
        WSEvent.JOB, job.as_dict() | {attribute.alias: value}
    )
def new_job(
self, name: str, reference: str | None = None, initial_stage: str | None = None
self,
name: str,
reference: str | None = None,
initial_stage: str | None = None,
internal: bool = False,
) -> SupervisorJob:
"""Create a new job."""
job = SupervisorJob(name, reference=reference, stage=initial_stage)
job = SupervisorJob(
name,
reference=reference,
stage=initial_stage,
on_change=None if internal else self._notify_on_job_change,
internal=internal,
)
self._jobs[job.uuid] = job
return job
def get_job(self, uuid: UUID | None = None) -> SupervisorJob | None:
"""Return a job by uuid if it exists. Returns the current job of the asyncio task if uuid omitted."""
if uuid:
return self._jobs.get(uuid)
if uuid := _CURRENT_JOB.get(None):
return self._jobs.get(uuid)
return None
def get_job(self, uuid: UUID) -> SupervisorJob:
    """Return a job by uuid. Raises if it does not exist.

    :raises JobNotFound: if no job exists with the given uuid
    """
    job = self._jobs.get(uuid)
    if job is None:
        raise JobNotFound(f"No job found with id {uuid}")
    return job
def remove_job(self, job: SupervisorJob) -> None:
"""Remove a job by UUID."""

View File

@ -38,6 +38,7 @@ class Job(CoreSysAttributes):
| Callable[[CoreSys, datetime, list[datetime] | None], timedelta]
| None = None,
throttle_max_calls: int | None = None,
internal: bool = False,
):
"""Initialize the Job class."""
if name in _JOB_NAMES:
@ -55,6 +56,7 @@ class Job(CoreSysAttributes):
self._method = None
self._last_call: dict[str | None, datetime] = {}
self._rate_limited_calls: dict[str, list[datetime]] | None = None
self._internal = internal
# Validate Options
if (
@ -186,7 +188,9 @@ class Job(CoreSysAttributes):
job_group = self._post_init(obj)
group_name: str | None = job_group.group_name if job_group else None
job = self.sys_jobs.new_job(
self.name, job_group.job_reference if job_group else None
self.name,
job_group.job_reference if job_group else None,
internal=self._internal,
)
# Handle condition

View File

@ -42,8 +42,8 @@ class JobGroup(CoreSysAttributes):
"""Return true if current task has the lock on this job group."""
return (
self.active_job
and (task_job := self.sys_jobs.get_job())
and self.active_job == task_job
and self.sys_jobs.is_job
and self.active_job == self.sys_jobs.current
)
@property

View File

@ -188,8 +188,7 @@ class MountManager(FileConfiguration, CoreSysAttributes):
async def create_mount(self, mount: Mount) -> None:
"""Add/update a mount."""
# Add mount name to job
if job := self.sys_jobs.get_job():
job.reference = mount.name
self.sys_jobs.current.reference = mount.name
if mount.name in self._mounts:
_LOGGER.debug("Mount '%s' exists, unmounting then mounting from new config")
@ -216,8 +215,7 @@ class MountManager(FileConfiguration, CoreSysAttributes):
async def remove_mount(self, name: str, *, retain_entry: bool = False) -> None:
"""Remove a mount."""
# Add mount name to job
if job := self.sys_jobs.get_job():
job.reference = name
self.sys_jobs.current.reference = name
if name not in self._mounts:
raise MountNotFound(
@ -247,8 +245,7 @@ class MountManager(FileConfiguration, CoreSysAttributes):
async def reload_mount(self, name: str) -> None:
"""Reload a mount to retry mounting with same config."""
# Add mount name to job
if job := self.sys_jobs.get_job():
job.reference = name
self.sys_jobs.current.reference = name
if name not in self._mounts:
raise MountNotFound(

View File

@ -165,7 +165,7 @@ class DataDisk(CoreSysAttributes):
if block.drive == drive.object_path
]
@Job(name="data_disk_load", conditions=[JobCondition.OS_AGENT])
@Job(name="data_disk_load", conditions=[JobCondition.OS_AGENT], internal=True)
async def load(self) -> None:
"""Load DataDisk feature."""
# Update datadisk details on OS-Agent

View File

@ -227,7 +227,7 @@ class OSManager(CoreSysAttributes):
)
raise HassOSUpdateError()
@Job(name="os_manager_mark_healthy", conditions=[JobCondition.HAOS])
@Job(name="os_manager_mark_healthy", conditions=[JobCondition.HAOS], internal=True)
async def mark_healthy(self) -> None:
"""Set booted partition as good for rauc."""
try:

View File

@ -202,6 +202,7 @@ async def test_watchdog_on_stop(coresys: CoreSys, install_addon_ssh: Addon) -> N
async def test_listener_attached_on_install(coresys: CoreSys, repository):
"""Test events listener attached on addon install."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
container_collection = MagicMock()
container_collection.get.side_effect = DockerException()
with patch(
@ -217,7 +218,7 @@ async def test_listener_attached_on_install(coresys: CoreSys, repository):
"supervisor.addons.model.AddonModel.with_ingress",
new=PropertyMock(return_value=False),
):
await coresys.addons.install.__wrapped__(coresys.addons, TEST_ADDON_SLUG)
await coresys.addons.install(TEST_ADDON_SLUG)
_fire_test_event(coresys, f"addon_{TEST_ADDON_SLUG}", ContainerState.RUNNING)
await asyncio.sleep(0)

View File

@ -1,14 +1,13 @@
"""Test ingress API."""
# pylint: disable=protected-access
from unittest.mock import patch
import pytest
from unittest.mock import AsyncMock, patch
# pylint: disable=redefined-outer-name
from aiohttp.test_utils import TestClient
from supervisor.coresys import CoreSys
@pytest.mark.asyncio
async def test_validate_session(api_client, coresys):
async def test_validate_session(api_client: TestClient, coresys: CoreSys):
"""Test validating ingress session."""
with patch("aiohttp.web_request.BaseRequest.__getitem__", return_value=None):
resp = await api_client.post(
@ -40,8 +39,9 @@ async def test_validate_session(api_client, coresys):
assert coresys.ingress.sessions[session] > valid_time
@pytest.mark.asyncio
async def test_validate_session_with_user_id(api_client, coresys):
async def test_validate_session_with_user_id(
api_client: TestClient, coresys: CoreSys, ha_ws_client: AsyncMock
):
"""Test validating ingress session with user ID passed."""
with patch("aiohttp.web_request.BaseRequest.__getitem__", return_value=None):
resp = await api_client.post(
@ -54,15 +54,16 @@ async def test_validate_session_with_user_id(api_client, coresys):
"aiohttp.web_request.BaseRequest.__getitem__",
return_value=coresys.homeassistant,
):
client = coresys.homeassistant.websocket._client
client.async_send_command.return_value = [
ha_ws_client.async_send_command.return_value = [
{"id": "some-id", "name": "Some Name", "username": "sn"}
]
resp = await api_client.post("/ingress/session", json={"user_id": "some-id"})
result = await resp.json()
client.async_send_command.assert_called_with({"type": "config/auth/list"})
assert {"type": "config/auth/list"} in [
call.args[0] for call in ha_ws_client.async_send_command.call_args_list
]
assert "session" in result["data"]
session = result["data"]["session"]

View File

@ -2,7 +2,7 @@
import asyncio
from shutil import rmtree
from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch
from unittest.mock import ANY, AsyncMock, MagicMock, Mock, PropertyMock, patch
from awesomeversion import AwesomeVersion
from dbus_fast import DBusError
@ -21,6 +21,7 @@ from supervisor.docker.const import ContainerState
from supervisor.docker.homeassistant import DockerHomeAssistant
from supervisor.docker.monitor import DockerContainerStateEvent
from supervisor.exceptions import AddonsError, DockerError
from supervisor.homeassistant.core import HomeAssistantCore
from supervisor.homeassistant.module import HomeAssistant
from supervisor.mounts.mount import Mount
@ -827,3 +828,312 @@ async def test_restore_with_healthcheck(
assert state_changes == [AddonState.STOPPED, AddonState.STARTUP]
assert install_addon_ssh.state == AddonState.STARTED
assert coresys.core.state == CoreState.RUNNING
def _make_backup_message_for_assert(
*,
full: bool = True,
restore: bool = False,
reference: str,
stage: str | None,
done: bool = False,
):
"""Make a backup message to use for assert test."""
return {
"type": "supervisor/event",
"data": {
"event": "job",
"data": {
"name": f"backup_manager_{'full' if full else 'partial'}_{'restore' if restore else 'backup'}",
"reference": reference,
"uuid": ANY,
"progress": 0,
"stage": stage,
"done": done,
"parent_id": None,
},
},
}
async def test_backup_progress(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
ha_ws_client: AsyncMock,
tmp_supervisor_data,
path_extern,
):
"""Test progress is tracked during backups."""
container.status = "running"
install_addon_ssh.path_data.mkdir()
coresys.core.state = CoreState.RUNNING
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
with patch.object(
AddonModel, "backup_mode", new=PropertyMock(return_value=AddonBackupMode.COLD)
), patch("supervisor.addons.addon.asyncio.Event.wait"):
full_backup: Backup = await coresys.backups.do_backup_full()
await asyncio.sleep(0)
messages = [
call.args[0]
for call in ha_ws_client.async_send_command.call_args_list
if call.args[0]["data"].get("data", {}).get("name")
== "backup_manager_full_backup"
]
assert messages == [
_make_backup_message_for_assert(reference=None, stage=None),
_make_backup_message_for_assert(reference=full_backup.slug, stage=None),
_make_backup_message_for_assert(
reference=full_backup.slug, stage="addon_repositories"
),
_make_backup_message_for_assert(
reference=full_backup.slug, stage="docker_config"
),
_make_backup_message_for_assert(reference=full_backup.slug, stage="addons"),
_make_backup_message_for_assert(
reference=full_backup.slug, stage="home_assistant"
),
_make_backup_message_for_assert(reference=full_backup.slug, stage="folders"),
_make_backup_message_for_assert(
reference=full_backup.slug, stage="finishing_file"
),
_make_backup_message_for_assert(
reference=full_backup.slug, stage="await_addon_restarts"
),
_make_backup_message_for_assert(
reference=full_backup.slug, stage="await_addon_restarts", done=True
),
]
ha_ws_client.async_send_command.reset_mock()
partial_backup: Backup = await coresys.backups.do_backup_partial(
addons=["local_ssh"], folders=["media", "share", "ssl"]
)
await asyncio.sleep(0)
messages = [
call.args[0]
for call in ha_ws_client.async_send_command.call_args_list
if call.args[0]["data"].get("data", {}).get("name")
== "backup_manager_partial_backup"
]
assert messages == [
_make_backup_message_for_assert(full=False, reference=None, stage=None),
_make_backup_message_for_assert(
full=False, reference=partial_backup.slug, stage=None
),
_make_backup_message_for_assert(
full=False, reference=partial_backup.slug, stage="addon_repositories"
),
_make_backup_message_for_assert(
full=False, reference=partial_backup.slug, stage="docker_config"
),
_make_backup_message_for_assert(
full=False, reference=partial_backup.slug, stage="addons"
),
_make_backup_message_for_assert(
full=False, reference=partial_backup.slug, stage="folders"
),
_make_backup_message_for_assert(
full=False, reference=partial_backup.slug, stage="finishing_file"
),
_make_backup_message_for_assert(
full=False,
reference=partial_backup.slug,
stage="finishing_file",
done=True,
),
]
async def test_restore_progress(
    request: pytest.FixtureRequest,
    coresys: CoreSys,
    install_addon_ssh: Addon,
    container: MagicMock,
    ha_ws_client: AsyncMock,
    tmp_supervisor_data,
    path_extern,
):
    """Test stage progress is reported over the HA websocket during restores."""
    # Start from a running system with one started add-on so the full backup
    # made below includes add-ons, folders and Home Assistant.
    container.status = "running"
    install_addon_ssh.path_data.mkdir()
    install_addon_ssh.state = AddonState.STARTED
    coresys.core.state = CoreState.RUNNING
    coresys.hardware.disk.get_disk_free_space = lambda x: 5000

    full_backup: Backup = await coresys.backups.do_backup_full()
    await asyncio.sleep(0)
    # Drop the messages emitted while making the backup; only restore
    # messages are asserted below.
    ha_ws_client.async_send_command.reset_mock()

    # Install another addon to be uninstalled
    request.getfixturevalue("install_addon_example")

    # Full restore. Patch out the slow/external pieces (HA restore/update,
    # add-on availability and ingress checks) so the restore runs to
    # completion without real containers.
    with patch("supervisor.addons.addon.asyncio.Event.wait"), patch.object(
        HomeAssistant, "restore"
    ), patch.object(HomeAssistantCore, "update"), patch.object(
        AddonModel, "_validate_availability"
    ), patch.object(
        AddonModel, "with_ingress", new=PropertyMock(return_value=False)
    ):
        await coresys.backups.do_restore_full(full_backup)
        await asyncio.sleep(0)

    # Keep only job events for the full-restore job.
    messages = [
        call.args[0]
        for call in ha_ws_client.async_send_command.call_args_list
        if call.args[0]["data"].get("data", {}).get("name")
        == "backup_manager_full_restore"
    ]
    # Every stage must be announced exactly once, in order, and the final
    # message must be marked done.
    assert messages == [
        _make_backup_message_for_assert(restore=True, reference=None, stage=None),
        _make_backup_message_for_assert(
            restore=True, reference=full_backup.slug, stage=None
        ),
        _make_backup_message_for_assert(
            restore=True, reference=full_backup.slug, stage="docker_config"
        ),
        _make_backup_message_for_assert(
            restore=True, reference=full_backup.slug, stage="folders"
        ),
        _make_backup_message_for_assert(
            restore=True,
            reference=full_backup.slug,
            stage="home_assistant",
        ),
        _make_backup_message_for_assert(
            restore=True,
            reference=full_backup.slug,
            stage="remove_delta_addons",
        ),
        _make_backup_message_for_assert(
            restore=True,
            reference=full_backup.slug,
            stage="addon_repositories",
        ),
        _make_backup_message_for_assert(
            restore=True, reference=full_backup.slug, stage="addons"
        ),
        _make_backup_message_for_assert(
            restore=True,
            reference=full_backup.slug,
            stage="await_home_assistant_restart",
        ),
        _make_backup_message_for_assert(
            restore=True,
            reference=full_backup.slug,
            stage="await_addon_restarts",
        ),
        _make_backup_message_for_assert(
            restore=True,
            reference=full_backup.slug,
            stage="check_home_assistant",
        ),
        _make_backup_message_for_assert(
            restore=True,
            reference=full_backup.slug,
            stage="check_home_assistant",
            done=True,
        ),
    ]

    # Partial restore of folders only: a shorter stage sequence is expected.
    folders_backup: Backup = await coresys.backups.do_backup_partial(
        folders=["media", "share", "ssl"]
    )
    ha_ws_client.async_send_command.reset_mock()
    await coresys.backups.do_restore_partial(
        folders_backup, folders=["media", "share", "ssl"]
    )
    await asyncio.sleep(0)

    messages = [
        call.args[0]
        for call in ha_ws_client.async_send_command.call_args_list
        if call.args[0]["data"].get("data", {}).get("name")
        == "backup_manager_partial_restore"
    ]
    assert messages == [
        _make_backup_message_for_assert(
            full=False, restore=True, reference=None, stage=None
        ),
        _make_backup_message_for_assert(
            full=False,
            restore=True,
            reference=folders_backup.slug,
            stage=None,
        ),
        _make_backup_message_for_assert(
            full=False,
            restore=True,
            reference=folders_backup.slug,
            stage="docker_config",
        ),
        _make_backup_message_for_assert(
            full=False,
            restore=True,
            reference=folders_backup.slug,
            stage="folders",
        ),
        _make_backup_message_for_assert(
            full=False,
            restore=True,
            reference=folders_backup.slug,
            stage="folders",
            done=True,
        ),
    ]

    # Partial restore of a stopped add-on. HomeAssistantCore.start is patched
    # since this backup does not include Home Assistant itself.
    container.status = "stopped"
    install_addon_ssh.state = AddonState.STOPPED
    addon_backup: Backup = await coresys.backups.do_backup_partial(addons=["local_ssh"])

    ha_ws_client.async_send_command.reset_mock()
    with patch.object(AddonModel, "_validate_availability"), patch.object(
        HomeAssistantCore, "start"
    ):
        await coresys.backups.do_restore_partial(addon_backup, addons=["local_ssh"])
        await asyncio.sleep(0)

    messages = [
        call.args[0]
        for call in ha_ws_client.async_send_command.call_args_list
        if call.args[0]["data"].get("data", {}).get("name")
        == "backup_manager_partial_restore"
    ]
    assert messages == [
        _make_backup_message_for_assert(
            full=False, restore=True, reference=None, stage=None
        ),
        _make_backup_message_for_assert(
            full=False,
            restore=True,
            reference=addon_backup.slug,
            stage=None,
        ),
        _make_backup_message_for_assert(
            full=False,
            restore=True,
            reference=addon_backup.slug,
            stage="docker_config",
        ),
        _make_backup_message_for_assert(
            full=False,
            restore=True,
            reference=addon_backup.slug,
            stage="addon_repositories",
        ),
        _make_backup_message_for_assert(
            full=False,
            restore=True,
            reference=addon_backup.slug,
            stage="addons",
        ),
        _make_backup_message_for_assert(
            full=False,
            restore=True,
            reference=addon_backup.slug,
            stage="addons",
            done=True,
        ),
    ]

View File

@ -371,6 +371,12 @@ async def coresys(
await coresys_obj.websession.close()
@pytest.fixture
def ha_ws_client(coresys: CoreSys) -> AsyncMock:
    """Expose the mocked Home Assistant websocket client for assertions."""
    # pylint: disable=protected-access
    client = coresys.homeassistant.websocket._client
    return client
@pytest.fixture
async def tmp_supervisor_data(coresys: CoreSys, tmp_path: Path) -> Path:
"""Patch supervisor data to be tmp_path."""
@ -516,6 +522,18 @@ def install_addon_ssh(coresys: CoreSys, repository):
yield addon
@pytest.fixture
def install_addon_example(coresys: CoreSys, repository):
    """Install local_example add-on."""
    # pylint: disable=protected-access
    addons_data = coresys.addons.data
    store_addon = coresys.addons.store["local_example"]
    addons_data.install(store_addon)
    # Re-run the schema so the freshly installed data is validated/normalized.
    addons_data._data = addons_data._schema(addons_data._data)

    example = Addon(coresys, store_addon.slug)
    coresys.addons.local[example.slug] = example

    yield example
@pytest.fixture
async def mock_full_backup(coresys: CoreSys, tmp_path) -> Backup:
"""Mock a full backup."""

View File

View File

@ -0,0 +1,14 @@
# https://developers.home-assistant.io/docs/add-ons/configuration#add-on-dockerfile
build_from:
aarch64: "ghcr.io/home-assistant/aarch64-base:3.15"
amd64: "ghcr.io/home-assistant/amd64-base:3.15"
armhf: "ghcr.io/home-assistant/armhf-base:3.15"
armv7: "ghcr.io/home-assistant/armv7-base:3.15"
i386: "ghcr.io/home-assistant/i386-base:3.15"
labels:
org.opencontainers.image.title: "Home Assistant Add-on: Example add-on"
org.opencontainers.image.description: "Example add-on to use as a blueprint for new add-ons."
org.opencontainers.image.source: "https://github.com/home-assistant/addons-example"
org.opencontainers.image.licenses: "Apache License 2.0"
args:
TEMPIO_VERSION: "2021.09.0"

View File

@ -0,0 +1,19 @@
# https://developers.home-assistant.io/docs/add-ons/configuration#add-on-config
name: Example add-on
version: "1.2.0"
slug: example
description: Example add-on
url: "https://github.com/home-assistant/addons-example/tree/main/example"
arch:
- armhf
- armv7
- aarch64
- amd64
- i386
init: false
map:
- share:rw
options:
message: "Hello world..."
schema:
message: "str?"

View File

@ -1,7 +1,7 @@
"""Test network manager."""
import asyncio
from ipaddress import IPv4Address, IPv6Address
from unittest.mock import patch
from unittest.mock import AsyncMock, patch
from dbus_fast import Variant
import pytest
@ -159,13 +159,11 @@ async def test_scan_wifi_with_failures(
async def test_host_connectivity_changed(
coresys: CoreSys, network_manager_service: NetworkManagerService
coresys: CoreSys,
network_manager_service: NetworkManagerService,
ha_ws_client: AsyncMock,
):
"""Test host connectivity changed."""
# pylint: disable=protected-access
client = coresys.homeassistant.websocket._client
# pylint: enable=protected-access
await coresys.host.load()
assert coresys.host.network.connectivity is True
@ -173,48 +171,42 @@ async def test_host_connectivity_changed(
await network_manager_service.ping()
assert coresys.host.network.connectivity is False
await asyncio.sleep(0)
client.async_send_command.assert_called_once_with(
{
"type": WSType.SUPERVISOR_EVENT,
"data": {
"event": WSEvent.SUPERVISOR_UPDATE,
"update_key": "network",
"data": {"host_internet": False},
},
}
)
assert {
"type": WSType.SUPERVISOR_EVENT,
"data": {
"event": WSEvent.SUPERVISOR_UPDATE,
"update_key": "network",
"data": {"host_internet": False},
},
} in [call.args[0] for call in ha_ws_client.async_send_command.call_args_list]
client.async_send_command.reset_mock()
ha_ws_client.async_send_command.reset_mock()
network_manager_service.emit_properties_changed({}, ["Connectivity"])
await network_manager_service.ping()
await network_manager_service.ping()
assert coresys.host.network.connectivity is True
await asyncio.sleep(0)
client.async_send_command.assert_called_once_with(
{
"type": WSType.SUPERVISOR_EVENT,
"data": {
"event": WSEvent.SUPERVISOR_UPDATE,
"update_key": "network",
"data": {"host_internet": True},
},
}
)
assert {
"type": WSType.SUPERVISOR_EVENT,
"data": {
"event": WSEvent.SUPERVISOR_UPDATE,
"update_key": "network",
"data": {"host_internet": True},
},
} in [call.args[0] for call in ha_ws_client.async_send_command.call_args_list]
async def test_host_connectivity_disabled(
coresys: CoreSys, network_manager_service: NetworkManagerService
coresys: CoreSys,
network_manager_service: NetworkManagerService,
ha_ws_client: AsyncMock,
):
"""Test host connectivity check disabled."""
# pylint: disable=protected-access
client = coresys.homeassistant.websocket._client
# pylint: enable=protected-access
await coresys.host.network.load()
coresys.core.state = CoreState.RUNNING
await asyncio.sleep(0)
client.async_send_command.reset_mock()
ha_ws_client.async_send_command.reset_mock()
assert "connectivity_check" not in coresys.resolution.unsupported
assert coresys.host.network.connectivity is True
@ -223,7 +215,7 @@ async def test_host_connectivity_disabled(
await network_manager_service.ping()
assert coresys.host.network.connectivity is None
await asyncio.sleep(0)
client.async_send_command.assert_any_call(
ha_ws_client.async_send_command.assert_any_call(
{
"type": WSType.SUPERVISOR_EVENT,
"data": {
@ -235,13 +227,13 @@ async def test_host_connectivity_disabled(
)
assert "connectivity_check" in coresys.resolution.unsupported
client.async_send_command.reset_mock()
ha_ws_client.async_send_command.reset_mock()
network_manager_service.emit_properties_changed({"ConnectivityCheckEnabled": True})
await network_manager_service.ping()
await network_manager_service.ping()
assert coresys.host.network.connectivity is True
await asyncio.sleep(0)
client.async_send_command.assert_any_call(
ha_ws_client.async_send_command.assert_any_call(
{
"type": WSType.SUPERVISOR_EVENT,
"data": {

View File

@ -2,7 +2,7 @@
# pylint: disable=protected-access,import-error
import asyncio
from datetime import timedelta
from unittest.mock import AsyncMock, Mock, PropertyMock, patch
from unittest.mock import ANY, AsyncMock, Mock, PropertyMock, patch
from uuid import uuid4
from aiohttp.client_exceptions import ClientError
@ -736,7 +736,7 @@ async def test_job_cleanup(coresys: CoreSys, loop: asyncio.BaseEventLoop):
@Job(name="test_job_cleanup_execute", limit=JobExecutionLimit.ONCE)
async def execute(self):
"""Execute the class method."""
self.job = coresys.jobs.get_job()
self.job = coresys.jobs.current
await self.event.wait()
return True
@ -773,7 +773,7 @@ async def test_job_skip_cleanup(coresys: CoreSys, loop: asyncio.BaseEventLoop):
)
async def execute(self):
"""Execute the class method."""
self.job = coresys.jobs.get_job()
self.job = coresys.jobs.current
await self.event.wait()
return True
@ -940,3 +940,53 @@ async def test_execution_limit_group_throttle_rate_limit(
assert test1.call == 3
assert test2.call == 3
async def test_internal_jobs_no_notify(coresys: CoreSys):
    """Test internal jobs do not send any notifications."""

    class TestClass:
        """Test class."""

        def __init__(self, coresys: CoreSys):
            """Initialize the test class."""
            self.coresys = coresys

        @Job(name="test_internal_jobs_no_notify_internal", internal=True)
        async def execute_internal(self) -> bool:
            """Execute the class method."""
            return True

        @Job(name="test_internal_jobs_no_notify_default")
        async def execute_default(self) -> bool:
            """Execute the class method."""
            return True

    test1 = TestClass(coresys)
    client = coresys.homeassistant.websocket._client
    client.async_send_command.reset_mock()

    # Internal job: no websocket traffic at all.
    await test1.execute_internal()
    await asyncio.sleep(0)
    client.async_send_command.assert_not_called()

    # Default job: start and done events are both sent.
    await test1.execute_default()
    await asyncio.sleep(0)
    # Bug fix: the original line was a bare `call_count == 2` comparison with
    # no `assert`, so it never verified anything.
    assert client.async_send_command.call_count == 2
    client.async_send_command.assert_called_with(
        {
            "type": "supervisor/event",
            "data": {
                "event": "job",
                "data": {
                    "name": "test_internal_jobs_no_notify_default",
                    "reference": None,
                    "uuid": ANY,
                    "progress": 0,
                    "stage": None,
                    "done": True,
                    "parent_id": None,
                },
            },
        }
    )

View File

@ -1,5 +1,8 @@
"""Test the condition decorators."""
import asyncio
from unittest.mock import ANY
import pytest
# pylint: disable=protected-access,import-error
@ -31,13 +34,14 @@ async def test_job_done(coresys: CoreSys):
"""Test done set correctly with jobs."""
job = coresys.jobs.new_job(TEST_JOB)
assert not job.done
assert coresys.jobs.get_job() != job
assert not coresys.jobs.is_job
with job.start():
assert coresys.jobs.get_job() == job
assert coresys.jobs.is_job
assert coresys.jobs.current == job
assert not job.done
assert coresys.jobs.get_job() != job
assert not coresys.jobs.is_job
assert job.done
with pytest.raises(JobStartException):
@ -56,7 +60,7 @@ async def test_job_start_bad_parent(coresys: CoreSys):
pass
with job2.start():
assert coresys.jobs.get_job() == job2
assert coresys.jobs.current == job2
async def test_update_job(coresys: CoreSys):
@ -74,3 +78,107 @@ async def test_update_job(coresys: CoreSys):
with pytest.raises(ValueError):
job.progress = -10
async def test_notify_on_change(coresys: CoreSys):
    """Test jobs notify Home Assistant on changes."""

    def _expected_event(progress, stage, reference, done):
        """Build the supervisor/event message the job manager should send."""
        return {
            "type": "supervisor/event",
            "data": {
                "event": "job",
                "data": {
                    "name": TEST_JOB,
                    "reference": reference,
                    "uuid": ANY,
                    "progress": progress,
                    "stage": stage,
                    "done": done,
                    "parent_id": None,
                },
            },
        }

    send_command = coresys.homeassistant.websocket._client.async_send_command
    job = coresys.jobs.new_job(TEST_JOB)

    # Each field update triggers one notification reflecting the new state.
    job.progress = 50
    await asyncio.sleep(0)
    send_command.assert_called_with(_expected_event(50, None, None, None))

    job.stage = "test"
    await asyncio.sleep(0)
    send_command.assert_called_with(_expected_event(50, "test", None, None))

    job.reference = "test"
    await asyncio.sleep(0)
    send_command.assert_called_with(_expected_event(50, "test", "test", None))

    # Starting the job flips done to False; leaving the context marks it True.
    with job.start():
        await asyncio.sleep(0)
        send_command.assert_called_with(_expected_event(50, "test", "test", False))

    await asyncio.sleep(0)
    send_command.assert_called_with(_expected_event(50, "test", "test", True))

View File

@ -190,83 +190,81 @@ def _supervisor_event_message(event: str, data: dict[str, Any]) -> dict[str, Any
}
async def test_events_on_issue_changes(coresys: CoreSys):
async def test_events_on_issue_changes(coresys: CoreSys, ha_ws_client: AsyncMock):
"""Test events fired when an issue changes."""
with patch.object(
type(coresys.homeassistant.websocket), "async_send_message"
) as send_message:
# Creating an issue with a suggestion should fire exactly one issue changed event
assert coresys.resolution.issues == []
assert coresys.resolution.suggestions == []
coresys.resolution.create_issue(
IssueType.CORRUPT_REPOSITORY,
ContextType.STORE,
"test_repo",
[SuggestionType.EXECUTE_RESET],
)
await asyncio.sleep(0)
# Creating an issue with a suggestion should fire exactly one issue changed event
assert coresys.resolution.issues == []
assert coresys.resolution.suggestions == []
coresys.resolution.create_issue(
IssueType.CORRUPT_REPOSITORY,
ContextType.STORE,
"test_repo",
[SuggestionType.EXECUTE_RESET],
)
await asyncio.sleep(0)
assert len(coresys.resolution.issues) == 1
assert len(coresys.resolution.suggestions) == 1
issue = coresys.resolution.issues[0]
suggestion = coresys.resolution.suggestions[0]
issue_expected = {
"type": "corrupt_repository",
"context": "store",
"reference": "test_repo",
"uuid": issue.uuid,
}
suggestion_expected = {
"type": "execute_reset",
"context": "store",
"reference": "test_repo",
"uuid": suggestion.uuid,
}
send_message.assert_called_once_with(
_supervisor_event_message(
"issue_changed", issue_expected | {"suggestions": [suggestion_expected]}
)
)
assert len(coresys.resolution.issues) == 1
assert len(coresys.resolution.suggestions) == 1
issue = coresys.resolution.issues[0]
suggestion = coresys.resolution.suggestions[0]
issue_expected = {
"type": "corrupt_repository",
"context": "store",
"reference": "test_repo",
"uuid": issue.uuid,
}
suggestion_expected = {
"type": "execute_reset",
"context": "store",
"reference": "test_repo",
"uuid": suggestion.uuid,
}
assert _supervisor_event_message(
"issue_changed", issue_expected | {"suggestions": [suggestion_expected]}
) in [call.args[0] for call in ha_ws_client.async_send_command.call_args_list]
# Adding a suggestion that fixes the issue changes it
send_message.reset_mock()
coresys.resolution.suggestions = execute_remove = Suggestion(
SuggestionType.EXECUTE_REMOVE, ContextType.STORE, "test_repo"
)
await asyncio.sleep(0)
send_message.assert_called_once()
sent_data = send_message.call_args.args[0]
assert sent_data["type"] == "supervisor/event"
assert sent_data["data"]["event"] == "issue_changed"
assert sent_data["data"]["data"].items() >= issue_expected.items()
assert len(sent_data["data"]["data"]["suggestions"]) == 2
assert suggestion_expected in sent_data["data"]["data"]["suggestions"]
assert {
"type": "execute_remove",
"context": "store",
"reference": "test_repo",
"uuid": execute_remove.uuid,
} in sent_data["data"]["data"]["suggestions"]
# Adding a suggestion that fixes the issue changes it
ha_ws_client.async_send_command.reset_mock()
coresys.resolution.suggestions = execute_remove = Suggestion(
SuggestionType.EXECUTE_REMOVE, ContextType.STORE, "test_repo"
)
await asyncio.sleep(0)
messages = [
call.args[0]
for call in ha_ws_client.async_send_command.call_args_list
if call.args[0].get("data", {}).get("event") == "issue_changed"
]
assert len(messages) == 1
sent_data = messages[0]
assert sent_data["type"] == "supervisor/event"
assert sent_data["data"]["event"] == "issue_changed"
assert sent_data["data"]["data"].items() >= issue_expected.items()
assert len(sent_data["data"]["data"]["suggestions"]) == 2
assert suggestion_expected in sent_data["data"]["data"]["suggestions"]
assert {
"type": "execute_remove",
"context": "store",
"reference": "test_repo",
"uuid": execute_remove.uuid,
} in sent_data["data"]["data"]["suggestions"]
# Removing a suggestion that fixes the issue changes it again
send_message.reset_mock()
coresys.resolution.dismiss_suggestion(execute_remove)
await asyncio.sleep(0)
send_message.assert_called_once_with(
_supervisor_event_message(
"issue_changed", issue_expected | {"suggestions": [suggestion_expected]}
)
)
# Removing a suggestion that fixes the issue changes it again
ha_ws_client.async_send_command.reset_mock()
coresys.resolution.dismiss_suggestion(execute_remove)
await asyncio.sleep(0)
assert _supervisor_event_message(
"issue_changed", issue_expected | {"suggestions": [suggestion_expected]}
) in [call.args[0] for call in ha_ws_client.async_send_command.call_args_list]
# Applying a suggestion should only fire an issue removed event
send_message.reset_mock()
with patch("shutil.disk_usage", return_value=(42, 42, 2 * (1024.0**3))):
await coresys.resolution.apply_suggestion(suggestion)
# Applying a suggestion should only fire an issue removed event
ha_ws_client.async_send_command.reset_mock()
with patch("shutil.disk_usage", return_value=(42, 42, 2 * (1024.0**3))):
await coresys.resolution.apply_suggestion(suggestion)
await asyncio.sleep(0)
send_message.assert_called_once_with(
_supervisor_event_message("issue_removed", issue_expected)
)
await asyncio.sleep(0)
assert _supervisor_event_message("issue_removed", issue_expected) in [
call.args[0] for call in ha_ws_client.async_send_command.call_args_list
]
async def test_resolution_apply_suggestion_multiple_copies(coresys: CoreSys):