Fix and extend cloud backup support (#5464)

* Fix and extend cloud backup support

* Clean up task for cloud backup and remove by location

* Args to kwargs on backup methods

* Fix backup remove error test and typing clean up
Mike Degatano 2024-12-05 00:07:04 -05:00 committed by GitHub
parent 9b52fee0a3
commit 6e32144e9a
22 changed files with 587 additions and 335 deletions


@@ -1,5 +1,7 @@
 """Backups RESTful API."""
+from __future__ import annotations
+
 import asyncio
 from collections.abc import Callable
 import errno
@@ -14,7 +16,7 @@ from aiohttp.hdrs import CONTENT_DISPOSITION
 import voluptuous as vol

 from ..backups.backup import Backup
-from ..backups.const import LOCATION_CLOUD_BACKUP
+from ..backups.const import LOCATION_CLOUD_BACKUP, LOCATION_TYPE
 from ..backups.validate import ALL_FOLDERS, FOLDER_HOMEASSISTANT, days_until_stale
 from ..const import (
     ATTR_ADDONS,
@@ -23,7 +25,7 @@ from ..const import (
     ATTR_CONTENT,
     ATTR_DATE,
     ATTR_DAYS_UNTIL_STALE,
-    ATTR_FILENAME,
+    ATTR_EXTRA,
     ATTR_FOLDERS,
     ATTR_HOMEASSISTANT,
     ATTR_HOMEASSISTANT_EXCLUDE_DATABASE,
@@ -48,7 +50,12 @@ from ..exceptions import APIError, APIForbidden
 from ..jobs import JobSchedulerOptions
 from ..mounts.const import MountUsage
 from ..resolution.const import UnhealthyReason
-from .const import ATTR_BACKGROUND, ATTR_LOCATIONS, CONTENT_TYPE_TAR
+from .const import (
+    ATTR_ADDITIONAL_LOCATIONS,
+    ATTR_BACKGROUND,
+    ATTR_LOCATIONS,
+    CONTENT_TYPE_TAR,
+)
 from .utils import api_process, api_validate

 _LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -60,6 +67,14 @@ RE_BACKUP_FILENAME = re.compile(r"^[^\\\/]+\.tar$")
 # Remove: 2022.08
 _ALL_FOLDERS = ALL_FOLDERS + [FOLDER_HOMEASSISTANT]

+
+def _ensure_list(item: Any) -> list:
+    """Ensure value is a list."""
+    if not isinstance(item, list):
+        return [item]
+    return item
+
+
 # pylint: disable=no-value-for-parameter
 SCHEMA_RESTORE_FULL = vol.Schema(
     {
@@ -81,9 +96,12 @@ SCHEMA_BACKUP_FULL = vol.Schema(
         vol.Optional(ATTR_NAME): str,
         vol.Optional(ATTR_PASSWORD): vol.Maybe(str),
         vol.Optional(ATTR_COMPRESSED): vol.Maybe(vol.Boolean()),
-        vol.Optional(ATTR_LOCATION): vol.Maybe(str),
+        vol.Optional(ATTR_LOCATION): vol.All(
+            _ensure_list, [vol.Maybe(str)], vol.Unique()
+        ),
         vol.Optional(ATTR_HOMEASSISTANT_EXCLUDE_DATABASE): vol.Boolean(),
         vol.Optional(ATTR_BACKGROUND, default=False): vol.Boolean(),
+        vol.Optional(ATTR_EXTRA): dict,
     }
 )
@@ -106,12 +124,6 @@ SCHEMA_FREEZE = vol.Schema(
         vol.Optional(ATTR_TIMEOUT): vol.All(int, vol.Range(min=1)),
     }
 )
-
-SCHEMA_RELOAD = vol.Schema(
-    {
-        vol.Inclusive(ATTR_LOCATION, "file"): vol.Maybe(str),
-        vol.Inclusive(ATTR_FILENAME, "file"): vol.Match(RE_BACKUP_FILENAME),
-    }
-)

 class APIBackups(CoreSysAttributes):
@@ -177,13 +189,10 @@ class APIBackups(CoreSysAttributes):
         self.sys_backups.save_data()

     @api_process
-    async def reload(self, request: web.Request):
+    async def reload(self, _):
         """Reload backup list."""
-        body = await api_validate(SCHEMA_RELOAD, request)
-        self._validate_cloud_backup_location(request, body.get(ATTR_LOCATION))
-        backup = self._location_to_mount(body)
-
-        return await asyncio.shield(self.sys_backups.reload(**backup))
+        await asyncio.shield(self.sys_backups.reload())
+        return True

     @api_process
     async def backup_info(self, request):
@@ -217,27 +226,35 @@ class APIBackups(CoreSysAttributes):
             ATTR_REPOSITORIES: backup.repositories,
             ATTR_FOLDERS: backup.folders,
             ATTR_HOMEASSISTANT_EXCLUDE_DATABASE: backup.homeassistant_exclude_database,
+            ATTR_EXTRA: backup.extra,
         }

-    def _location_to_mount(self, body: dict[str, Any]) -> dict[str, Any]:
-        """Change location field to mount if necessary."""
-        if not body.get(ATTR_LOCATION) or body[ATTR_LOCATION] == LOCATION_CLOUD_BACKUP:
-            return body
-
-        body[ATTR_LOCATION] = self.sys_mounts.get(body[ATTR_LOCATION])
-        if body[ATTR_LOCATION].usage != MountUsage.BACKUP:
-            raise APIError(
-                f"Mount {body[ATTR_LOCATION].name} is not used for backups, cannot backup to there"
-            )
+    def _location_to_mount(self, location: str | None) -> LOCATION_TYPE:
+        """Convert a single location to a mount if possible."""
+        if not location or location == LOCATION_CLOUD_BACKUP:
+            return location
+
+        mount = self.sys_mounts.get(location)
+        if mount.usage != MountUsage.BACKUP:
+            raise APIError(
+                f"Mount {mount.name} is not used for backups, cannot backup to there"
+            )
+        return mount
+
+    def _location_field_to_mount(self, body: dict[str, Any]) -> dict[str, Any]:
+        """Change location field to mount if necessary."""
+        body[ATTR_LOCATION] = self._location_to_mount(body.get(ATTR_LOCATION))
         return body

     def _validate_cloud_backup_location(
-        self, request: web.Request, location: str | None
+        self, request: web.Request, location: list[str | None] | str | None
     ) -> None:
         """Cloud backup location is only available to Home Assistant."""
+        if not isinstance(location, list):
+            location = [location]
         if (
-            location == LOCATION_CLOUD_BACKUP
+            LOCATION_CLOUD_BACKUP in location
             and request.get(REQUEST_FROM) != self.sys_homeassistant
         ):
             raise APIForbidden(
@@ -278,10 +295,22 @@ class APIBackups(CoreSysAttributes):
     async def backup_full(self, request: web.Request):
         """Create full backup."""
         body = await api_validate(SCHEMA_BACKUP_FULL, request)
-        self._validate_cloud_backup_location(request, body.get(ATTR_LOCATION))
+
+        locations: list[LOCATION_TYPE] | None = None
+        if ATTR_LOCATION in body:
+            location_names: list[str | None] = body.pop(ATTR_LOCATION)
+            self._validate_cloud_backup_location(request, location_names)
+
+            locations = [
+                self._location_to_mount(location) for location in location_names
+            ]
+            body[ATTR_LOCATION] = locations.pop(0)
+            if locations:
+                body[ATTR_ADDITIONAL_LOCATIONS] = locations
+
         background = body.pop(ATTR_BACKGROUND)
         backup_task, job_id = await self._background_backup_task(
-            self.sys_backups.do_backup_full, **self._location_to_mount(body)
+            self.sys_backups.do_backup_full, **body
         )

         if background and not backup_task.done():
@@ -299,10 +328,22 @@ class APIBackups(CoreSysAttributes):
     async def backup_partial(self, request: web.Request):
         """Create a partial backup."""
         body = await api_validate(SCHEMA_BACKUP_PARTIAL, request)
-        self._validate_cloud_backup_location(request, body.get(ATTR_LOCATION))
+
+        locations: list[LOCATION_TYPE] | None = None
+        if ATTR_LOCATION in body:
+            location_names: list[str | None] = body.pop(ATTR_LOCATION)
+            self._validate_cloud_backup_location(request, location_names)
+
+            locations = [
+                self._location_to_mount(location) for location in location_names
+            ]
+            body[ATTR_LOCATION] = locations.pop(0)
+            if locations:
+                body[ATTR_ADDITIONAL_LOCATIONS] = locations
+
        background = body.pop(ATTR_BACKGROUND)
        backup_task, job_id = await self._background_backup_task(
-            self.sys_backups.do_backup_partial, **self._location_to_mount(body)
+            self.sys_backups.do_backup_partial, **body
        )

        if background and not backup_task.done():
@@ -370,9 +411,11 @@ class APIBackups(CoreSysAttributes):
         self._validate_cloud_backup_location(request, backup.location)

         return self.sys_backups.remove(backup)

+    @api_process
     async def download(self, request: web.Request):
         """Download a backup file."""
         backup = self._extract_slug(request)
+        self._validate_cloud_backup_location(request, backup.location)

         _LOGGER.info("Downloading backup %s", backup.slug)
         response = web.FileResponse(backup.tarfile)
@@ -385,7 +428,23 @@ class APIBackups(CoreSysAttributes):
     @api_process
     async def upload(self, request: web.Request):
         """Upload a backup file."""
-        with TemporaryDirectory(dir=str(self.sys_config.path_tmp)) as temp_dir:
+        location: LOCATION_TYPE = None
+        locations: list[LOCATION_TYPE] | None = None
+        tmp_path = self.sys_config.path_tmp
+        if ATTR_LOCATION in request.query:
+            location_names: list[str] = request.query.getall(ATTR_LOCATION)
+            self._validate_cloud_backup_location(request, location_names)
+            # Convert empty string to None if necessary
+            locations = [
+                self._location_to_mount(location) if location else None
+                for location in location_names
+            ]
+            location = locations.pop(0)
+
+        if location and location != LOCATION_CLOUD_BACKUP:
+            tmp_path = location.local_where
+
+        with TemporaryDirectory(dir=tmp_path.as_posix()) as temp_dir:
             tar_file = Path(temp_dir, "backup.tar")
             reader = await request.multipart()
             contents = await reader.next()
@@ -398,7 +457,10 @@ class APIBackups(CoreSysAttributes):
                     backup.write(chunk)

             except OSError as err:
-                if err.errno == errno.EBADMSG:
+                if err.errno == errno.EBADMSG and location in {
+                    LOCATION_CLOUD_BACKUP,
+                    None,
+                }:
                     self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE
                 _LOGGER.error("Can't write new backup file: %s", err)
                 return False
@@ -406,7 +468,11 @@ class APIBackups(CoreSysAttributes):
             except asyncio.CancelledError:
                 return False

-        backup = await asyncio.shield(self.sys_backups.import_backup(tar_file))
+        backup = await asyncio.shield(
+            self.sys_backups.import_backup(
+                tar_file, location=location, additional_locations=locations
+            )
+        )

         if backup:
             return {ATTR_SLUG: backup.slug}
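
Taken together, the API changes above mean the backup endpoints now accept `location` as either a single value or a list: the first entry becomes the primary location and the remaining entries are forwarded to the manager as `additional_locations`. A minimal client sketch of the new call shape (the URL, token, and mount name below are placeholders, not part of this diff; note also that `.cloud_backup` is rejected with 403 unless the caller is Home Assistant Core):

import asyncio
import aiohttp

SUPERVISOR_URL = "http://supervisor"  # hypothetical endpoint
TOKEN = "redacted"  # hypothetical auth token

async def backup_to_two_places() -> str:
    """Create a full backup in the local folder plus one extra location."""
    async with aiohttp.ClientSession() as session:
        resp = await session.post(
            f"{SUPERVISOR_URL}/backups/new/full",
            headers={"Authorization": f"Bearer {TOKEN}"},
            json={
                "name": "example",
                # First entry is the primary location (None = local backup
                # folder); the rest become additional_locations internally.
                "location": [None, "my_nas_mount"],
                # Free-form metadata stored with the backup (new in this PR).
                "extra": {"user": "example"},
            },
        )
        body = await resp.json()
        return body["data"]["slug"]

asyncio.run(backup_to_two_places())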


@@ -12,6 +12,7 @@ CONTENT_TYPE_X_LOG = "text/x-log"

 COOKIE_INGRESS = "ingress_session"

+ATTR_ADDITIONAL_LOCATIONS = "additional_locations"
 ATTR_AGENT_VERSION = "agent_version"
 ATTR_APPARMOR_VERSION = "apparmor_version"
 ATTR_ATTRIBUTES = "attributes"


@@ -6,15 +6,14 @@ from collections import defaultdict
 from collections.abc import Awaitable
 from copy import deepcopy
 from datetime import timedelta
-from functools import cached_property
 import io
 import json
 import logging
-from pathlib import Path, PurePath
+from pathlib import Path
 import tarfile
 from tempfile import TemporaryDirectory
 import time
-from typing import Any, Literal
+from typing import Any, Self

 from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
 from cryptography.hazmat.backends import default_backend
@@ -32,6 +31,7 @@ from ..const import (
     ATTR_DATE,
     ATTR_DOCKER,
     ATTR_EXCLUDE_DATABASE,
+    ATTR_EXTRA,
     ATTR_FOLDERS,
     ATTR_HOMEASSISTANT,
     ATTR_NAME,
@@ -48,7 +48,6 @@ from ..const import (
     CRYPTO_AES128,
 )
 from ..coresys import CoreSys
-from ..docker.const import PATH_BACKUP, PATH_CLOUD_BACKUP
 from ..exceptions import AddonsError, BackupError, BackupInvalidError
 from ..jobs.const import JOB_GROUP_BACKUP
 from ..jobs.decorator import Job
@@ -63,6 +62,11 @@ from .validate import SCHEMA_BACKUP

 _LOGGER: logging.Logger = logging.getLogger(__name__)

+
+def location_sort_key(value: str | None) -> str:
+    """Sort locations, None is always first else alphabetical."""
+    return value if value else ""
+
+
 class Backup(JobGroup):
     """A single Supervisor backup."""

@@ -78,15 +82,13 @@ class Backup(JobGroup):
         super().__init__(
             coresys, JOB_GROUP_BACKUP.format_map(defaultdict(str, slug=slug)), slug
         )
-        self._tarfile: Path = tar_file
         self._data: dict[str, Any] = data or {ATTR_SLUG: slug}
         self._tmp = None
         self._outer_secure_tarfile: SecureTarFile | None = None
         self._outer_secure_tarfile_tarfile: tarfile.TarFile | None = None
         self._key: bytes | None = None
         self._aes: Cipher | None = None
-        # Order is maintained in dict keys so this is effectively an ordered set
-        self._locations: dict[str | None, Literal[None]] = {location: None}
+        self._locations: dict[str | None, Path] = {location: tar_file}

     @property
     def version(self) -> int:
@@ -172,6 +174,11 @@ class Backup(JobGroup):
         """Return backup Supervisor version."""
         return self._data[ATTR_SUPERVISOR_VERSION]

+    @property
+    def extra(self) -> dict:
+        """Get extra metadata added by client."""
+        return self._data[ATTR_EXTRA]
+
     @property
     def docker(self) -> dict[str, Any]:
         """Return backup Docker config data."""
@@ -188,39 +195,23 @@ class Backup(JobGroup):
         return self.locations[0]

     @property
-    def all_locations(self) -> set[str | None]:
+    def all_locations(self) -> dict[str | None, Path]:
         """Return all locations this backup was found in."""
-        return self._locations.keys()
+        return self._locations

     @property
     def locations(self) -> list[str | None]:
         """Return locations this backup was found in except cloud backup (unless that's the only one)."""
         if len(self._locations) == 1:
             return list(self._locations)
-        return [
-            location
-            for location in self._locations
-            if location != LOCATION_CLOUD_BACKUP
-        ]
-
-    @cached_property
-    def container_path(self) -> PurePath | None:
-        """Return where this is made available in managed containers (core, addons, etc.).
-
-        This returns none if the tarfile is not in a place mapped into other containers.
-        """
-        path_map: dict[Path, PurePath] = {
-            self.sys_config.path_backup: PATH_BACKUP,
-            self.sys_config.path_core_backup: PATH_CLOUD_BACKUP,
-        } | {
-            mount.local_where: mount.container_where
-            for mount in self.sys_mounts.backup_mounts
-        }
-        for source, target in path_map.items():
-            if self.tarfile.is_relative_to(source):
-                return target / self.tarfile.relative_to(source)
-        return None
+        return sorted(
+            [
+                location
+                for location in self._locations
+                if location != LOCATION_CLOUD_BACKUP
+            ],
+            key=location_sort_key,
+        )

     @property
     def size(self) -> float:
@@ -237,7 +228,7 @@ class Backup(JobGroup):
     @property
     def tarfile(self) -> Path:
         """Return path to backup tarfile."""
-        return self._tarfile
+        return self._locations[self.location]

     @property
     def is_current(self) -> bool:
@@ -251,9 +242,21 @@ class Backup(JobGroup):
         """Returns a copy of the data."""
         return deepcopy(self._data)

-    def add_location(self, location: str | None) -> None:
-        """Add a location the backup exists."""
-        self._locations[location] = None
+    def __eq__(self, other: Any) -> bool:
+        """Return true if backups have same metadata."""
+        return isinstance(other, Backup) and self._data == other._data
+
+    def consolidate(self, backup: Self) -> None:
+        """Consolidate two backups with same slug in different locations."""
+        if self.slug != backup.slug:
+            raise ValueError(
+                f"Backup {self.slug} and {backup.slug} are not the same backup"
+            )
+        if self != backup:
+            raise BackupInvalidError(
+                f"Backup in {backup.location} and {self.location} both have slug {self.slug} but are not the same!"
+            )
+        self._locations.update(backup.all_locations)

     def new(
         self,
@@ -262,6 +265,7 @@ class Backup(JobGroup):
         sys_type: BackupType,
         password: str | None = None,
         compressed: bool = True,
+        extra: dict | None = None,
     ):
         """Initialize a new backup."""
         # Init metadata
@@ -270,6 +274,7 @@ class Backup(JobGroup):
         self._data[ATTR_DATE] = date
         self._data[ATTR_TYPE] = sys_type
         self._data[ATTR_SUPERVISOR_VERSION] = self.sys_supervisor.version
+        self._data[ATTR_EXTRA] = extra or {}

         # Add defaults
         self._data = SCHEMA_BACKUP(self._data)
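
One subtlety worth calling out: `locations` now returns a stable ordering via `location_sort_key`, which maps `None` to the empty string so the local backup folder always sorts ahead of named mounts. A small self-contained sketch of that ordering (the mount names are hypothetical):

def location_sort_key(value: str | None) -> str:
    """Sort locations, None is always first else alphabetical."""
    return value if value else ""

# The local folder (None) sorts before any named mount location:
print(sorted(["nas_share", None, "offsite"], key=location_sort_key))
# -> [None, 'nas_share', 'offsite']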


@@ -1,11 +1,16 @@
 """Backup consts."""

 from enum import StrEnum
+from typing import Literal
+
+from ..mounts.mount import Mount

 BUF_SIZE = 2**20 * 4  # 4MB
 DEFAULT_FREEZE_TIMEOUT = 600
 LOCATION_CLOUD_BACKUP = ".cloud_backup"

+LOCATION_TYPE = Mount | Literal[LOCATION_CLOUD_BACKUP] | None
+

 class BackupType(StrEnum):
     """Backup type enum."""
@@ -23,6 +28,7 @@ class BackupJobStage(StrEnum):
     FINISHING_FILE = "finishing_file"
     FOLDERS = "folders"
     HOME_ASSISTANT = "home_assistant"
+    COPY_ADDITONAL_LOCATIONS = "copy_additional_locations"
     AWAIT_ADDON_RESTARTS = "await_addon_restarts"


@@ -7,28 +7,23 @@ from collections.abc import Awaitable, Iterable
 import errno
 import logging
 from pathlib import Path
-from typing import Literal
+from shutil import copy

 from ..addons.addon import Addon
 from ..const import (
-    ATTR_DATA,
     ATTR_DAYS_UNTIL_STALE,
-    ATTR_JOB_ID,
-    ATTR_PATH,
-    ATTR_SLUG,
-    ATTR_TYPE,
     FILE_HASSIO_BACKUPS,
     FOLDER_HOMEASSISTANT,
     CoreState,
 )
 from ..dbus.const import UnitActiveState
 from ..exceptions import (
+    BackupDataDiskBadMessageError,
     BackupError,
     BackupInvalidError,
     BackupJobError,
     BackupMountDownError,
 )
-from ..homeassistant.const import WSType
 from ..jobs.const import JOB_GROUP_BACKUP_MANAGER, JobCondition, JobExecutionLimit
 from ..jobs.decorator import Job
 from ..jobs.job_group import JobGroup
@@ -42,6 +37,7 @@ from .backup import Backup
 from .const import (
     DEFAULT_FREEZE_TIMEOUT,
     LOCATION_CLOUD_BACKUP,
+    LOCATION_TYPE,
     BackupJobStage,
     BackupType,
     RestoreJobStage,
@@ -64,9 +60,9 @@ class BackupManager(FileConfiguration, JobGroup):
         self._thaw_event: asyncio.Event = asyncio.Event()

     @property
-    def list_backups(self) -> set[Backup]:
+    def list_backups(self) -> list[Backup]:
         """Return a list of all backup objects."""
-        return set(self._backups.values())
+        return self._backups.values()

     @property
     def days_until_stale(self) -> int:
@@ -96,10 +92,7 @@ class BackupManager(FileConfiguration, JobGroup):

     def _get_base_path(
         self,
-        location: Mount
-        | Literal[LOCATION_CLOUD_BACKUP]
-        | type[DEFAULT]
-        | None = DEFAULT,
+        location: LOCATION_TYPE | type[DEFAULT] = DEFAULT,
     ) -> Path:
         """Get base path for backup using location or default location."""
         if location == LOCATION_CLOUD_BACKUP:
@@ -119,10 +112,7 @@ class BackupManager(FileConfiguration, JobGroup):

     def _get_location_name(
         self,
-        location: Mount
-        | Literal[LOCATION_CLOUD_BACKUP]
-        | type[DEFAULT]
-        | None = DEFAULT,
+        location: LOCATION_TYPE | type[DEFAULT] = DEFAULT,
     ) -> str | None:
         """Get name of location (or None for local backup folder)."""
         if location == LOCATION_CLOUD_BACKUP:
@@ -169,7 +159,10 @@ class BackupManager(FileConfiguration, JobGroup):
             if path.is_dir():
                 return path.glob("*.tar")
         except OSError as err:
-            if err.errno == errno.EBADMSG and path == self.sys_config.path_backup:
+            if err.errno == errno.EBADMSG and path in {
+                self.sys_config.path_backup,
+                self.sys_config.path_core_backup,
+            }:
                 self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE
             _LOGGER.error("Could not list backups from %s: %s", path.as_posix(), err)
@@ -181,10 +174,8 @@ class BackupManager(FileConfiguration, JobGroup):
         sys_type: BackupType,
         password: str | None,
         compressed: bool = True,
-        location: Mount
-        | Literal[LOCATION_CLOUD_BACKUP]
-        | type[DEFAULT]
-        | None = DEFAULT,
+        location: LOCATION_TYPE | type[DEFAULT] = DEFAULT,
+        extra: dict | None = None,
     ) -> Backup:
         """Initialize a new backup object from name.
@@ -196,7 +187,7 @@ class BackupManager(FileConfiguration, JobGroup):
         # init object
         backup = Backup(self.coresys, tar_file, slug, self._get_location_name(location))
-        backup.new(name, date_str, sys_type, password, compressed)
+        backup.new(name, date_str, sys_type, password, compressed, extra)

         # Add backup ID to job
         self.sys_jobs.current.reference = backup.slug
@@ -217,12 +208,9 @@ class BackupManager(FileConfiguration, JobGroup):
     async def reload(
         self,
-        location: Mount
-        | Literal[LOCATION_CLOUD_BACKUP]
-        | type[DEFAULT]
-        | None = DEFAULT,
+        location: LOCATION_TYPE | type[DEFAULT] = DEFAULT,
         filename: str | None = None,
-    ) -> None:
+    ) -> bool:
         """Load exists backups."""

         async def _load_backup(location: str | None, tar_file: Path) -> bool:
@@ -230,12 +218,23 @@ class BackupManager(FileConfiguration, JobGroup):
             backup = Backup(self.coresys, tar_file, "temp", location)
             if await backup.load():
                 if backup.slug in self._backups:
-                    self._backups[backup.slug].add_location(location)
+                    try:
+                        self._backups[backup.slug].consolidate(backup)
+                    except BackupInvalidError as err:
+                        _LOGGER.error(
+                            "Ignoring backup %s in %s due to: %s",
+                            backup.slug,
+                            backup.location,
+                            err,
+                        )
+                        return False
                 else:
                     self._backups[backup.slug] = Backup(
                         self.coresys, tar_file, backup.slug, location, backup.data
                     )
                 return True
             return False

         if location != DEFAULT and filename:
@@ -256,25 +255,91 @@ class BackupManager(FileConfiguration, JobGroup):
             await asyncio.wait(tasks)
         return True

-    def remove(self, backup: Backup) -> bool:
+    def remove(
+        self,
+        backup: Backup,
+        locations: list[LOCATION_TYPE] | None = None,
+    ) -> bool:
         """Remove a backup."""
-        try:
-            backup.tarfile.unlink()
-            self._backups.pop(backup.slug, None)
-            _LOGGER.info("Removed backup file %s", backup.slug)
-
-        except OSError as err:
-            if (
-                err.errno == errno.EBADMSG
-                and backup.tarfile.parent == self.sys_config.path_backup
-            ):
-                self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE
-            _LOGGER.error("Can't remove backup %s: %s", backup.slug, err)
-            return False
+        targets = (
+            [
+                self._get_location_name(location)
+                for location in locations
+                if location in backup.all_locations
+            ]
+            if locations
+            else list(backup.all_locations.keys())
+        )
+        for location in targets:
+            try:
+                backup.all_locations[location].unlink()
+                del backup.all_locations[location]
+            except OSError as err:
+                if err.errno == errno.EBADMSG and location in {
+                    None,
+                    LOCATION_CLOUD_BACKUP,
+                }:
+                    self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE
+                _LOGGER.error("Can't remove backup %s: %s", backup.slug, err)
+                return False
+
+        # If backup has been removed from all locations, remove it from cache
+        if not backup.all_locations:
+            del self._backups[backup.slug]

         return True

-    async def import_backup(self, tar_file: Path) -> Backup | None:
+    async def _copy_to_additional_locations(
+        self,
+        backup: Backup,
+        locations: list[LOCATION_TYPE],
+    ):
+        """Copy a backup file to additional locations."""
+
+        def copy_to_additional_locations() -> dict[str | None, Path]:
+            """Copy backup file to additional locations."""
+            all_locations: dict[str | None, Path] = {}
+            for location in locations:
+                try:
+                    if location == LOCATION_CLOUD_BACKUP:
+                        all_locations[LOCATION_CLOUD_BACKUP] = Path(
+                            copy(backup.tarfile, self.sys_config.path_core_backup)
+                        )
+                    elif location:
+                        all_locations[location.name] = Path(
+                            copy(backup.tarfile, location.local_where)
+                        )
+                    else:
+                        all_locations[None] = Path(
+                            copy(backup.tarfile, self.sys_config.path_backup)
+                        )
+                except OSError as err:
+                    msg = f"Could not copy backup to {location.name if isinstance(location, Mount) else location} due to: {err!s}"
+
+                    if err.errno == errno.EBADMSG and location in {
+                        LOCATION_CLOUD_BACKUP,
+                        None,
+                    }:
+                        raise BackupDataDiskBadMessageError(msg, _LOGGER.error) from err
+                    raise BackupError(msg, _LOGGER.error) from err
+
+            return all_locations
+
+        try:
+            backup.all_locations.update(
+                await self.sys_run_in_executor(copy_to_additional_locations)
+            )
+        except BackupDataDiskBadMessageError:
+            self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE
+            raise
+
+    @Job(name="backup_manager_import_backup")
+    async def import_backup(
+        self,
+        tar_file: Path,
+        location: LOCATION_TYPE = None,
+        additional_locations: list[LOCATION_TYPE] | None = None,
+    ) -> Backup | None:
         """Check backup tarfile and import it."""
         backup = Backup(self.coresys, tar_file, "temp", None)
@@ -282,18 +347,13 @@ class BackupManager(FileConfiguration, JobGroup):
         if not await backup.load():
             return None

-        # Already exists?
-        if backup.slug in self._backups:
-            _LOGGER.warning("Backup %s already exists! overwriting", backup.slug)
-            self.remove(self.get(backup.slug))
-
-        # Move backup to backup
-        tar_origin = Path(self.sys_config.path_backup, f"{backup.slug}.tar")
+        # Move backup to destination folder
+        tar_origin = Path(self._get_base_path(location), f"{backup.slug}.tar")
         try:
             backup.tarfile.rename(tar_origin)

         except OSError as err:
-            if err.errno == errno.EBADMSG:
+            if err.errno == errno.EBADMSG and location in {LOCATION_CLOUD_BACKUP, None}:
                 self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE
             _LOGGER.error("Can't move backup file to storage: %s", err)
             return None
@@ -301,10 +361,30 @@ class BackupManager(FileConfiguration, JobGroup):
         # Load new backup
         backup = Backup(self.coresys, tar_origin, backup.slug, None, backup.data)
         if not await backup.load():
+            # Remove invalid backup from location it was moved to
+            backup.tarfile.unlink()
             return None
         _LOGGER.info("Successfully imported %s", backup.slug)

-        self._backups[backup.slug] = backup
+        # Already exists?
+        if (
+            backup.slug in self._backups
+            and backup.all_locations != self._backups[backup.slug].all_locations
+        ):
+            _LOGGER.warning("Backup %s already exists! consolidating", backup.slug)
+            try:
+                self._backups[backup.slug].consolidate(backup)
+            except BackupInvalidError as err:
+                backup.tarfile.unlink()
+                raise BackupInvalidError(
+                    f"Cannot import backup {backup.slug} due to: {err!s}", _LOGGER.error
+                ) from err
+        else:
+            self._backups[backup.slug] = backup
+
+        if additional_locations:
+            await self._copy_to_additional_locations(backup, additional_locations)
+
         return backup

     async def _do_backup(
@@ -314,6 +394,7 @@ class BackupManager(FileConfiguration, JobGroup):
         folder_list: list[str],
         homeassistant: bool,
         homeassistant_exclude_database: bool | None,
+        additional_locations: list[LOCATION_TYPE] | None = None,
     ) -> Backup | None:
         """Create a backup.
@@ -358,16 +439,15 @@ class BackupManager(FileConfiguration, JobGroup):
                 return None
             else:
                 self._backups[backup.slug] = backup
-                await self.sys_homeassistant.websocket.async_send_message(
-                    {
-                        ATTR_TYPE: WSType.BACKUP_COMPLETE,
-                        ATTR_DATA: {
-                            ATTR_JOB_ID: self.sys_jobs.current.uuid,
-                            ATTR_SLUG: backup.slug,
-                            ATTR_PATH: backup.container_path.as_posix(),
-                        },
-                    }
-                )
+
+                if additional_locations:
+                    self._change_stage(BackupJobStage.COPY_ADDITONAL_LOCATIONS, backup)
+                    try:
+                        await self._copy_to_additional_locations(
+                            backup, additional_locations
+                        )
+                    except BackupError as err:
+                        self.sys_jobs.capture_error(err)

             if addon_start_tasks:
                 self._change_stage(BackupJobStage.AWAIT_ADDON_RESTARTS, backup)
@@ -388,13 +468,13 @@ class BackupManager(FileConfiguration, JobGroup):
     async def do_backup_full(
         self,
         name: str = "",
+        *,
         password: str | None = None,
         compressed: bool = True,
-        location: Mount
-        | Literal[LOCATION_CLOUD_BACKUP]
-        | type[DEFAULT]
-        | None = DEFAULT,
+        location: LOCATION_TYPE | type[DEFAULT] = DEFAULT,
         homeassistant_exclude_database: bool | None = None,
+        extra: dict | None = None,
+        additional_locations: list[LOCATION_TYPE] | None = None,
     ) -> Backup | None:
         """Create a full backup."""
         if self._get_base_path(location) in {
@@ -406,7 +486,7 @@ class BackupManager(FileConfiguration, JobGroup):
             )

         backup = self._create_backup(
-            name, BackupType.FULL, password, compressed, location
+            name, BackupType.FULL, password, compressed, location, extra
         )

         _LOGGER.info("Creating new full backup with slug %s", backup.slug)
@@ -416,6 +496,7 @@ class BackupManager(FileConfiguration, JobGroup):
             ALL_FOLDERS,
             True,
             homeassistant_exclude_database,
+            additional_locations,
         )
         if backup:
             _LOGGER.info("Creating full backup with slug %s completed", backup.slug)
@@ -431,16 +512,16 @@ class BackupManager(FileConfiguration, JobGroup):
     async def do_backup_partial(
         self,
         name: str = "",
+        *,
         addons: list[str] | None = None,
         folders: list[str] | None = None,
         password: str | None = None,
         homeassistant: bool = False,
         compressed: bool = True,
-        location: Mount
-        | Literal[LOCATION_CLOUD_BACKUP]
-        | type[DEFAULT]
-        | None = DEFAULT,
+        location: LOCATION_TYPE | type[DEFAULT] = DEFAULT,
         homeassistant_exclude_database: bool | None = None,
+        extra: dict | None = None,
+        additional_locations: list[LOCATION_TYPE] | None = None,
     ) -> Backup | None:
         """Create a partial backup."""
         if self._get_base_path(location) in {
@@ -463,7 +544,7 @@ class BackupManager(FileConfiguration, JobGroup):
             _LOGGER.error("Nothing to create backup for")

         backup = self._create_backup(
-            name, BackupType.PARTIAL, password, compressed, location
+            name, BackupType.PARTIAL, password, compressed, location, extra
         )

         _LOGGER.info("Creating new partial backup with slug %s", backup.slug)
@@ -476,7 +557,12 @@ class BackupManager(FileConfiguration, JobGroup):
                 _LOGGER.warning("Add-on %s not found/installed", addon_slug)

         backup = await self._do_backup(
-            backup, addon_list, folders, homeassistant, homeassistant_exclude_database
+            backup,
+            addon_list,
+            folders,
+            homeassistant,
+            homeassistant_exclude_database,
+            additional_locations,
         )
         if backup:
             _LOGGER.info("Creating partial backup with slug %s completed", backup.slug)


@@ -16,6 +16,7 @@ from ..const import (
     ATTR_DAYS_UNTIL_STALE,
     ATTR_DOCKER,
     ATTR_EXCLUDE_DATABASE,
+    ATTR_EXTRA,
     ATTR_FOLDERS,
     ATTR_HOMEASSISTANT,
     ATTR_NAME,
@@ -132,6 +133,7 @@ SCHEMA_BACKUP = vol.Schema(
             unique_addons,
         ),
         vol.Optional(ATTR_REPOSITORIES, default=list): repositories,
+        vol.Optional(ATTR_EXTRA, default=dict): dict,
     },
     extra=vol.ALLOW_EXTRA,
 )


@@ -173,6 +173,7 @@ ATTR_ENABLED = "enabled"
 ATTR_ENVIRONMENT = "environment"
 ATTR_EVENT = "event"
 ATTR_EXCLUDE_DATABASE = "exclude_database"
+ATTR_EXTRA = "extra"
 ATTR_FEATURES = "features"
 ATTR_FILENAME = "filename"
 ATTR_FLAGS = "flags"


@@ -428,7 +428,6 @@ class DockerAddon(DockerInterface):
                     target=addon_mapping[MappingType.BACKUP].path
                     or PATH_BACKUP.as_posix(),
                     read_only=addon_mapping[MappingType.BACKUP].read_only,
-                    propagation=PropagationMode.RSLAVE,
                 )
             )


@@ -22,8 +22,6 @@ from .const import (
     MOUNT_DEV,
     MOUNT_MACHINE_ID,
     MOUNT_UDEV,
-    PATH_BACKUP,
-    PATH_CLOUD_BACKUP,
     PATH_MEDIA,
     PATH_PUBLIC_CONFIG,
     PATH_SHARE,
@@ -132,19 +130,6 @@ class DockerHomeAssistant(DockerInterface):
                 read_only=False,
                 propagation=PropagationMode.RSLAVE.value,
             ),
-            Mount(
-                type=MountType.BIND,
-                source=self.sys_config.path_extern_backup.as_posix(),
-                target=PATH_BACKUP.as_posix(),
-                read_only=False,
-                propagation=PropagationMode.RSLAVE.value,
-            ),
-            Mount(
-                type=MountType.BIND,
-                source=self.sys_config.path_extern_core_backup.as_posix(),
-                target=PATH_CLOUD_BACKUP.as_posix(),
-                read_only=False,
-            ),
             # Configuration audio
             Mount(
                 type=MountType.BIND,


@@ -645,6 +645,10 @@ class BackupMountDownError(BackupError):
     """Raise if mount specified for backup is down."""


+class BackupDataDiskBadMessageError(BackupError):
+    """Raise if bad message error received from data disk during backup."""
+
+
 class BackupJobError(BackupError, JobException):
     """Raise on Backup job error."""


@@ -32,7 +32,6 @@ class WSType(StrEnum):
     SUPERVISOR_EVENT = "supervisor/event"
     BACKUP_START = "backup/start"
     BACKUP_END = "backup/end"
-    BACKUP_COMPLETE = "backup/supervisor/backup_complete"


 class WSEvent(StrEnum):


@@ -34,7 +34,6 @@ MIN_VERSION = {
     WSType.SUPERVISOR_EVENT: "2021.2.4",
     WSType.BACKUP_START: "2022.1.0",
     WSType.BACKUP_END: "2022.1.0",
-    WSType.BACKUP_COMPLETE: "2025.11.99",
 }

 _LOGGER: logging.Logger = logging.getLogger(__name__)


@@ -2,10 +2,11 @@
 import asyncio
 from collections.abc import Awaitable
-from datetime import timedelta
+from datetime import datetime, timedelta
 import logging

 from ..addons.const import ADDON_UPDATE_CONDITIONS
+from ..backups.const import LOCATION_CLOUD_BACKUP
 from ..const import AddonState
 from ..coresys import CoreSysAttributes
 from ..exceptions import AddonsError, HomeAssistantError, ObserverError
@@ -42,8 +43,12 @@ RUN_WATCHDOG_HOMEASSISTANT_API = 120
 RUN_WATCHDOG_ADDON_APPLICATON = 120
 RUN_WATCHDOG_OBSERVER_APPLICATION = 180

+RUN_CORE_BACKUP_CLEANUP = 86200
+
 PLUGIN_AUTO_UPDATE_CONDITIONS = PLUGIN_UPDATE_CONDITIONS + [JobCondition.RUNNING]

+OLD_BACKUP_THRESHOLD = timedelta(days=2)
+

 class Tasks(CoreSysAttributes):
     """Handle Tasks inside Supervisor."""
@@ -83,6 +88,11 @@ class Tasks(CoreSysAttributes):
             self._watchdog_addon_application, RUN_WATCHDOG_ADDON_APPLICATON
         )

+        # Cleanup
+        self.sys_scheduler.register_task(
+            self._core_backup_cleanup, RUN_CORE_BACKUP_CLEANUP
+        )
+
         _LOGGER.info("All core tasks are scheduled")

     @Job(
@@ -343,3 +353,15 @@ class Tasks(CoreSysAttributes):
         # If there's a new version of supervisor, start update immediately
         if self.sys_supervisor.need_update:
             await self._update_supervisor()
+
+    @Job(name="tasks_core_backup_cleanup", conditions=[JobCondition.HEALTHY])
+    async def _core_backup_cleanup(self) -> None:
+        """Core backup is intended for transient use, remove any old backups that got left behind."""
+        old_backups = [
+            backup
+            for backup in self.sys_backups.list_backups
+            if LOCATION_CLOUD_BACKUP in backup.all_locations
+            and datetime.fromisoformat(backup.date) < utcnow() - OLD_BACKUP_THRESHOLD
+        ]
+        for backup in old_backups:
+            self.sys_backups.remove(backup, [LOCATION_CLOUD_BACKUP])
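
The cleanup task registered above runs roughly once a day (`RUN_CORE_BACKUP_CLEANUP = 86200` seconds, just under 24 hours) and prunes cloud backups older than `OLD_BACKUP_THRESHOLD`. The age check is plain datetime arithmetic on the ISO-8601 `date` string stored in the backup metadata; a standalone sketch of just that comparison:

from datetime import datetime, timedelta, timezone

OLD_BACKUP_THRESHOLD = timedelta(days=2)

def is_stale(backup_date: str, now: datetime) -> bool:
    """Return True when a cloud backup is past the retention threshold."""
    return datetime.fromisoformat(backup_date) < now - OLD_BACKUP_THRESHOLD

now = datetime.now(timezone.utc)
print(is_stale((now - timedelta(days=3)).isoformat(), now))  # True
print(is_stale(now.isoformat(), now))                        # False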


@@ -141,15 +141,6 @@ class MountManager(FileConfiguration, CoreSysAttributes):
                 ]
             )

-        # Bind all backup mounts to directories in backup
-        if self.backup_mounts:
-            await asyncio.wait(
-                [
-                    self.sys_create_task(self._bind_backup(mount))
-                    for mount in self.backup_mounts
-                ]
-            )
-
     @Job(name="mount_manager_reload", conditions=[JobCondition.MOUNT_AVAILABLE])
     async def reload(self) -> None:
         """Update mounts info via dbus and reload failed mounts."""
@@ -215,8 +206,6 @@ class MountManager(FileConfiguration, CoreSysAttributes):
             await self._bind_media(mount)
         elif mount.usage == MountUsage.SHARE:
             await self._bind_share(mount)
-        elif mount.usage == MountUsage.BACKUP:
-            await self._bind_backup(mount)

     @Job(
         name="mount_manager_remove_mount",
@@ -269,10 +258,6 @@ class MountManager(FileConfiguration, CoreSysAttributes):
         if (bound_mount := self._bound_mounts.get(name)) and bound_mount.emergency:
             await self._bind_mount(bound_mount.mount, bound_mount.bind_mount.where)

-    async def _bind_backup(self, mount: Mount) -> None:
-        """Bind a backup mount to backup directory."""
-        await self._bind_mount(mount, self.sys_config.path_extern_backup / mount.name)
-
     async def _bind_media(self, mount: Mount) -> None:
         """Bind a media mount to media directory."""
         await self._bind_mount(mount, self.sys_config.path_extern_media / mount.name)


@@ -30,7 +30,7 @@ from ..dbus.const import (
     UnitActiveState,
 )
 from ..dbus.systemd import SystemdUnit
-from ..docker.const import PATH_BACKUP, PATH_MEDIA, PATH_SHARE
+from ..docker.const import PATH_MEDIA, PATH_SHARE
 from ..exceptions import (
     DBusError,
     DBusSystemdNoSuchUnit,
@@ -171,8 +171,6 @@ class Mount(CoreSysAttributes, ABC):
         This returns none if it is not made available in managed containers.
         """
         match self.usage:
-            case MountUsage.BACKUP:
-                return PurePath(PATH_BACKUP, self.name)
             case MountUsage.MEDIA:
                 return PurePath(PATH_MEDIA, self.name)
             case MountUsage.SHARE:


@@ -6,6 +6,7 @@ from shutil import copy
 from typing import Any
 from unittest.mock import ANY, AsyncMock, PropertyMock, patch

+from aiohttp import MultipartWriter
 from aiohttp.test_utils import TestClient
 from awesomeversion import AwesomeVersion
 import pytest
@@ -499,65 +500,10 @@ async def test_reload(
     assert backup.locations == [location]


-@pytest.mark.parametrize(
-    ("folder", "location"), [("backup", None), ("core/backup", ".cloud_backup")]
-)
-async def test_partial_reload(
-    request: pytest.FixtureRequest,
-    api_client: TestClient,
-    coresys: CoreSys,
-    tmp_supervisor_data: Path,
-    folder: str,
-    location: str | None,
-):
-    """Test partial backups reload."""
-    assert not coresys.backups.list_backups
-
-    backup_file = get_fixture_path("backup_example.tar")
-    copy(backup_file, tmp_supervisor_data / folder)
-
-    resp = await api_client.post(
-        "/backups/reload", json={"location": location, "filename": "backup_example.tar"}
-    )
-    assert resp.status == 200
-
-    assert len(coresys.backups.list_backups) == 1
-    assert (backup := coresys.backups.get("7fed74c8"))
-    assert backup.location == location
-    assert backup.locations == [location]
-
-
-async def test_invalid_reload(api_client: TestClient):
-    """Test invalid reload."""
-    resp = await api_client.post("/backups/reload", json={"location": "no_filename"})
-    assert resp.status == 400
-
-    resp = await api_client.post(
-        "/backups/reload", json={"filename": "no_location.tar"}
-    )
-    assert resp.status == 400
-
-    resp = await api_client.post(
-        "/backups/reload", json={"location": None, "filename": "no/sub/paths.tar"}
-    )
-    assert resp.status == 400
-
-    resp = await api_client.post(
-        "/backups/reload", json={"location": None, "filename": "not_tar.tar.gz"}
-    )
-    assert resp.status == 400
-
-
 @pytest.mark.usefixtures("install_addon_ssh")
-@pytest.mark.parametrize("api_client", TEST_ADDON_SLUG, indirect=True)
+@pytest.mark.parametrize("api_client", [TEST_ADDON_SLUG], indirect=True)
 async def test_cloud_backup_core_only(api_client: TestClient, mock_full_backup: Backup):
     """Test only core can access cloud backup location."""
-    resp = await api_client.post(
-        "/backups/reload",
-        json={"location": ".cloud_backup", "filename": "caller_not_core.tar"},
-    )
-    assert resp.status == 403
-
     resp = await api_client.post(
         "/backups/new/full",
         json={
@@ -589,14 +535,132 @@ async def test_cloud_backup_core_only(api_client: TestClient, mock_full_backup:
     resp = await api_client.delete(f"/backups/{mock_full_backup.slug}")
     assert resp.status == 403

+    resp = await api_client.get(f"/backups/{mock_full_backup.slug}/download")
+    assert resp.status == 403
+

-async def test_partial_reload_errors_no_file(
+async def test_upload_download(
+    api_client: TestClient, coresys: CoreSys, tmp_supervisor_data: Path
+):
+    """Test upload and download of a backup."""
+    # Capture our backup initially
+    backup_file = get_fixture_path("backup_example.tar")
+    backup = Backup(coresys, backup_file, "in", None)
+    await backup.load()
+
+    # Upload it and confirm it matches what we had
+    with backup_file.open("rb") as file, MultipartWriter("form-data") as mp:
+        mp.append(file)
+        resp = await api_client.post("/backups/new/upload", data=mp)
+
+    assert resp.status == 200
+    body = await resp.json()
+    assert body["data"]["slug"] == "7fed74c8"
+    assert backup == coresys.backups.get("7fed74c8")
+
+    # Download it and confirm it against the original again
+    resp = await api_client.get("/backups/7fed74c8/download")
+    assert resp.status == 200
+    out_file = tmp_supervisor_data / "backup_example.tar"
+    with out_file.open("wb") as out:
+        out.write(await resp.read())
+
+    out_backup = Backup(coresys, out_file, "out", None)
+    await out_backup.load()
+    assert backup == out_backup
+
+
+@pytest.mark.usefixtures("path_extern")
+@pytest.mark.parametrize(
+    ("backup_type", "inputs"), [("full", {}), ("partial", {"folders": ["ssl"]})]
+)
+async def test_backup_to_multiple_locations(
+    api_client: TestClient,
+    coresys: CoreSys,
+    tmp_supervisor_data: Path,
+    backup_type: str,
+    inputs: dict[str, Any],
+):
+    """Test making a backup to multiple locations."""
+    coresys.core.state = CoreState.RUNNING
+    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
+
+    resp = await api_client.post(
+        f"/backups/new/{backup_type}",
+        json={"name": "Multiple locations test", "location": [None, ".cloud_backup"]}
+        | inputs,
+    )
+    assert resp.status == 200
+    result = await resp.json()
+    assert result["result"] == "ok"
+    slug = result["data"]["slug"]
+
+    orig_backup = coresys.config.path_backup / f"{slug}.tar"
+    copy_backup = coresys.config.path_core_backup / f"{slug}.tar"
+    assert orig_backup.exists()
+    assert copy_backup.exists()
+    assert coresys.backups.get(slug).all_locations == {
+        None: orig_backup,
+        ".cloud_backup": copy_backup,
+    }
+    assert coresys.backups.get(slug).location is None
+
+
+@pytest.mark.parametrize(
+    ("backup_type", "inputs"), [("full", {}), ("partial", {"folders": ["ssl"]})]
+)
+async def test_backup_with_extras(
+    api_client: TestClient,
+    coresys: CoreSys,
+    tmp_supervisor_data: Path,
+    backup_type: str,
+    inputs: dict[str, Any],
+):
+    """Test backup including extra metadata."""
+    coresys.core.state = CoreState.RUNNING
+    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
+
+    resp = await api_client.post(
+        f"/backups/new/{backup_type}",
+        json={"name": "Extras test", "extra": {"user": "test", "scheduled": True}}
+        | inputs,
+    )
+    assert resp.status == 200
+    result = await resp.json()
+    assert result["result"] == "ok"
+    slug = result["data"]["slug"]
+
+    resp = await api_client.get(f"/backups/{slug}/info")
+    assert resp.status == 200
+    result = await resp.json()
+    assert result["result"] == "ok"
+    assert result["data"]["extra"] == {"user": "test", "scheduled": True}
+
+
+async def test_upload_to_multiple_locations(
     api_client: TestClient,
     coresys: CoreSys,
     tmp_supervisor_data: Path,
 ):
-    """Partial reload returns error when asked to reload non-existent file."""
-    resp = await api_client.post(
-        "/backups/reload", json={"location": None, "filename": "does_not_exist.tar"}
-    )
-    assert resp.status == 400
+    """Test uploading a backup to multiple locations."""
+    backup_file = get_fixture_path("backup_example.tar")
+
+    with backup_file.open("rb") as file, MultipartWriter("form-data") as mp:
+        mp.append(file)
+        resp = await api_client.post(
+            "/backups/new/upload?location=&location=.cloud_backup", data=mp
+        )
+
+    assert resp.status == 200
+    body = await resp.json()
+    assert body["data"]["slug"] == "7fed74c8"
+
+    orig_backup = coresys.config.path_backup / "7fed74c8.tar"
+    copy_backup = coresys.config.path_core_backup / "7fed74c8.tar"
+    assert orig_backup.exists()
+    assert copy_backup.exists()
+    assert coresys.backups.get("7fed74c8").all_locations == {
+        None: orig_backup,
+        ".cloud_backup": copy_backup,
+    }
+    assert coresys.backups.get("7fed74c8").location is None


@@ -81,7 +81,7 @@ async def test_api_create_mount(
             "share": "backups",
             "state": "active",
             "read_only": False,
-            "user_path": "/backup/backup_test",
+            "user_path": None,
         }
     ]
     coresys.mounts.save_data.assert_called_once()
@@ -258,7 +258,7 @@ async def test_api_update_mount(
             "share": "new_backups",
             "state": "active",
             "read_only": False,
-            "user_path": "/backup/backup_test",
+            "user_path": None,
         }
     ]
     coresys.mounts.save_data.assert_called_once()
@@ -294,9 +294,8 @@ async def test_api_update_dbus_error_mount_remains(
     """Test mount remains in list with unsuccessful state if dbus error occurs during update."""
     systemd_service: SystemdService = all_dbus_services["systemd"]
     systemd_unit_service: SystemdUnitService = all_dbus_services["systemd_unit"]
-    systemd_unit_service.active_state = ["failed", "inactive", "failed", "inactive"]
+    systemd_unit_service.active_state = ["failed", "inactive"]
     systemd_service.response_get_unit = [
-        "/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
         "/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
         DBusError("org.freedesktop.systemd1.NoSuchUnit", "error"),
     ]
@@ -328,7 +327,7 @@ async def test_api_update_dbus_error_mount_remains(
             "share": "backups",
             "state": None,
             "read_only": False,
-            "user_path": "/backup/backup_test",
+            "user_path": None,
         }
     ]
@@ -376,7 +375,7 @@ async def test_api_update_dbus_error_mount_remains(
             "share": "backups",
             "state": None,
             "read_only": False,
-            "user_path": "/backup/backup_test",
+            "user_path": None,
         }
     ]


@@ -4,8 +4,10 @@ from unittest.mock import AsyncMock, MagicMock, patch

 import pytest

-from supervisor.backups.const import BackupType
+from supervisor.backups.const import LOCATION_CLOUD_BACKUP, LOCATION_TYPE, BackupType
 from supervisor.backups.validate import ALL_FOLDERS
+from supervisor.coresys import CoreSys
+from supervisor.mounts.mount import Mount

 from tests.const import TEST_ADDON_SLUG
@@ -50,3 +52,34 @@ def full_backup_mock(backup_mock):
     backup_instance.addon_list = [TEST_ADDON_SLUG]
     backup_instance.supervisor_version = "99.9.9dev"
     yield backup_mock
+
+
+@pytest.fixture(name="backup_locations")
+async def fixture_backup_locations(
+    request: pytest.FixtureRequest, coresys: CoreSys, mount_propagation, mock_is_mount
+) -> list[LOCATION_TYPE]:
+    """Return a list of processed backup locations."""
+    locations: list[LOCATION_TYPE] = []
+    loaded = False
+    for location in request.param:
+        if location in {None, LOCATION_CLOUD_BACKUP}:
+            locations.append(location)
+        else:
+            if not loaded:
+                await coresys.mounts.load()
+
+            await coresys.mounts.create_mount(
+                Mount.from_dict(
+                    coresys,
+                    {
+                        "name": location,
+                        "usage": "backup",
+                        "type": "cifs",
+                        "server": "test.local",
+                        "share": "test",
+                    },
+                )
+            )
+            locations.append(coresys.mounts.get(location))
+
+    return locations


@@ -15,7 +15,7 @@ from supervisor.addons.addon import Addon
 from supervisor.addons.const import AddonBackupMode
 from supervisor.addons.model import AddonModel
 from supervisor.backups.backup import Backup
-from supervisor.backups.const import BackupType
+from supervisor.backups.const import LOCATION_TYPE, BackupType
 from supervisor.backups.manager import BackupManager
 from supervisor.const import FOLDER_HOMEASSISTANT, FOLDER_SHARE, AddonState, CoreState
 from supervisor.coresys import CoreSys
@@ -34,7 +34,6 @@ from supervisor.homeassistant.api import HomeAssistantAPI
 from supervisor.homeassistant.const import WSType
 from supervisor.homeassistant.core import HomeAssistantCore
 from supervisor.homeassistant.module import HomeAssistant
-from supervisor.jobs import JobSchedulerOptions
 from supervisor.jobs.const import JobCondition
 from supervisor.mounts.mount import Mount
 from supervisor.utils.json import read_json_file, write_json_file
@@ -1718,29 +1717,35 @@ async def test_skip_homeassistant_database(
     assert not test_db_shm.exists()


+@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
 @pytest.mark.parametrize(
-    "tar_parent,healthy_expected",
+    ("backup_locations", "location_name", "healthy_expected"),
     [
-        (Path("/data/mounts/test"), True),
-        (Path("/data/backup"), False),
+        (["test"], "test", True),
+        ([None], None, False),
     ],
+    indirect=["backup_locations"],
 )
-def test_backup_remove_error(
+async def test_backup_remove_error(
     coresys: CoreSys,
-    full_backup_mock: Backup,
-    tar_parent: Path,
+    backup_locations: list[LOCATION_TYPE],
+    location_name: str | None,
     healthy_expected: bool,
 ):
     """Test removing a backup error."""
-    full_backup_mock.tarfile.unlink.side_effect = (err := OSError())
-    full_backup_mock.tarfile.parent = tar_parent
+    copy(get_fixture_path("backup_example.tar"), coresys.config.path_backup)
+    await coresys.backups.reload(location=None, filename="backup_example.tar")
+    assert (backup := coresys.backups.get("7fed74c8"))
+
+    backup.all_locations[location_name] = (tar_mock := MagicMock())
+    tar_mock.unlink.side_effect = (err := OSError())
     err.errno = errno.EBUSY
-    assert coresys.backups.remove(full_backup_mock) is False
+    assert coresys.backups.remove(backup) is False
     assert coresys.core.healthy is True

     err.errno = errno.EBADMSG
-    assert coresys.backups.remove(full_backup_mock) is False
+    assert coresys.backups.remove(backup) is False
     assert coresys.core.healthy is healthy_expected
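
The parametrization encodes the removal error policy this change settles on: EBUSY is always treated as transient, while EBADMSG flags the system unhealthy only when the failing tarfile sits in supervisor-managed storage rather than on an external mount. A rough sketch of that decision rule, assuming this reading of the test (the helper name is hypothetical):

import errno

def removal_marks_unhealthy(err: OSError, on_external_mount: bool) -> bool:
    # EBUSY: the file is transiently locked; removal fails but health is unaffected.
    if err.errno == errno.EBUSY:
        return False
    # EBADMSG on supervisor-managed data hints at filesystem corruption.
    return err.errno == errno.EBADMSG and not on_external_mount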
@@ -1900,7 +1905,7 @@ async def test_reload_multiple_locations(coresys: CoreSys, tmp_supervisor_data:
     assert (backup := coresys.backups.get("7fed74c8"))
     assert backup.location == ".cloud_backup"
     assert backup.locations == [".cloud_backup"]
-    assert backup.all_locations == {".cloud_backup"}
+    assert backup.all_locations.keys() == {".cloud_backup"}

     copy(backup_file, tmp_supervisor_data / "backup")
     await coresys.backups.reload()
@@ -1909,7 +1914,7 @@ async def test_reload_multiple_locations(coresys: CoreSys, tmp_supervisor_data:
     assert (backup := coresys.backups.get("7fed74c8"))
     assert backup.location is None
     assert backup.locations == [None]
-    assert backup.all_locations == {".cloud_backup", None}
+    assert backup.all_locations.keys() == {".cloud_backup", None}

     copy(backup_file, mount_dir)
     await coresys.backups.reload()
@@ -1919,7 +1924,7 @@ async def test_reload_multiple_locations(coresys: CoreSys, tmp_supervisor_data:
     assert backup.location in {None, "backup_test"}
     assert None in backup.locations
     assert "backup_test" in backup.locations
-    assert backup.all_locations == {".cloud_backup", None, "backup_test"}
+    assert backup.all_locations.keys() == {".cloud_backup", None, "backup_test"}


 @pytest.mark.usefixtures("mount_propagation", "mock_is_mount", "path_extern")
@@ -1951,7 +1956,7 @@ async def test_partial_reload_multiple_locations(
     assert (backup := coresys.backups.get("7fed74c8"))
     assert backup.location == ".cloud_backup"
     assert backup.locations == [".cloud_backup"]
-    assert backup.all_locations == {".cloud_backup"}
+    assert backup.all_locations.keys() == {".cloud_backup"}

     copy(backup_file, tmp_supervisor_data / "backup")
     await coresys.backups.reload(location=None, filename="backup_example.tar")
@@ -1960,7 +1965,7 @@ async def test_partial_reload_multiple_locations(
     assert (backup := coresys.backups.get("7fed74c8"))
     assert backup.location is None
     assert backup.locations == [None]
-    assert backup.all_locations == {".cloud_backup", None}
+    assert backup.all_locations.keys() == {".cloud_backup", None}

     copy(backup_file, mount_dir)
     await coresys.backups.reload(location=mount, filename="backup_example.tar")
@@ -1968,66 +1973,42 @@ async def test_partial_reload_multiple_locations(
     assert coresys.backups.list_backups
     assert (backup := coresys.backups.get("7fed74c8"))
     assert backup.location is None
-    assert None in backup.locations
-    assert "backup_test" in backup.locations
-    assert backup.all_locations == {".cloud_backup", None, "backup_test"}
+    assert backup.locations == [None, "backup_test"]
+    assert backup.all_locations.keys() == {".cloud_backup", None, "backup_test"}


-@pytest.mark.parametrize(
-    ("location", "folder"), [(None, "backup"), (".cloud_backup", "cloud_backup")]
-)
-@pytest.mark.usefixtures("tmp_supervisor_data")
-async def test_partial_backup_complete_ws_message(
-    coresys: CoreSys, ha_ws_client: AsyncMock, location: str | None, folder: str
+async def test_backup_remove_multiple_locations(
+    coresys: CoreSys, tmp_supervisor_data: Path
 ):
-    """Test WS message notifies core when a partial backup is complete."""
-    coresys.core.state = CoreState.RUNNING
-    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
-    ha_ws_client.ha_version = AwesomeVersion("2025.12.0")
-
-    # Test a partial backup
-    job, backup_task = coresys.jobs.schedule_job(
-        coresys.backups.do_backup_partial,
-        JobSchedulerOptions(),
-        "test",
-        folders=["media"],
-        location=location,
-    )
-    backup: Backup = await backup_task
-
-    assert ha_ws_client.async_send_command.call_args_list[-3].args[0] == {
-        "type": "backup/supervisor/backup_complete",
-        "data": {
-            "job_id": job.uuid,
-            "slug": backup.slug,
-            "path": f"/{folder}/{backup.slug}.tar",
-        },
-    }
+    """Test removing a backup that exists in multiple locations."""
+    backup_file = get_fixture_path("backup_example.tar")
+    location_1 = Path(copy(backup_file, coresys.config.path_backup))
+    location_2 = Path(copy(backup_file, coresys.config.path_core_backup))
+
+    await coresys.backups.reload()
+    assert (backup := coresys.backups.get("7fed74c8"))
+    assert backup.all_locations == {None: location_1, ".cloud_backup": location_2}
+
+    coresys.backups.remove(backup)
+    assert not location_1.exists()
+    assert not location_2.exists()
+    assert not coresys.backups.get("7fed74c8")


-@pytest.mark.parametrize(
-    ("location", "folder"), [(None, "backup"), (".cloud_backup", "cloud_backup")]
-)
-@pytest.mark.usefixtures("tmp_supervisor_data")
-async def test_full_backup_complete_ws_message(
-    coresys: CoreSys, ha_ws_client: AsyncMock, location: str | None, folder: str
+async def test_backup_remove_one_location_of_multiple(
+    coresys: CoreSys, tmp_supervisor_data: Path
 ):
-    """Test WS message notifies core when a full backup is complete."""
-    coresys.core.state = CoreState.RUNNING
-    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
-    ha_ws_client.ha_version = AwesomeVersion("2025.12.0")
-
-    # Test a full backup
-    job, backup_task = coresys.jobs.schedule_job(
-        coresys.backups.do_backup_full, JobSchedulerOptions(), "test", location=location
-    )
-    backup: Backup = await backup_task
-
-    assert ha_ws_client.async_send_command.call_args_list[-3].args[0] == {
-        "type": "backup/supervisor/backup_complete",
-        "data": {
-            "job_id": job.uuid,
-            "slug": backup.slug,
-            "path": f"/{folder}/{backup.slug}.tar",
-        },
-    }
+    """Test removing a backup that exists in multiple locations from one location."""
+    backup_file = get_fixture_path("backup_example.tar")
+    location_1 = Path(copy(backup_file, coresys.config.path_backup))
+    location_2 = Path(copy(backup_file, coresys.config.path_core_backup))
+
+    await coresys.backups.reload()
+    assert (backup := coresys.backups.get("7fed74c8"))
+    assert backup.all_locations == {None: location_1, ".cloud_backup": location_2}
+
+    coresys.backups.remove(backup, locations=[".cloud_backup"])
+    assert location_1.exists()
+    assert not location_2.exists()
+    assert coresys.backups.get("7fed74c8")
+    assert backup.all_locations == {None: location_1}
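
These assertions pin down the new shape of Backup.all_locations: a mapping from location name to that location's tarfile path, where it previously held only a set of names. The mapping is what makes per-location removal via remove(backup, locations=[...]) expressible. A sketch of the assumed structure, with illustrative paths:

from pathlib import Path

# Keys: None = the default /backup dir, ".cloud_backup" = the core-managed
# dir, any other string = a named backup mount. Values: that copy's tarfile.
all_locations: dict[str | None, Path] = {
    None: Path("/data/backup/7fed74c8.tar"),
    ".cloud_backup": Path("/data/core_backup/7fed74c8.tar"),
}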

View File

@@ -77,19 +77,6 @@ async def test_homeassistant_start(
             read_only=False,
             propagation="rslave",
         ),
-        Mount(
-            type="bind",
-            source=coresys.config.path_extern_backup.as_posix(),
-            target="/backup",
-            read_only=False,
-            propagation="rslave",
-        ),
-        Mount(
-            type="bind",
-            source=coresys.config.path_extern_core_backup.as_posix(),
-            target="/cloud_backup",
-            read_only=False,
-        ),
         Mount(
             type="bind",
             source=coresys.homeassistant.path_extern_pulse.as_posix(),

View File

@@ -2,6 +2,8 @@

 from collections.abc import AsyncGenerator
 from contextlib import asynccontextmanager
+from pathlib import Path
+from shutil import copy
 from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch

 from awesomeversion import AwesomeVersion
@@ -16,7 +18,7 @@ from supervisor.homeassistant.core import HomeAssistantCore
 from supervisor.misc.tasks import Tasks
 from supervisor.supervisor import Supervisor

-from tests.common import load_fixture
+from tests.common import get_fixture_path, load_fixture

 # pylint: disable=protected-access
@@ -208,3 +210,32 @@ async def test_reload_updater_triggers_supervisor_update(
         version_resp.read.return_value = version_data.replace("2024.10.0", "2024.10.1")
         await tasks._reload_updater()
         update.assert_called_once()
+
+
+@pytest.mark.usefixtures("path_extern")
+async def test_core_backup_cleanup(
+    tasks: Tasks, coresys: CoreSys, tmp_supervisor_data: Path
+):
+    """Test core backup task cleans up old backup files."""
+    coresys.core.state = CoreState.RUNNING
+    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
+
+    # Put an old and new backup in folder
+    copy(get_fixture_path("backup_example.tar"), coresys.config.path_core_backup)
+    await coresys.backups.reload(
+        location=".cloud_backup", filename="backup_example.tar"
+    )
+    assert (old_backup := coresys.backups.get("7fed74c8"))
+    new_backup = await coresys.backups.do_backup_partial(
+        name="test", folders=["ssl"], location=".cloud_backup"
+    )
+
+    old_tar = old_backup.tarfile
+    new_tar = new_backup.tarfile
+    # pylint: disable-next=protected-access
+    await tasks._core_backup_cleanup()
+
+    assert coresys.backups.get(new_backup.slug)
+    assert not coresys.backups.get("7fed74c8")
+    assert new_tar.exists()
+    assert not old_tar.exists()
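
The task under test treats the ".cloud_backup" location as transient scratch space for Home Assistant Core: copies that linger there past some age threshold get removed from that location only. A rough sketch of the selection logic, assuming an age-based cutoff (the two-day threshold and helper name are illustrative guesses, not the actual implementation):

from datetime import datetime, timedelta, timezone

def stale_cloud_backups(backups, max_age: timedelta = timedelta(days=2)) -> list:
    """Pick backups in the core-managed location older than the cutoff.

    Assumes each backup exposes an ISO-format, timezone-aware date string,
    as Supervisor backup metadata does.
    """
    cutoff = datetime.now(timezone.utc) - max_age
    return [
        backup
        for backup in backups
        if ".cloud_backup" in backup.all_locations
        and datetime.fromisoformat(backup.date) < cutoff
    ]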

View File

@@ -46,14 +46,13 @@ async def test_fixup(
         suggestions=[SuggestionType.EXECUTE_RELOAD, SuggestionType.EXECUTE_REMOVE],
     )

-    systemd_unit_service.active_state = ["active", "inactive", "active", "inactive"]
+    systemd_unit_service.active_state = ["active", "inactive"]
     await mount_execute_remove()

     assert coresys.resolution.issues == []
     assert coresys.resolution.suggestions == []
     assert coresys.mounts.mounts == []
     assert systemd_service.StopUnit.calls == [
-        ("mnt-data-supervisor-backup-test.mount", "fail"),
-        ("mnt-data-supervisor-mounts-test.mount", "fail"),
+        ("mnt-data-supervisor-mounts-test.mount", "fail")
     ]
     coresys.mounts.save_data.assert_called_once()