mirror of https://github.com/home-assistant/supervisor.git
synced 2025-07-09 02:06:30 +00:00

Add support for cloud backups in Core (#5438)

* Add support for cloud backups in Core
* Test cases and small fixes identified
* Add test for partial reload no file failure

This commit is contained in:
parent a45d507bee
commit 5519f6a53b
@@ -47,7 +47,7 @@ from ..const import (
     ATTR_JOURNALD,
     ATTR_KERNEL_MODULES,
     ATTR_LEGACY,
-    ATTR_LOCATON,
+    ATTR_LOCATION,
     ATTR_MACHINE,
     ATTR_MAP,
     ATTR_NAME,
@@ -581,7 +581,7 @@ class AddonModel(JobGroup, ABC):
     @property
     def path_location(self) -> Path:
         """Return path to this add-on."""
-        return Path(self.data[ATTR_LOCATON])
+        return Path(self.data[ATTR_LOCATION])
 
     @property
     def path_icon(self) -> Path:
@@ -55,7 +55,7 @@ from ..const import (
     ATTR_KERNEL_MODULES,
     ATTR_LABELS,
     ATTR_LEGACY,
-    ATTR_LOCATON,
+    ATTR_LOCATION,
     ATTR_MACHINE,
     ATTR_MAP,
     ATTR_NAME,
@@ -483,7 +483,7 @@ SCHEMA_ADDON_SYSTEM = vol.All(
     _migrate_addon_config(),
     _SCHEMA_ADDON_CONFIG.extend(
         {
-            vol.Required(ATTR_LOCATON): str,
+            vol.Required(ATTR_LOCATION): str,
             vol.Required(ATTR_REPOSITORY): str,
             vol.Required(ATTR_TRANSLATIONS, default=dict): {
                 str: SCHEMA_ADDON_TRANSLATIONS
@@ -14,6 +14,7 @@ from aiohttp.hdrs import CONTENT_DISPOSITION
 import voluptuous as vol
 
 from ..backups.backup import Backup
+from ..backups.const import LOCATION_CLOUD_BACKUP
 from ..backups.validate import ALL_FOLDERS, FOLDER_HOMEASSISTANT, days_until_stale
 from ..const import (
     ATTR_ADDONS,
@@ -22,10 +23,12 @@ from ..const import (
     ATTR_CONTENT,
     ATTR_DATE,
     ATTR_DAYS_UNTIL_STALE,
+    ATTR_FILENAME,
     ATTR_FOLDERS,
     ATTR_HOMEASSISTANT,
     ATTR_HOMEASSISTANT_EXCLUDE_DATABASE,
-    ATTR_LOCATON,
+    ATTR_JOB_ID,
+    ATTR_LOCATION,
     ATTR_NAME,
     ATTR_PASSWORD,
     ATTR_PROTECTED,
@@ -36,20 +39,22 @@ from ..const import (
     ATTR_TIMEOUT,
     ATTR_TYPE,
     ATTR_VERSION,
+    REQUEST_FROM,
     BusEvent,
     CoreState,
 )
 from ..coresys import CoreSysAttributes
-from ..exceptions import APIError
+from ..exceptions import APIError, APIForbidden
 from ..jobs import JobSchedulerOptions
 from ..mounts.const import MountUsage
 from ..resolution.const import UnhealthyReason
-from .const import ATTR_BACKGROUND, ATTR_JOB_ID, CONTENT_TYPE_TAR
+from .const import ATTR_BACKGROUND, ATTR_LOCATIONS, CONTENT_TYPE_TAR
 from .utils import api_process, api_validate
 
 _LOGGER: logging.Logger = logging.getLogger(__name__)
 
 RE_SLUGIFY_NAME = re.compile(r"[^A-Za-z0-9]+")
+RE_BACKUP_FILENAME = re.compile(r"^[^\\\/]+\.tar$")
 
 # Backwards compatible
 # Remove: 2022.08
@@ -76,7 +81,7 @@ SCHEMA_BACKUP_FULL = vol.Schema(
         vol.Optional(ATTR_NAME): str,
         vol.Optional(ATTR_PASSWORD): vol.Maybe(str),
         vol.Optional(ATTR_COMPRESSED): vol.Maybe(vol.Boolean()),
-        vol.Optional(ATTR_LOCATON): vol.Maybe(str),
+        vol.Optional(ATTR_LOCATION): vol.Maybe(str),
         vol.Optional(ATTR_HOMEASSISTANT_EXCLUDE_DATABASE): vol.Boolean(),
         vol.Optional(ATTR_BACKGROUND, default=False): vol.Boolean(),
     }
@@ -101,6 +106,12 @@ SCHEMA_FREEZE = vol.Schema(
         vol.Optional(ATTR_TIMEOUT): vol.All(int, vol.Range(min=1)),
     }
 )
+SCHEMA_RELOAD = vol.Schema(
+    {
+        vol.Inclusive(ATTR_LOCATION, "file"): vol.Maybe(str),
+        vol.Inclusive(ATTR_FILENAME, "file"): vol.Match(RE_BACKUP_FILENAME),
+    }
+)
 
 
 class APIBackups(CoreSysAttributes):
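The new `SCHEMA_RELOAD` is what makes the targeted reload safe: `vol.Inclusive` places `location` and `filename` in one group named `"file"`, so a request must send both keys or neither, and `RE_BACKUP_FILENAME` rejects any name containing a path separator, keeping callers from pointing the reload at files outside the chosen backup directory. A minimal stand-alone sketch of the same validation, using only `voluptuous` and `re` with the constants inlined:

```python
import re

import voluptuous as vol

RE_BACKUP_FILENAME = re.compile(r"^[^\\\/]+\.tar$")

schema = vol.Schema(
    {
        vol.Inclusive("location", "file"): vol.Maybe(str),
        vol.Inclusive("filename", "file"): vol.Match(RE_BACKUP_FILENAME),
    }
)

schema({})  # OK: no keys, reload everything
schema({"location": None, "filename": "my_backup.tar"})  # OK: reload one file
try:
    schema({"filename": "no_location.tar"})  # rejected: inclusive group incomplete
except vol.Invalid as err:
    print(err)
try:
    # rejected: "/" is not allowed, so the filename cannot traverse directories
    schema({"location": None, "filename": "../../etc/shadow.tar"})
except vol.Invalid as err:
    print(err)
```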
@@ -122,7 +133,8 @@ class APIBackups(CoreSysAttributes):
                 ATTR_DATE: backup.date,
                 ATTR_TYPE: backup.sys_type,
                 ATTR_SIZE: backup.size,
-                ATTR_LOCATON: backup.location,
+                ATTR_LOCATION: backup.location,
+                ATTR_LOCATIONS: backup.locations,
                 ATTR_PROTECTED: backup.protected,
                 ATTR_COMPRESSED: backup.compressed,
                 ATTR_CONTENT: {
@@ -132,6 +144,7 @@ class APIBackups(CoreSysAttributes):
                 },
             }
             for backup in self.sys_backups.list_backups
+            if backup.location != LOCATION_CLOUD_BACKUP
         ]
 
     @api_process
@@ -164,10 +177,13 @@ class APIBackups(CoreSysAttributes):
         self.sys_backups.save_data()
 
     @api_process
-    async def reload(self, _):
+    async def reload(self, request: web.Request):
         """Reload backup list."""
-        await asyncio.shield(self.sys_backups.reload())
-        return True
+        body = await api_validate(SCHEMA_RELOAD, request)
+        self._validate_cloud_backup_location(request, body.get(ATTR_LOCATION))
+        backup = self._location_to_mount(body)
+
+        return await asyncio.shield(self.sys_backups.reload(**backup))
 
     @api_process
     async def backup_info(self, request):
@@ -195,7 +211,8 @@ class APIBackups(CoreSysAttributes):
             ATTR_PROTECTED: backup.protected,
             ATTR_SUPERVISOR_VERSION: backup.supervisor_version,
             ATTR_HOMEASSISTANT: backup.homeassistant_version,
-            ATTR_LOCATON: backup.location,
+            ATTR_LOCATION: backup.location,
+            ATTR_LOCATIONS: backup.locations,
             ATTR_ADDONS: data_addons,
             ATTR_REPOSITORIES: backup.repositories,
             ATTR_FOLDERS: backup.folders,
@@ -204,17 +221,29 @@ class APIBackups(CoreSysAttributes):
 
     def _location_to_mount(self, body: dict[str, Any]) -> dict[str, Any]:
         """Change location field to mount if necessary."""
-        if not body.get(ATTR_LOCATON):
+        if not body.get(ATTR_LOCATION) or body[ATTR_LOCATION] == LOCATION_CLOUD_BACKUP:
             return body
 
-        body[ATTR_LOCATON] = self.sys_mounts.get(body[ATTR_LOCATON])
-        if body[ATTR_LOCATON].usage != MountUsage.BACKUP:
+        body[ATTR_LOCATION] = self.sys_mounts.get(body[ATTR_LOCATION])
+        if body[ATTR_LOCATION].usage != MountUsage.BACKUP:
             raise APIError(
-                f"Mount {body[ATTR_LOCATON].name} is not used for backups, cannot backup to there"
+                f"Mount {body[ATTR_LOCATION].name} is not used for backups, cannot backup to there"
             )
 
         return body
 
+    def _validate_cloud_backup_location(
+        self, request: web.Request, location: str | None
+    ) -> None:
+        """Cloud backup location is only available to Home Assistant."""
+        if (
+            location == LOCATION_CLOUD_BACKUP
+            and request.get(REQUEST_FROM) != self.sys_homeassistant
+        ):
+            raise APIForbidden(
+                f"Location {LOCATION_CLOUD_BACKUP} is only available for Home Assistant"
+            )
+
     async def _background_backup_task(
         self, backup_method: Callable, *args, **kwargs
     ) -> tuple[asyncio.Task, str]:
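`_validate_cloud_backup_location` is the single enforcement point for the new hidden location: every route that can touch `.cloud_backup` calls it before doing any work, and any caller other than Home Assistant itself gets a 403. It relies on `request.get(REQUEST_FROM)`, the caller object that Supervisor's auth middleware attaches to each request. A simplified sketch of the pattern, using plain dicts and objects as stand-ins for the real request and caller types:

```python
LOCATION_CLOUD_BACKUP = ".cloud_backup"
REQUEST_FROM = "HASSIO_FROM"  # illustrative key; Supervisor's middleware stores the caller on the request


class APIForbidden(Exception):
    """Raised to produce a 403 response."""


def validate_cloud_backup_location(
    request: dict, homeassistant: object, location: str | None
) -> None:
    """Allow the cloud backup location only when the caller is Home Assistant."""
    if location == LOCATION_CLOUD_BACKUP and request.get(REQUEST_FROM) is not homeassistant:
        raise APIForbidden(
            f"Location {LOCATION_CLOUD_BACKUP} is only available for Home Assistant"
        )


core = object()   # stand-in for the Home Assistant caller resolved by auth
addon = object()  # stand-in for any other caller, e.g. an add-on

validate_cloud_backup_location({REQUEST_FROM: core}, core, ".cloud_backup")  # passes
try:
    validate_cloud_backup_location({REQUEST_FROM: addon}, core, ".cloud_backup")
except APIForbidden as err:
    print(err)  # rejected: only Core may use the hidden location
```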
@@ -246,9 +275,10 @@ class APIBackups(CoreSysAttributes):
         self.sys_bus.remove_listener(listener)
 
     @api_process
-    async def backup_full(self, request):
+    async def backup_full(self, request: web.Request):
         """Create full backup."""
         body = await api_validate(SCHEMA_BACKUP_FULL, request)
+        self._validate_cloud_backup_location(request, body.get(ATTR_LOCATION))
         background = body.pop(ATTR_BACKGROUND)
         backup_task, job_id = await self._background_backup_task(
             self.sys_backups.do_backup_full, **self._location_to_mount(body)
@@ -266,9 +296,10 @@ class APIBackups(CoreSysAttributes):
         )
 
     @api_process
-    async def backup_partial(self, request):
+    async def backup_partial(self, request: web.Request):
         """Create a partial backup."""
         body = await api_validate(SCHEMA_BACKUP_PARTIAL, request)
+        self._validate_cloud_backup_location(request, body.get(ATTR_LOCATION))
         background = body.pop(ATTR_BACKGROUND)
         backup_task, job_id = await self._background_backup_task(
             self.sys_backups.do_backup_partial, **self._location_to_mount(body)
@@ -286,9 +317,10 @@ class APIBackups(CoreSysAttributes):
         )
 
     @api_process
-    async def restore_full(self, request):
+    async def restore_full(self, request: web.Request):
         """Full restore of a backup."""
         backup = self._extract_slug(request)
+        self._validate_cloud_backup_location(request, backup.location)
         body = await api_validate(SCHEMA_RESTORE_FULL, request)
         background = body.pop(ATTR_BACKGROUND)
         restore_task, job_id = await self._background_backup_task(
@@ -303,9 +335,10 @@ class APIBackups(CoreSysAttributes):
         )
 
     @api_process
-    async def restore_partial(self, request):
+    async def restore_partial(self, request: web.Request):
         """Partial restore a backup."""
         backup = self._extract_slug(request)
+        self._validate_cloud_backup_location(request, backup.location)
         body = await api_validate(SCHEMA_RESTORE_PARTIAL, request)
         background = body.pop(ATTR_BACKGROUND)
         restore_task, job_id = await self._background_backup_task(
@@ -320,23 +353,24 @@ class APIBackups(CoreSysAttributes):
         )
 
     @api_process
-    async def freeze(self, request):
+    async def freeze(self, request: web.Request):
         """Initiate manual freeze for external backup."""
         body = await api_validate(SCHEMA_FREEZE, request)
         await asyncio.shield(self.sys_backups.freeze_all(**body))
 
     @api_process
-    async def thaw(self, request):
+    async def thaw(self, request: web.Request):
         """Begin thaw after manual freeze."""
         await self.sys_backups.thaw_all()
 
     @api_process
-    async def remove(self, request):
+    async def remove(self, request: web.Request):
         """Remove a backup."""
         backup = self._extract_slug(request)
+        self._validate_cloud_backup_location(request, backup.location)
         return self.sys_backups.remove(backup)
 
-    async def download(self, request):
+    async def download(self, request: web.Request):
         """Download a backup file."""
         backup = self._extract_slug(request)
 
@@ -349,7 +383,7 @@ class APIBackups(CoreSysAttributes):
         return response
 
     @api_process
-    async def upload(self, request):
+    async def upload(self, request: web.Request):
         """Upload a backup file."""
         with TemporaryDirectory(dir=str(self.sys_config.path_tmp)) as temp_dir:
             tar_file = Path(temp_dir, "backup.tar")
@ -42,11 +42,11 @@ ATTR_GROUP_IDS = "group_ids"
|
||||
ATTR_IDENTIFIERS = "identifiers"
|
||||
ATTR_IS_ACTIVE = "is_active"
|
||||
ATTR_IS_OWNER = "is_owner"
|
||||
ATTR_JOB_ID = "job_id"
|
||||
ATTR_JOBS = "jobs"
|
||||
ATTR_LLMNR = "llmnr"
|
||||
ATTR_LLMNR_HOSTNAME = "llmnr_hostname"
|
||||
ATTR_LOCAL_ONLY = "local_only"
|
||||
ATTR_LOCATIONS = "locations"
|
||||
ATTR_MDNS = "mdns"
|
||||
ATTR_MODEL = "model"
|
||||
ATTR_MOUNTS = "mounts"
|
||||
@ -68,6 +68,7 @@ ATTR_UPDATE_TYPE = "update_type"
|
||||
ATTR_USAGE = "usage"
|
||||
ATTR_USE_NTP = "use_ntp"
|
||||
ATTR_USERS = "users"
|
||||
ATTR_USER_PATH = "user_path"
|
||||
ATTR_VENDOR = "vendor"
|
||||
ATTR_VIRTUALIZATION = "virtualization"
|
||||
|
||||
|
@@ -11,7 +11,7 @@ from ..exceptions import APIError
 from ..mounts.const import ATTR_DEFAULT_BACKUP_MOUNT, MountUsage
 from ..mounts.mount import Mount
 from ..mounts.validate import SCHEMA_MOUNT_CONFIG
-from .const import ATTR_MOUNTS
+from .const import ATTR_MOUNTS, ATTR_USER_PATH
 from .utils import api_process, api_validate
 
 SCHEMA_OPTIONS = vol.Schema(
@@ -32,7 +32,11 @@ class APIMounts(CoreSysAttributes):
             if self.sys_mounts.default_backup_mount
             else None,
             ATTR_MOUNTS: [
-                mount.to_dict() | {ATTR_STATE: mount.state}
+                mount.to_dict()
+                | {
+                    ATTR_STATE: mount.state,
+                    ATTR_USER_PATH: mount.container_where.as_posix(),
+                }
                 for mount in self.sys_mounts.mounts
             ],
         }
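The mounts listing now advertises `user_path`, the path a user should look for inside Home Assistant or an add-on, and builds each entry by merging the serialized mount with computed fields via the dict union operator (`|`, Python 3.9+). A small sketch with an illustrative `to_dict()` stand-in:

```python
from pathlib import PurePath

def to_dict() -> dict:
    # Stand-in for Mount.to_dict(); field names mirror the API response.
    return {"name": "backup_test", "usage": "backup", "type": "cifs"}

state = "active"
container_where = PurePath("/backup", "backup_test")

# dict | dict returns a new dict; keys on the right win on conflict.
entry = to_dict() | {
    "state": state,
    "user_path": container_where.as_posix(),
}
print(entry["user_path"])  # /backup/backup_test
```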
@@ -10,11 +10,11 @@ from functools import cached_property
 import io
 import json
 import logging
-from pathlib import Path
+from pathlib import Path, PurePath
 import tarfile
 from tempfile import TemporaryDirectory
 import time
-from typing import Any
+from typing import Any, Literal
 
 from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
 from cryptography.hazmat.backends import default_backend
@@ -48,6 +48,7 @@ from ..const import (
     CRYPTO_AES128,
 )
 from ..coresys import CoreSys
+from ..docker.const import PATH_BACKUP, PATH_CLOUD_BACKUP
 from ..exceptions import AddonsError, BackupError, BackupInvalidError
 from ..jobs.const import JOB_GROUP_BACKUP
 from ..jobs.decorator import Job
@@ -55,7 +56,7 @@ from ..jobs.job_group import JobGroup
 from ..utils import remove_folder
 from ..utils.dt import parse_datetime, utcnow
 from ..utils.json import json_bytes
-from .const import BUF_SIZE, BackupType
+from .const import BUF_SIZE, LOCATION_CLOUD_BACKUP, BackupType
 from .utils import key_to_iv, password_to_key
 from .validate import SCHEMA_BACKUP
 
@@ -70,6 +71,7 @@ class Backup(JobGroup):
         coresys: CoreSys,
         tar_file: Path,
         slug: str,
+        location: str | None,
         data: dict[str, Any] | None = None,
     ):
         """Initialize a backup."""
@@ -83,6 +85,8 @@ class Backup(JobGroup):
         self._outer_secure_tarfile_tarfile: tarfile.TarFile | None = None
         self._key: bytes | None = None
         self._aes: Cipher | None = None
+        # Order is maintained in dict keys so this is effectively an ordered set
+        self._locations: dict[str | None, Literal[None]] = {location: None}
 
     @property
     def version(self) -> int:
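The `_locations` attribute leans on an idiom worth calling out: since Python 3.7, `dict` preserves insertion order, so a `dict[str | None, Literal[None]]` whose values are all `None` behaves as an ordered set. Keys deduplicate, order of first insertion is kept, and the first key doubles as the backup's primary location. A quick illustration:

```python
# Ordered-set-via-dict: keys deduplicate, insertion order is preserved.
locations: dict[str | None, None] = {None: None}  # first seen in the local folder

locations["backup_test"] = None  # later found on a mount as well
locations["backup_test"] = None  # re-adding is a no-op, order unchanged

print(list(locations))            # [None, 'backup_test']
print(next(iter(locations)))      # None -> the primary location stays the first one found
```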
@@ -178,12 +182,44 @@
         """Set the Docker config data."""
         self._data[ATTR_DOCKER] = value
 
-    @cached_property
+    @property
     def location(self) -> str | None:
         """Return the location of the backup."""
-        for backup_mount in self.sys_mounts.backup_mounts:
-            if self.tarfile.is_relative_to(backup_mount.local_where):
-                return backup_mount.name
+        return self.locations[0]
 
+    @property
+    def all_locations(self) -> set[str | None]:
+        """Return all locations this backup was found in."""
+        return self._locations.keys()
+
+    @property
+    def locations(self) -> list[str | None]:
+        """Return locations this backup was found in except cloud backup (unless that's the only one)."""
+        if len(self._locations) == 1:
+            return list(self._locations)
+        return [
+            location
+            for location in self._locations
+            if location != LOCATION_CLOUD_BACKUP
+        ]
+
+    @cached_property
+    def container_path(self) -> PurePath | None:
+        """Return where this is made available in managed containers (core, addons, etc.).
+
+        This returns none if the tarfile is not in a place mapped into other containers.
+        """
+        path_map: dict[Path, PurePath] = {
+            self.sys_config.path_backup: PATH_BACKUP,
+            self.sys_config.path_core_backup: PATH_CLOUD_BACKUP,
+        } | {
+            mount.local_where: mount.container_where
+            for mount in self.sys_mounts.backup_mounts
+        }
+        for source, target in path_map.items():
+            if self.tarfile.is_relative_to(source):
+                return target / self.tarfile.relative_to(source)
+
+        return None
 
     @property
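`container_path` translates the Supervisor-side location of the tarfile into the path that Core and add-ons see, by finding which known source directory contains the file and re-rooting the relative part onto that source's in-container target. A reduced sketch with hard-coded stand-ins for the config paths and backup mounts:

```python
from pathlib import Path, PurePath

# Supervisor-side source directories mapped to their in-container targets
# (illustrative values; the real map comes from sys_config and backup mounts).
path_map: dict[Path, PurePath] = {
    Path("/data/backup"): PurePath("/backup"),
    Path("/data/core/backup"): PurePath("/cloud_backup"),
    Path("/data/mounts/backup_test"): PurePath("/backup/backup_test"),
}

def container_path(tarfile: Path) -> PurePath | None:
    """Re-root the tarfile onto the container target of whichever source contains it."""
    for source, target in path_map.items():
        if tarfile.is_relative_to(source):
            return target / tarfile.relative_to(source)
    return None  # not in any directory mapped into managed containers

print(container_path(Path("/data/core/backup/7fed74c8.tar")))  # /cloud_backup/7fed74c8.tar
print(container_path(Path("/tmp/elsewhere.tar")))              # None
```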
@@ -215,6 +251,10 @@
         """Returns a copy of the data."""
         return deepcopy(self._data)
 
+    def add_location(self, location: str | None) -> None:
+        """Add a location the backup exists."""
+        self._locations[location] = None
+
     def new(
         self,
         name: str,
@@ -4,6 +4,7 @@ from enum import StrEnum
 
 BUF_SIZE = 2**20 * 4  # 4MB
 DEFAULT_FREEZE_TIMEOUT = 600
+LOCATION_CLOUD_BACKUP = ".cloud_backup"
 
 
 class BackupType(StrEnum):
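Two details of this constant are easy to miss. The leading dot marks the folder as internal rather than user-facing, and the same constant is reused as `Literal[LOCATION_CLOUD_BACKUP]` in the manager signatures below; strictly speaking, type checkers only accept a constant inside `Literal[...]` when it is declared `Final`, though it works at runtime either way. A small sketch of how the sentinel flows through a location-typed function:

```python
from typing import Literal

LOCATION_CLOUD_BACKUP = ".cloud_backup"

def describe(location: Literal[".cloud_backup"] | str | None) -> str:
    """Classify a location value the way the backup manager does."""
    if location == LOCATION_CLOUD_BACKUP:
        return "hidden Core-only cloud backup folder"
    if location is None:
        return "local backup folder"
    return f"backup mount {location!r}"

print(describe(LOCATION_CLOUD_BACKUP))  # hidden Core-only cloud backup folder
print(describe(None))                   # local backup folder
print(describe("backup_test"))          # backup mount 'backup_test'
```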
@@ -7,10 +7,16 @@ from collections.abc import Awaitable, Iterable
 import errno
 import logging
 from pathlib import Path
+from typing import Literal
 
 from ..addons.addon import Addon
 from ..const import (
+    ATTR_DATA,
     ATTR_DAYS_UNTIL_STALE,
+    ATTR_JOB_ID,
+    ATTR_PATH,
+    ATTR_SLUG,
+    ATTR_TYPE,
     FILE_HASSIO_BACKUPS,
     FOLDER_HOMEASSISTANT,
     CoreState,
@@ -22,6 +28,7 @@ from ..exceptions import (
     BackupJobError,
     BackupMountDownError,
 )
+from ..homeassistant.const import WSType
 from ..jobs.const import JOB_GROUP_BACKUP_MANAGER, JobCondition, JobExecutionLimit
 from ..jobs.decorator import Job
 from ..jobs.job_group import JobGroup
@@ -32,7 +39,13 @@ from ..utils.dt import utcnow
 from ..utils.sentinel import DEFAULT
 from ..utils.sentry import capture_exception
 from .backup import Backup
-from .const import DEFAULT_FREEZE_TIMEOUT, BackupJobStage, BackupType, RestoreJobStage
+from .const import (
+    DEFAULT_FREEZE_TIMEOUT,
+    LOCATION_CLOUD_BACKUP,
+    BackupJobStage,
+    BackupType,
+    RestoreJobStage,
+)
 from .utils import create_slug
 from .validate import ALL_FOLDERS, SCHEMA_BACKUPS_CONFIG
 
@@ -66,20 +79,32 @@ class BackupManager(FileConfiguration, JobGroup):
         self._data[ATTR_DAYS_UNTIL_STALE] = value
 
     @property
-    def backup_locations(self) -> list[Path]:
+    def backup_locations(self) -> dict[str | None, Path]:
         """List of locations containing backups."""
-        return [self.sys_config.path_backup] + [
-            mount.local_where
+        return {
+            None: self.sys_config.path_backup,
+            LOCATION_CLOUD_BACKUP: self.sys_config.path_core_backup,
+        } | {
+            mount.name: mount.local_where
             for mount in self.sys_mounts.backup_mounts
             if mount.state == UnitActiveState.ACTIVE
-        ]
+        }
 
     def get(self, slug: str) -> Backup:
         """Return backup object."""
         return self._backups.get(slug)
 
-    def _get_base_path(self, location: Mount | type[DEFAULT] | None = DEFAULT) -> Path:
+    def _get_base_path(
+        self,
+        location: Mount
+        | Literal[LOCATION_CLOUD_BACKUP]
+        | type[DEFAULT]
+        | None = DEFAULT,
+    ) -> Path:
         """Get base path for backup using location or default location."""
+        if location == LOCATION_CLOUD_BACKUP:
+            return self.sys_config.path_core_backup
+
         if location == DEFAULT and self.sys_mounts.default_backup_mount:
             location = self.sys_mounts.default_backup_mount
 
@@ -92,6 +117,24 @@ class BackupManager(FileConfiguration, JobGroup):
 
         return self.sys_config.path_backup
 
+    def _get_location_name(
+        self,
+        location: Mount
+        | Literal[LOCATION_CLOUD_BACKUP]
+        | type[DEFAULT]
+        | None = DEFAULT,
+    ) -> str | None:
+        """Get name of location (or None for local backup folder)."""
+        if location == LOCATION_CLOUD_BACKUP:
+            return location
+
+        if location == DEFAULT and self.sys_mounts.default_backup_mount:
+            location = self.sys_mounts.default_backup_mount
+
+        if location:
+            return location.name
+        return None
+
     def _change_stage(
         self,
         stage: BackupJobStage | RestoreJobStage,
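`backup_locations` is now a mapping from location name to directory: the `None` key is the classic local folder, `.cloud_backup` is the new Core-only folder, and each active backup mount contributes its name. `_get_base_path` then resolves whatever location spec the caller passed into one of those directories. A condensed stand-alone sketch of the resolution order (the sentinel object and paths are illustrative):

```python
from pathlib import Path

LOCATION_CLOUD_BACKUP = ".cloud_backup"
DEFAULT = object()  # sentinel meaning "use the default backup mount if one is set"

# name -> directory, mirroring BackupManager.backup_locations (values illustrative)
backup_locations: dict[str | None, Path] = {
    None: Path("/data/backup"),
    LOCATION_CLOUD_BACKUP: Path("/data/core/backup"),
    "backup_test": Path("/data/mounts/backup_test"),
}

def get_base_path(location=DEFAULT, default_mount: str | None = "backup_test") -> Path:
    """Resolve a location spec to its base directory."""
    if location == LOCATION_CLOUD_BACKUP:
        return backup_locations[LOCATION_CLOUD_BACKUP]
    if location is DEFAULT and default_mount:
        location = default_mount
    if location is not None and location is not DEFAULT:
        return backup_locations[location]
    return backup_locations[None]

print(get_base_path())                       # default mount wins when configured
print(get_base_path(None))                   # explicit None -> local folder
print(get_base_path(LOCATION_CLOUD_BACKUP))  # Core-only folder
```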
@@ -138,7 +181,10 @@
         sys_type: BackupType,
         password: str | None,
         compressed: bool = True,
-        location: Mount | type[DEFAULT] | None = DEFAULT,
+        location: Mount
+        | Literal[LOCATION_CLOUD_BACKUP]
+        | type[DEFAULT]
+        | None = DEFAULT,
     ) -> Backup:
         """Initialize a new backup object from name.
 
@@ -149,7 +195,7 @@
         tar_file = Path(self._get_base_path(location), f"{slug}.tar")
 
         # init object
-        backup = Backup(self.coresys, tar_file, slug)
+        backup = Backup(self.coresys, tar_file, slug, self._get_location_name(location))
         backup.new(name, date_str, sys_type, password, compressed)
 
         # Add backup ID to job
@@ -169,27 +215,46 @@
         """
         return self.reload()
 
-    async def reload(self) -> None:
+    async def reload(
+        self,
+        location: Mount
+        | Literal[LOCATION_CLOUD_BACKUP]
+        | type[DEFAULT]
+        | None = DEFAULT,
+        filename: str | None = None,
+    ) -> None:
         """Load exists backups."""
-        self._backups = {}
-
-        async def _load_backup(tar_file):
+        async def _load_backup(location: str | None, tar_file: Path) -> bool:
             """Load the backup."""
-            backup = Backup(self.coresys, tar_file, "temp")
+            backup = Backup(self.coresys, tar_file, "temp", location)
             if await backup.load():
-                self._backups[backup.slug] = Backup(
-                    self.coresys, tar_file, backup.slug, backup.data
-                )
+                if backup.slug in self._backups:
+                    self._backups[backup.slug].add_location(location)
+                else:
+                    self._backups[backup.slug] = Backup(
+                        self.coresys, tar_file, backup.slug, location, backup.data
+                    )
+                return True
+            return False
 
+        if location != DEFAULT and filename:
+            return await _load_backup(
+                self._get_location_name(location),
+                self._get_base_path(location) / filename,
+            )
+
+        self._backups = {}
         tasks = [
-            self.sys_create_task(_load_backup(tar_file))
-            for path in self.backup_locations
+            self.sys_create_task(_load_backup(_location, tar_file))
+            for _location, path in self.backup_locations.items()
             for tar_file in self._list_backup_files(path)
         ]
 
         _LOGGER.info("Found %d backup files", len(tasks))
         if tasks:
             await asyncio.wait(tasks)
+        return True
 
     def remove(self, backup: Backup) -> bool:
         """Remove a backup."""
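The reworked `reload` has two modes: given a `location` and `filename` it loads just that one file into the existing map, otherwise it rescans every known location. In both modes a backup whose slug is already known only gains a location instead of being replaced, which is how one archive that exists in several folders becomes a single entry with multiple `locations`. A compact sketch of that dedupe step:

```python
class FakeBackup:
    """Stand-in with only the pieces the reload loop touches."""

    def __init__(self, slug: str, location: str | None):
        self.slug = slug
        self.locations: dict[str | None, None] = {location: None}

    def add_location(self, location: str | None) -> None:
        self.locations[location] = None

backups: dict[str, FakeBackup] = {}

def load_backup(slug: str, location: str | None) -> None:
    """Merge by slug: the same archive in a new place only extends locations."""
    if slug in backups:
        backups[slug].add_location(location)
    else:
        backups[slug] = FakeBackup(slug, location)

load_backup("7fed74c8", ".cloud_backup")  # first seen in the Core folder
load_backup("7fed74c8", None)             # same slug in the local folder
load_backup("7fed74c8", "backup_test")    # and on a mount

print(list(backups["7fed74c8"].locations))  # ['.cloud_backup', None, 'backup_test']
```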
@@ -211,7 +276,7 @@ class BackupManager(FileConfiguration, JobGroup):
 
     async def import_backup(self, tar_file: Path) -> Backup | None:
         """Check backup tarfile and import it."""
-        backup = Backup(self.coresys, tar_file, "temp")
+        backup = Backup(self.coresys, tar_file, "temp", None)
 
         # Read meta data
         if not await backup.load():
@@ -234,7 +299,7 @@ class BackupManager(FileConfiguration, JobGroup):
             return None
 
         # Load new backup
-        backup = Backup(self.coresys, tar_origin, backup.slug, backup.data)
+        backup = Backup(self.coresys, tar_origin, backup.slug, None, backup.data)
         if not await backup.load():
             return None
         _LOGGER.info("Successfully imported %s", backup.slug)
@@ -293,6 +358,16 @@ class BackupManager(FileConfiguration, JobGroup):
                 return None
         else:
             self._backups[backup.slug] = backup
+            await self.sys_homeassistant.websocket.async_send_message(
+                {
+                    ATTR_TYPE: WSType.BACKUP_COMPLETE,
+                    ATTR_DATA: {
+                        ATTR_JOB_ID: self.sys_jobs.current.uuid,
+                        ATTR_SLUG: backup.slug,
+                        ATTR_PATH: backup.container_path.as_posix(),
+                    },
+                }
+            )
 
         if addon_start_tasks:
             self._change_stage(BackupJobStage.AWAIT_ADDON_RESTARTS, backup)
@@ -315,11 +390,17 @@ class BackupManager(FileConfiguration, JobGroup):
         name: str = "",
         password: str | None = None,
         compressed: bool = True,
-        location: Mount | type[DEFAULT] | None = DEFAULT,
+        location: Mount
+        | Literal[LOCATION_CLOUD_BACKUP]
+        | type[DEFAULT]
+        | None = DEFAULT,
         homeassistant_exclude_database: bool | None = None,
     ) -> Backup | None:
         """Create a full backup."""
-        if self._get_base_path(location) == self.sys_config.path_backup:
+        if self._get_base_path(location) in {
+            self.sys_config.path_backup,
+            self.sys_config.path_core_backup,
+        }:
             await Job.check_conditions(
                 self, {JobCondition.FREE_SPACE}, "BackupManager.do_backup_full"
             )
@@ -355,11 +436,17 @@ class BackupManager(FileConfiguration, JobGroup):
         password: str | None = None,
         homeassistant: bool = False,
         compressed: bool = True,
-        location: Mount | type[DEFAULT] | None = DEFAULT,
+        location: Mount
+        | Literal[LOCATION_CLOUD_BACKUP]
+        | type[DEFAULT]
+        | None = DEFAULT,
         homeassistant_exclude_database: bool | None = None,
     ) -> Backup | None:
         """Create a partial backup."""
-        if self._get_base_path(location) == self.sys_config.path_backup:
+        if self._get_base_path(location) in {
+            self.sys_config.path_backup,
+            self.sys_config.path_core_backup,
+        }:
             await Job.check_conditions(
                 self, {JobCondition.FREE_SPACE}, "BackupManager.do_backup_partial"
             )
@ -170,6 +170,11 @@ def initialize_system(coresys: CoreSys) -> None:
|
||||
_LOGGER.debug("Creating Supervisor backup folder at '%s'", config.path_backup)
|
||||
config.path_backup.mkdir()
|
||||
|
||||
# Core backup folder
|
||||
if not config.path_core_backup.is_dir():
|
||||
_LOGGER.debug("Creating Core backup folder at '%s", config.path_core_backup)
|
||||
config.path_core_backup.mkdir(parents=True)
|
||||
|
||||
# Share folder
|
||||
if not config.path_share.is_dir():
|
||||
_LOGGER.debug("Creating Supervisor share folder at '%s'", config.path_share)
|
||||
|
@ -50,6 +50,7 @@ MOUNTS_FOLDER = PurePath("mounts")
|
||||
MOUNTS_CREDENTIALS = PurePath(".mounts_credentials")
|
||||
EMERGENCY_DATA = PurePath("emergency")
|
||||
ADDON_CONFIGS = PurePath("addon_configs")
|
||||
CORE_BACKUP_DATA = PurePath("core/backup")
|
||||
|
||||
DEFAULT_BOOT_TIME = datetime.fromtimestamp(0, UTC).isoformat()
|
||||
|
||||
@ -273,6 +274,16 @@ class CoreConfig(FileConfiguration):
|
||||
"""Return root backup data folder external for Docker."""
|
||||
return PurePath(self.path_extern_supervisor, BACKUP_DATA)
|
||||
|
||||
@property
|
||||
def path_core_backup(self) -> Path:
|
||||
"""Return core specific backup folder (cloud backup)."""
|
||||
return self.path_supervisor / CORE_BACKUP_DATA
|
||||
|
||||
@property
|
||||
def path_extern_core_backup(self) -> PurePath:
|
||||
"""Return core specific backup folder (cloud backup) external for Docker."""
|
||||
return PurePath(self.path_extern_supervisor, CORE_BACKUP_DATA)
|
||||
|
||||
@property
|
||||
def path_share(self) -> Path:
|
||||
"""Return root share data folder."""
|
||||
|
@ -220,6 +220,7 @@ ATTR_IP_ADDRESS = "ip_address"
|
||||
ATTR_IPV4 = "ipv4"
|
||||
ATTR_IPV6 = "ipv6"
|
||||
ATTR_ISSUES = "issues"
|
||||
ATTR_JOB_ID = "job_id"
|
||||
ATTR_JOURNALD = "journald"
|
||||
ATTR_KERNEL = "kernel"
|
||||
ATTR_KERNEL_MODULES = "kernel_modules"
|
||||
@ -227,7 +228,7 @@ ATTR_LABELS = "labels"
|
||||
ATTR_LAST_BOOT = "last_boot"
|
||||
ATTR_LEGACY = "legacy"
|
||||
ATTR_LOCALS = "locals"
|
||||
ATTR_LOCATON = "location"
|
||||
ATTR_LOCATION = "location"
|
||||
ATTR_LOGGING = "logging"
|
||||
ATTR_LOGO = "logo"
|
||||
ATTR_LONG_DESCRIPTION = "long_description"
|
||||
@ -259,6 +260,7 @@ ATTR_PANEL_TITLE = "panel_title"
|
||||
ATTR_PANELS = "panels"
|
||||
ATTR_PARENT = "parent"
|
||||
ATTR_PASSWORD = "password"
|
||||
ATTR_PATH = "path"
|
||||
ATTR_PLUGINS = "plugins"
|
||||
ATTR_PORT = "port"
|
||||
ATTR_PORTS = "ports"
|
||||
|
@@ -50,6 +50,16 @@ from .const import (
     MOUNT_DEV,
     MOUNT_DOCKER,
     MOUNT_UDEV,
+    PATH_ALL_ADDON_CONFIGS,
+    PATH_BACKUP,
+    PATH_HOMEASSISTANT_CONFIG,
+    PATH_HOMEASSISTANT_CONFIG_LEGACY,
+    PATH_LOCAL_ADDONS,
+    PATH_MEDIA,
+    PATH_PRIVATE_DATA,
+    PATH_PUBLIC_CONFIG,
+    PATH_SHARE,
+    PATH_SSL,
     Capabilities,
     MountType,
     PropagationMode,
@@ -334,7 +344,7 @@ class DockerAddon(DockerInterface):
             Mount(
                 type=MountType.BIND,
                 source=self.addon.path_extern_data.as_posix(),
-                target=target_data_path or "/data",
+                target=target_data_path or PATH_PRIVATE_DATA.as_posix(),
                 read_only=False,
             ),
         ]
@@ -345,7 +355,8 @@ class DockerAddon(DockerInterface):
                 Mount(
                     type=MountType.BIND,
                     source=self.sys_config.path_extern_homeassistant.as_posix(),
-                    target=addon_mapping[MappingType.CONFIG].path or "/config",
+                    target=addon_mapping[MappingType.CONFIG].path
+                    or PATH_HOMEASSISTANT_CONFIG_LEGACY.as_posix(),
                     read_only=addon_mapping[MappingType.CONFIG].read_only,
                 )
             )
@@ -358,7 +369,7 @@ class DockerAddon(DockerInterface):
                     type=MountType.BIND,
                     source=self.addon.path_extern_config.as_posix(),
                     target=addon_mapping[MappingType.ADDON_CONFIG].path
-                    or "/config",
+                    or PATH_PUBLIC_CONFIG.as_posix(),
                     read_only=addon_mapping[MappingType.ADDON_CONFIG].read_only,
                 )
             )
@@ -370,7 +381,7 @@ class DockerAddon(DockerInterface):
                     type=MountType.BIND,
                     source=self.sys_config.path_extern_homeassistant.as_posix(),
                     target=addon_mapping[MappingType.HOMEASSISTANT_CONFIG].path
-                    or "/homeassistant",
+                    or PATH_HOMEASSISTANT_CONFIG.as_posix(),
                     read_only=addon_mapping[
                         MappingType.HOMEASSISTANT_CONFIG
                     ].read_only,
@@ -383,7 +394,7 @@ class DockerAddon(DockerInterface):
                     type=MountType.BIND,
                     source=self.sys_config.path_extern_addon_configs.as_posix(),
                     target=addon_mapping[MappingType.ALL_ADDON_CONFIGS].path
-                    or "/addon_configs",
+                    or PATH_ALL_ADDON_CONFIGS.as_posix(),
                     read_only=addon_mapping[MappingType.ALL_ADDON_CONFIGS].read_only,
                 )
             )
@@ -393,7 +404,7 @@ class DockerAddon(DockerInterface):
                 Mount(
                     type=MountType.BIND,
                     source=self.sys_config.path_extern_ssl.as_posix(),
-                    target=addon_mapping[MappingType.SSL].path or "/ssl",
+                    target=addon_mapping[MappingType.SSL].path or PATH_SSL.as_posix(),
                     read_only=addon_mapping[MappingType.SSL].read_only,
                 )
             )
@@ -403,7 +414,8 @@ class DockerAddon(DockerInterface):
                 Mount(
                     type=MountType.BIND,
                     source=self.sys_config.path_extern_addons_local.as_posix(),
-                    target=addon_mapping[MappingType.ADDONS].path or "/addons",
+                    target=addon_mapping[MappingType.ADDONS].path
+                    or PATH_LOCAL_ADDONS.as_posix(),
                     read_only=addon_mapping[MappingType.ADDONS].read_only,
                 )
             )
@@ -413,8 +425,10 @@ class DockerAddon(DockerInterface):
                 Mount(
                     type=MountType.BIND,
                     source=self.sys_config.path_extern_backup.as_posix(),
-                    target=addon_mapping[MappingType.BACKUP].path or "/backup",
+                    target=addon_mapping[MappingType.BACKUP].path
+                    or PATH_BACKUP.as_posix(),
                     read_only=addon_mapping[MappingType.BACKUP].read_only,
                     propagation=PropagationMode.RSLAVE,
                 )
             )
 
@@ -423,7 +437,8 @@ class DockerAddon(DockerInterface):
                 Mount(
                     type=MountType.BIND,
                     source=self.sys_config.path_extern_share.as_posix(),
-                    target=addon_mapping[MappingType.SHARE].path or "/share",
+                    target=addon_mapping[MappingType.SHARE].path
+                    or PATH_SHARE.as_posix(),
                     read_only=addon_mapping[MappingType.SHARE].read_only,
                     propagation=PropagationMode.RSLAVE,
                 )
@@ -434,7 +449,8 @@ class DockerAddon(DockerInterface):
                 Mount(
                     type=MountType.BIND,
                     source=self.sys_config.path_extern_media.as_posix(),
-                    target=addon_mapping[MappingType.MEDIA].path or "/media",
+                    target=addon_mapping[MappingType.MEDIA].path
+                    or PATH_MEDIA.as_posix(),
                     read_only=addon_mapping[MappingType.MEDIA].read_only,
                     propagation=PropagationMode.RSLAVE,
                 )
@@ -17,6 +17,7 @@ from .const import (
     MOUNT_DEV,
     MOUNT_MACHINE_ID,
     MOUNT_UDEV,
+    PATH_PRIVATE_DATA,
     Capabilities,
     MountType,
 )
@@ -48,7 +49,7 @@ class DockerAudio(DockerInterface, CoreSysAttributes):
             Mount(
                 type=MountType.BIND,
                 source=self.sys_config.path_extern_audio.as_posix(),
-                target="/data",
+                target=PATH_PRIVATE_DATA.as_posix(),
                 read_only=False,
             ),
             MOUNT_DBUS,
@@ -1,6 +1,7 @@
 """Docker constants."""
 
 from enum import StrEnum
+from pathlib import PurePath
 
 from docker.types import Mount
 
@@ -91,3 +92,15 @@ MOUNT_MACHINE_ID = Mount(
 MOUNT_UDEV = Mount(
     type=MountType.BIND, source="/run/udev", target="/run/udev", read_only=True
 )
+
+PATH_PRIVATE_DATA = PurePath("/data")
+PATH_HOMEASSISTANT_CONFIG_LEGACY = PurePath("/config")
+PATH_HOMEASSISTANT_CONFIG = PurePath("/homeassistant")
+PATH_PUBLIC_CONFIG = PurePath("/config")
+PATH_ALL_ADDON_CONFIGS = PurePath("/addon_configs")
+PATH_SSL = PurePath("/ssl")
+PATH_LOCAL_ADDONS = PurePath("/addons")
+PATH_BACKUP = PurePath("/backup")
+PATH_SHARE = PurePath("/share")
+PATH_MEDIA = PurePath("/media")
+PATH_CLOUD_BACKUP = PurePath("/cloud_backup")
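Collecting the container paths as `PurePath` constants replaces string literals that were previously repeated across the Docker wrappers, so the same `/backup`, `/cloud_backup`, and friends can be shared by the mount definitions, `Mount.container_where`, and `Backup.container_path`. Note that `/config` intentionally appears twice: for legacy add-ons it means the Home Assistant config (`PATH_HOMEASSISTANT_CONFIG_LEGACY`), while for current add-ons it is the add-on's own public config (`PATH_PUBLIC_CONFIG`). Path arithmetic works the same as with strings:

```python
from pathlib import PurePath

PATH_BACKUP = PurePath("/backup")
PATH_CLOUD_BACKUP = PurePath("/cloud_backup")

# PurePath does pure path arithmetic only, with no filesystem access,
# which fits paths that exist inside *other* containers.
print((PATH_BACKUP / "backup_test").as_posix())         # /backup/backup_test
print((PATH_CLOUD_BACKUP / "7fed74c8.tar").as_posix())  # /cloud_backup/7fed74c8.tar
```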
@@ -22,6 +22,12 @@ from .const import (
     MOUNT_DEV,
     MOUNT_MACHINE_ID,
     MOUNT_UDEV,
+    PATH_BACKUP,
+    PATH_CLOUD_BACKUP,
+    PATH_MEDIA,
+    PATH_PUBLIC_CONFIG,
+    PATH_SHARE,
+    PATH_SSL,
     MountType,
     PropagationMode,
 )
@@ -96,7 +102,7 @@ class DockerHomeAssistant(DockerInterface):
             Mount(
                 type=MountType.BIND,
                 source=self.sys_config.path_extern_homeassistant.as_posix(),
-                target="/config",
+                target=PATH_PUBLIC_CONFIG.as_posix(),
                 read_only=False,
             ),
         ]
@@ -109,23 +115,36 @@ class DockerHomeAssistant(DockerInterface):
             Mount(
                 type=MountType.BIND,
                 source=self.sys_config.path_extern_ssl.as_posix(),
-                target="/ssl",
+                target=PATH_SSL.as_posix(),
                 read_only=True,
             ),
             Mount(
                 type=MountType.BIND,
                 source=self.sys_config.path_extern_share.as_posix(),
-                target="/share",
+                target=PATH_SHARE.as_posix(),
                 read_only=False,
                 propagation=PropagationMode.RSLAVE.value,
             ),
             Mount(
                 type=MountType.BIND,
                 source=self.sys_config.path_extern_media.as_posix(),
-                target="/media",
+                target=PATH_MEDIA.as_posix(),
                 read_only=False,
                 propagation=PropagationMode.RSLAVE.value,
             ),
+            Mount(
+                type=MountType.BIND,
+                source=self.sys_config.path_extern_backup.as_posix(),
+                target=PATH_BACKUP.as_posix(),
+                read_only=False,
+                propagation=PropagationMode.RSLAVE.value,
+            ),
+            Mount(
+                type=MountType.BIND,
+                source=self.sys_config.path_extern_core_backup.as_posix(),
+                target=PATH_CLOUD_BACKUP.as_posix(),
+                read_only=False,
+            ),
             # Configuration audio
             Mount(
                 type=MountType.BIND,
@@ -32,6 +32,7 @@ class WSType(StrEnum):
     SUPERVISOR_EVENT = "supervisor/event"
     BACKUP_START = "backup/start"
     BACKUP_END = "backup/end"
+    BACKUP_COMPLETE = "backup/supervisor/backup_complete"
 
 
 class WSEvent(StrEnum):
@@ -34,6 +34,7 @@ MIN_VERSION = {
     WSType.SUPERVISOR_EVENT: "2021.2.4",
     WSType.BACKUP_START: "2022.1.0",
     WSType.BACKUP_END: "2022.1.0",
+    WSType.BACKUP_COMPLETE: "2024.11.99",
 }
 
 _LOGGER: logging.Logger = logging.getLogger(__name__)
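Adding `BACKUP_COMPLETE` to `MIN_VERSION` means Supervisor only pushes the new message to Core versions that can handle it; "2024.11.99" effectively gates it to 2024.12 development builds and later. The guard is a plain version comparison before sending, sketched here with `awesomeversion`, the library Supervisor uses for version handling:

```python
from awesomeversion import AwesomeVersion

MIN_VERSION = {"backup/supervisor/backup_complete": AwesomeVersion("2024.11.99")}

def should_send(msg_type: str, ha_version: AwesomeVersion) -> bool:
    """Skip message types the connected Core is too old to handle."""
    min_version = MIN_VERSION.get(msg_type)
    return min_version is None or ha_version >= min_version

print(should_send("backup/supervisor/backup_complete", AwesomeVersion("2024.12.0")))  # True
print(should_send("backup/supervisor/backup_complete", AwesomeVersion("2024.11.3")))  # False
```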
@@ -141,6 +141,15 @@ class MountManager(FileConfiguration, CoreSysAttributes):
             ]
         )
 
+        # Bind all backup mounts to directories in backup
+        if self.backup_mounts:
+            await asyncio.wait(
+                [
+                    self.sys_create_task(self._bind_backup(mount))
+                    for mount in self.backup_mounts
+                ]
+            )
+
     @Job(name="mount_manager_reload", conditions=[JobCondition.MOUNT_AVAILABLE])
     async def reload(self) -> None:
         """Update mounts info via dbus and reload failed mounts."""
@@ -206,6 +215,8 @@ class MountManager(FileConfiguration, CoreSysAttributes):
             await self._bind_media(mount)
         elif mount.usage == MountUsage.SHARE:
             await self._bind_share(mount)
+        elif mount.usage == MountUsage.BACKUP:
+            await self._bind_backup(mount)
 
     @Job(
         name="mount_manager_remove_mount",
@@ -258,6 +269,10 @@ class MountManager(FileConfiguration, CoreSysAttributes):
         if (bound_mount := self._bound_mounts.get(name)) and bound_mount.emergency:
             await self._bind_mount(bound_mount.mount, bound_mount.bind_mount.where)
 
+    async def _bind_backup(self, mount: Mount) -> None:
+        """Bind a backup mount to backup directory."""
+        await self._bind_mount(mount, self.sys_config.path_extern_backup / mount.name)
+
     async def _bind_media(self, mount: Mount) -> None:
         """Bind a media mount to media directory."""
         await self._bind_mount(mount, self.sys_config.path_extern_media / mount.name)
@@ -2,6 +2,7 @@
 
 from abc import ABC, abstractmethod
 import asyncio
+from functools import cached_property
 import logging
 from pathlib import Path, PurePath
 
@@ -29,6 +30,7 @@ from ..dbus.const import (
     UnitActiveState,
 )
 from ..dbus.systemd import SystemdUnit
+from ..docker.const import PATH_BACKUP, PATH_MEDIA, PATH_SHARE
 from ..exceptions import (
     DBusError,
     DBusSystemdNoSuchUnit,
@@ -150,7 +152,7 @@ class Mount(CoreSysAttributes, ABC):
         """Get state of mount."""
         return self._state
 
-    @property
+    @cached_property
     def local_where(self) -> Path | None:
         """Return where this is mounted within supervisor container.
 
@@ -162,6 +164,21 @@ class Mount(CoreSysAttributes, ABC):
             else None
         )
 
+    @property
+    def container_where(self) -> PurePath | None:
+        """Return where this is made available in managed containers (core, addons, etc.).
+
+        This returns none if it is not made available in managed containers.
+        """
+        match self.usage:
+            case MountUsage.BACKUP:
+                return PurePath(PATH_BACKUP, self.name)
+            case MountUsage.MEDIA:
+                return PurePath(PATH_MEDIA, self.name)
+            case MountUsage.SHARE:
+                return PurePath(PATH_SHARE, self.name)
+        return None
+
     @property
     def failed_issue(self) -> Issue:
         """Get issue used if this mount has failed."""
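`container_where` complements `local_where`: the latter is where the mount lands inside the Supervisor container, the former is where users will find it from Core or an add-on, derived purely from the mount's usage. A stand-alone equivalent of the lookup (Python 3.11+ for `StrEnum`):

```python
from enum import StrEnum
from pathlib import PurePath

class MountUsage(StrEnum):
    BACKUP = "backup"
    MEDIA = "media"
    SHARE = "share"
    INTERNAL = "internal"

def container_where(usage: MountUsage, name: str) -> PurePath | None:
    """Map a mount's usage to its directory inside managed containers."""
    match usage:
        case MountUsage.BACKUP:
            return PurePath("/backup", name)
        case MountUsage.MEDIA:
            return PurePath("/media", name)
        case MountUsage.SHARE:
            return PurePath("/share", name)
    return None  # e.g. internal mounts are not exposed to other containers

print(container_where(MountUsage.BACKUP, "backup_test"))  # /backup/backup_test
print(container_where(MountUsage.INTERNAL, "creds"))      # None
```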
@@ -11,7 +11,7 @@ from voluptuous.humanize import humanize_error
 
 from ..addons.validate import SCHEMA_ADDON_CONFIG, SCHEMA_ADDON_TRANSLATIONS
 from ..const import (
-    ATTR_LOCATON,
+    ATTR_LOCATION,
     ATTR_REPOSITORY,
     ATTR_SLUG,
     ATTR_TRANSLATIONS,
@@ -223,7 +223,7 @@ class StoreData(CoreSysAttributes):
 
         # store
         addon_config[ATTR_REPOSITORY] = repository
-        addon_config[ATTR_LOCATON] = str(addon.parent)
+        addon_config[ATTR_LOCATION] = str(addon.parent)
         addon_config[ATTR_TRANSLATIONS] = _read_addon_translations(addon.parent)
         addons_config[addon_slug] = addon_config
@@ -2,6 +2,7 @@
 
 import asyncio
 from pathlib import Path, PurePath
+from shutil import copy
 from typing import Any
 from unittest.mock import ANY, AsyncMock, PropertyMock, patch
 
@@ -19,6 +20,9 @@ from supervisor.homeassistant.module import HomeAssistant
 from supervisor.mounts.mount import Mount
 from supervisor.supervisor import Supervisor
 
+from tests.common import get_fixture_path
+from tests.const import TEST_ADDON_SLUG
+
 
 async def test_info(api_client, coresys: CoreSys, mock_full_backup: Backup):
     """Test info endpoint."""
@@ -467,3 +471,132 @@ async def test_restore_immediate_errors(
     )
     assert resp.status == 400
     assert "No Home Assistant" in (await resp.json())["message"]
+
+
+@pytest.mark.parametrize(
+    ("folder", "location"), [("backup", None), ("core/backup", ".cloud_backup")]
+)
+async def test_reload(
+    request: pytest.FixtureRequest,
+    api_client: TestClient,
+    coresys: CoreSys,
+    tmp_supervisor_data: Path,
+    folder: str,
+    location: str | None,
+):
+    """Test backups reload."""
+    assert not coresys.backups.list_backups
+
+    backup_file = get_fixture_path("backup_example.tar")
+    copy(backup_file, tmp_supervisor_data / folder)
+
+    resp = await api_client.post("/backups/reload")
+    assert resp.status == 200
+
+    assert len(coresys.backups.list_backups) == 1
+    assert (backup := coresys.backups.get("7fed74c8"))
+    assert backup.location == location
+    assert backup.locations == [location]
+
+
+@pytest.mark.parametrize(
+    ("folder", "location"), [("backup", None), ("core/backup", ".cloud_backup")]
+)
+async def test_partial_reload(
+    request: pytest.FixtureRequest,
+    api_client: TestClient,
+    coresys: CoreSys,
+    tmp_supervisor_data: Path,
+    folder: str,
+    location: str | None,
+):
+    """Test partial backups reload."""
+    assert not coresys.backups.list_backups
+
+    backup_file = get_fixture_path("backup_example.tar")
+    copy(backup_file, tmp_supervisor_data / folder)
+
+    resp = await api_client.post(
+        "/backups/reload", json={"location": location, "filename": "backup_example.tar"}
+    )
+    assert resp.status == 200
+
+    assert len(coresys.backups.list_backups) == 1
+    assert (backup := coresys.backups.get("7fed74c8"))
+    assert backup.location == location
+    assert backup.locations == [location]
+
+
+async def test_invalid_reload(api_client: TestClient):
+    """Test invalid reload."""
+    resp = await api_client.post("/backups/reload", json={"location": "no_filename"})
+    assert resp.status == 400
+
+    resp = await api_client.post(
+        "/backups/reload", json={"filename": "no_location.tar"}
+    )
+    assert resp.status == 400
+
+    resp = await api_client.post(
+        "/backups/reload", json={"location": None, "filename": "no/sub/paths.tar"}
+    )
+    assert resp.status == 400
+
+    resp = await api_client.post(
+        "/backups/reload", json={"location": None, "filename": "not_tar.tar.gz"}
+    )
+    assert resp.status == 400
+
+
+@pytest.mark.usefixtures("install_addon_ssh")
+@pytest.mark.parametrize("api_client", TEST_ADDON_SLUG, indirect=True)
+async def test_cloud_backup_core_only(api_client: TestClient, mock_full_backup: Backup):
+    """Test only core can access cloud backup location."""
+    resp = await api_client.post(
+        "/backups/reload",
+        json={"location": ".cloud_backup", "filename": "caller_not_core.tar"},
+    )
+    assert resp.status == 403
+
+    resp = await api_client.post(
+        "/backups/new/full",
+        json={
+            "name": "Mount test",
+            "location": ".cloud_backup",
+        },
+    )
+    assert resp.status == 403
+
+    resp = await api_client.post(
+        "/backups/new/partial",
+        json={"name": "Test", "homeassistant": True, "location": ".cloud_backup"},
+    )
+    assert resp.status == 403
+
+    # pylint: disable-next=protected-access
+    mock_full_backup._locations = {".cloud_backup": None}
+    assert mock_full_backup.location == ".cloud_backup"
+
+    resp = await api_client.post(f"/backups/{mock_full_backup.slug}/restore/full")
+    assert resp.status == 403
+
+    resp = await api_client.post(
+        f"/backups/{mock_full_backup.slug}/restore/partial",
+        json={"homeassistant": True},
+    )
+    assert resp.status == 403
+
+    resp = await api_client.delete(f"/backups/{mock_full_backup.slug}")
+    assert resp.status == 403
+
+
+async def test_partial_reload_errors_no_file(
+    api_client: TestClient,
+    coresys: CoreSys,
+    tmp_supervisor_data: Path,
+):
+    """Partial reload returns error when asked to reload non-existent file."""
+    resp = await api_client.post(
+        "/backups/reload", json={"location": None, "filename": "does_not_exist.tar"}
+    )
+    assert resp.status == 400
@@ -81,6 +81,7 @@ async def test_api_create_mount(
             "share": "backups",
             "state": "active",
             "read_only": False,
+            "user_path": "/backup/backup_test",
         }
     ]
     coresys.mounts.save_data.assert_called_once()
@@ -257,6 +258,7 @@ async def test_api_update_mount(
             "share": "new_backups",
             "state": "active",
             "read_only": False,
+            "user_path": "/backup/backup_test",
         }
     ]
     coresys.mounts.save_data.assert_called_once()
@@ -292,8 +294,9 @@ async def test_api_update_dbus_error_mount_remains(
     """Test mount remains in list with unsuccessful state if dbus error occurs during update."""
     systemd_service: SystemdService = all_dbus_services["systemd"]
     systemd_unit_service: SystemdUnitService = all_dbus_services["systemd_unit"]
-    systemd_unit_service.active_state = ["failed", "inactive"]
+    systemd_unit_service.active_state = ["failed", "inactive", "failed", "inactive"]
     systemd_service.response_get_unit = [
         "/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
+        "/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
         DBusError("org.freedesktop.systemd1.NoSuchUnit", "error"),
     ]
@@ -325,6 +328,7 @@ async def test_api_update_dbus_error_mount_remains(
             "share": "backups",
             "state": None,
             "read_only": False,
+            "user_path": "/backup/backup_test",
         }
     ]
 
@@ -372,6 +376,7 @@ async def test_api_update_dbus_error_mount_remains(
             "share": "backups",
             "state": None,
             "read_only": False,
+            "user_path": "/backup/backup_test",
         }
     ]
 
@@ -828,6 +833,7 @@ async def test_api_create_read_only_cifs_mount(
             "share": "media",
             "state": "active",
             "read_only": True,
+            "user_path": "/media/media_test",
         }
     ]
     coresys.mounts.save_data.assert_called_once()
@@ -868,6 +874,7 @@ async def test_api_create_read_only_nfs_mount(
             "path": "/media/camera",
             "state": "active",
             "read_only": True,
+            "user_path": "/media/media_test",
         }
     ]
     coresys.mounts.save_data.assert_called_once()
@@ -10,7 +10,7 @@ from supervisor.coresys import CoreSys
 
 async def test_new_backup_stays_in_folder(coresys: CoreSys, tmp_path: Path):
     """Test making a new backup operates entirely within folder where backup will be stored."""
-    backup = Backup(coresys, tmp_path / "my_backup.tar", "test")
+    backup = Backup(coresys, tmp_path / "my_backup.tar", "test", None)
     backup.new("test", "2023-07-21T21:05:00.000000+00:00", BackupType.FULL)
     assert not listdir(tmp_path)
@@ -4,7 +4,7 @@ import asyncio
 import errno
 from functools import partial
 from pathlib import Path
-from shutil import rmtree
+from shutil import copy, rmtree
 from unittest.mock import ANY, AsyncMock, MagicMock, Mock, PropertyMock, patch
 
 from awesomeversion import AwesomeVersion
@@ -34,10 +34,12 @@ from supervisor.homeassistant.api import HomeAssistantAPI
 from supervisor.homeassistant.const import WSType
 from supervisor.homeassistant.core import HomeAssistantCore
 from supervisor.homeassistant.module import HomeAssistant
+from supervisor.jobs import JobSchedulerOptions
 from supervisor.jobs.const import JobCondition
 from supervisor.mounts.mount import Mount
 from supervisor.utils.json import read_json_file, write_json_file
 
+from tests.common import get_fixture_path
 from tests.const import TEST_ADDON_SLUG
 from tests.dbus_service_mocks.base import DBusServiceMock
 from tests.dbus_service_mocks.systemd import Systemd as SystemdService
@@ -626,7 +628,8 @@ async def test_full_backup_to_mount(
         },
     )
     await coresys.mounts.create_mount(mount)
-    assert mount_dir in coresys.backups.backup_locations
+    assert "backup_test" in coresys.backups.backup_locations
+    assert coresys.backups.backup_locations["backup_test"] == mount_dir
 
     # Make a backup and add it to mounts. Confirm it exists in the right place
     coresys.core.state = CoreState.RUNNING
@@ -671,7 +674,8 @@ async def test_partial_backup_to_mount(
         },
     )
     await coresys.mounts.create_mount(mount)
-    assert mount_dir in coresys.backups.backup_locations
+    assert "backup_test" in coresys.backups.backup_locations
+    assert coresys.backups.backup_locations["backup_test"] == mount_dir
 
     # Make a backup and add it to mounts. Confirm it exists in the right place
     coresys.core.state = CoreState.RUNNING
@@ -723,7 +727,8 @@ async def test_backup_to_down_mount_error(
         },
     )
     await coresys.mounts.create_mount(mount)
-    assert mount_dir in coresys.backups.backup_locations
+    assert "backup_test" in coresys.backups.backup_locations
+    assert coresys.backups.backup_locations["backup_test"] == mount_dir
 
     # Attempt to make a backup which fails because is_mount on directory is false
     mock_is_mount.return_value = False
@@ -1866,3 +1871,161 @@ async def test_core_pre_backup_actions_failed(
         f"Preparing backup of Home Assistant Core failed due to: {pre_backup_error['message']}"
         in caplog.text
     )
+
+
+@pytest.mark.usefixtures("mount_propagation", "mock_is_mount", "path_extern")
+async def test_reload_multiple_locations(coresys: CoreSys, tmp_supervisor_data: Path):
+    """Test reload with a backup that exists in multiple locations."""
+    (mount_dir := coresys.config.path_mounts / "backup_test").mkdir()
+    await coresys.mounts.load()
+    mount = Mount.from_dict(
+        coresys,
+        {
+            "name": "backup_test",
+            "usage": "backup",
+            "type": "cifs",
+            "server": "test.local",
+            "share": "test",
+        },
+    )
+    await coresys.mounts.create_mount(mount)
+
+    assert not coresys.backups.list_backups
+
+    backup_file = get_fixture_path("backup_example.tar")
+    copy(backup_file, tmp_supervisor_data / "core/backup")
+    await coresys.backups.reload()
+
+    assert coresys.backups.list_backups
+    assert (backup := coresys.backups.get("7fed74c8"))
+    assert backup.location == ".cloud_backup"
+    assert backup.locations == [".cloud_backup"]
+    assert backup.all_locations == {".cloud_backup"}
+
+    copy(backup_file, tmp_supervisor_data / "backup")
+    await coresys.backups.reload()
+
+    assert coresys.backups.list_backups
+    assert (backup := coresys.backups.get("7fed74c8"))
+    assert backup.location is None
+    assert backup.locations == [None]
+    assert backup.all_locations == {".cloud_backup", None}
+
+    copy(backup_file, mount_dir)
+    await coresys.backups.reload()
+
+    assert coresys.backups.list_backups
+    assert (backup := coresys.backups.get("7fed74c8"))
+    assert backup.location in {None, "backup_test"}
+    assert backup.locations == [None, "backup_test"]
+    assert backup.all_locations == {".cloud_backup", None, "backup_test"}
+
+
+@pytest.mark.usefixtures("mount_propagation", "mock_is_mount", "path_extern")
+async def test_partial_reload_multiple_locations(
+    coresys: CoreSys, tmp_supervisor_data: Path
+):
+    """Test a partial reload with a backup that exists in multiple locations."""
+    (mount_dir := coresys.config.path_mounts / "backup_test").mkdir()
+    await coresys.mounts.load()
+    mount = Mount.from_dict(
+        coresys,
+        {
+            "name": "backup_test",
+            "usage": "backup",
+            "type": "cifs",
+            "server": "test.local",
+            "share": "test",
+        },
+    )
+    await coresys.mounts.create_mount(mount)
+
+    assert not coresys.backups.list_backups
+
+    backup_file = get_fixture_path("backup_example.tar")
+    copy(backup_file, tmp_supervisor_data / "core/backup")
+    await coresys.backups.reload()
+
+    assert coresys.backups.list_backups
+    assert (backup := coresys.backups.get("7fed74c8"))
+    assert backup.location == ".cloud_backup"
+    assert backup.locations == [".cloud_backup"]
+    assert backup.all_locations == {".cloud_backup"}
+
+    copy(backup_file, tmp_supervisor_data / "backup")
+    await coresys.backups.reload(location=None, filename="backup_example.tar")
+
+    assert coresys.backups.list_backups
+    assert (backup := coresys.backups.get("7fed74c8"))
+    assert backup.location is None
+    assert backup.locations == [None]
+    assert backup.all_locations == {".cloud_backup", None}
+
+    copy(backup_file, mount_dir)
+    await coresys.backups.reload(location=mount, filename="backup_example.tar")
+
+    assert coresys.backups.list_backups
+    assert (backup := coresys.backups.get("7fed74c8"))
+    assert backup.location is None
+    assert backup.locations == [None, "backup_test"]
+    assert backup.all_locations == {".cloud_backup", None, "backup_test"}
+
+
+@pytest.mark.parametrize(
+    ("location", "folder"), [(None, "backup"), (".cloud_backup", "cloud_backup")]
+)
+@pytest.mark.usefixtures("tmp_supervisor_data")
+async def test_partial_backup_complete_ws_message(
+    coresys: CoreSys, ha_ws_client: AsyncMock, location: str | None, folder: str
+):
+    """Test WS message notifies core when a partial backup is complete."""
+    coresys.core.state = CoreState.RUNNING
+    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
+    ha_ws_client.ha_version = AwesomeVersion("2024.12.0")
+
+    # Test a partial backup
+    job, backup_task = coresys.jobs.schedule_job(
+        coresys.backups.do_backup_partial,
+        JobSchedulerOptions(),
+        "test",
+        folders=["media"],
+        location=location,
+    )
+    backup: Backup = await backup_task
+
+    assert ha_ws_client.async_send_command.call_args_list[-3].args[0] == {
+        "type": "backup/supervisor/backup_complete",
+        "data": {
+            "job_id": job.uuid,
+            "slug": backup.slug,
+            "path": f"/{folder}/{backup.slug}.tar",
+        },
+    }
+
+
+@pytest.mark.parametrize(
+    ("location", "folder"), [(None, "backup"), (".cloud_backup", "cloud_backup")]
+)
+@pytest.mark.usefixtures("tmp_supervisor_data")
+async def test_full_backup_complete_ws_message(
+    coresys: CoreSys, ha_ws_client: AsyncMock, location: str | None, folder: str
+):
+    """Test WS message notifies core when a full backup is complete."""
+    coresys.core.state = CoreState.RUNNING
+    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
+    ha_ws_client.ha_version = AwesomeVersion("2024.12.0")
+
+    # Test a full backup
+    job, backup_task = coresys.jobs.schedule_job(
+        coresys.backups.do_backup_full, JobSchedulerOptions(), "test", location=location
+    )
+    backup: Backup = await backup_task
+
+    assert ha_ws_client.async_send_command.call_args_list[-3].args[0] == {
+        "type": "backup/supervisor/backup_complete",
+        "data": {
+            "job_id": job.uuid,
+            "slug": backup.slug,
+            "path": f"/{folder}/{backup.slug}.tar",
+        },
+    }
@ -418,6 +418,7 @@ async def tmp_supervisor_data(coresys: CoreSys, tmp_path: Path) -> Path:
|
||||
coresys.config.path_addons_data.mkdir(parents=True)
|
||||
coresys.config.path_addon_configs.mkdir(parents=True)
|
||||
coresys.config.path_ssl.mkdir()
|
||||
coresys.config.path_core_backup.mkdir(parents=True)
|
||||
yield tmp_path
|
||||
|
||||
|
||||
@ -579,7 +580,7 @@ def install_addon_example(coresys: CoreSys, repository):
|
||||
@pytest.fixture
|
||||
async def mock_full_backup(coresys: CoreSys, tmp_path) -> Backup:
|
||||
"""Mock a full backup."""
|
||||
mock_backup = Backup(coresys, Path(tmp_path, "test_backup"), "test")
|
||||
mock_backup = Backup(coresys, Path(tmp_path, "test_backup"), "test", None)
|
||||
mock_backup.new("Test", utcnow().isoformat(), BackupType.FULL)
|
||||
mock_backup.repositories = ["https://github.com/awesome-developer/awesome-repo"]
|
||||
mock_backup.docker = {}
|
||||
@ -603,7 +604,7 @@ async def mock_full_backup(coresys: CoreSys, tmp_path) -> Backup:
|
||||
@pytest.fixture
|
||||
async def mock_partial_backup(coresys: CoreSys, tmp_path) -> Backup:
|
||||
"""Mock a partial backup."""
|
||||
mock_backup = Backup(coresys, Path(tmp_path, "test_backup"), "test")
|
||||
mock_backup = Backup(coresys, Path(tmp_path, "test_backup"), "test", None)
|
||||
mock_backup.new("Test", utcnow().isoformat(), BackupType.PARTIAL)
|
||||
mock_backup.repositories = ["https://github.com/awesome-developer/awesome-repo"]
|
||||
mock_backup.docker = {}
|
||||
@ -634,7 +635,7 @@ async def backups(
|
||||
temp_tar = Path(tmp_path, f"{slug}.tar")
|
||||
with SecureTarFile(temp_tar, "w"):
|
||||
pass
|
||||
backup = Backup(coresys, temp_tar, slug)
|
||||
backup = Backup(coresys, temp_tar, slug, None)
|
||||
backup._data = { # pylint: disable=protected-access
|
||||
ATTR_SLUG: slug,
|
||||
ATTR_DATE: utcnow().isoformat(),
|
||||
|
@@ -77,6 +77,19 @@ async def test_homeassistant_start(
                 read_only=False,
                 propagation="rslave",
             ),
+            Mount(
+                type="bind",
+                source=coresys.config.path_extern_backup.as_posix(),
+                target="/backup",
+                read_only=False,
+                propagation="rslave",
+            ),
+            Mount(
+                type="bind",
+                source=coresys.config.path_extern_core_backup.as_posix(),
+                target="/cloud_backup",
+                read_only=False,
+            ),
             Mount(
                 type="bind",
                 source=coresys.homeassistant.path_extern_pulse.as_posix(),
BIN tests/fixtures/backup_example.tar (vendored, new file; binary content not shown)
@@ -46,13 +46,14 @@ async def test_fixup(
         suggestions=[SuggestionType.EXECUTE_RELOAD, SuggestionType.EXECUTE_REMOVE],
     )
 
-    systemd_unit_service.active_state = ["active", "inactive"]
+    systemd_unit_service.active_state = ["active", "inactive", "active", "inactive"]
     await mount_execute_remove()
 
     assert coresys.resolution.issues == []
     assert coresys.resolution.suggestions == []
     assert coresys.mounts.mounts == []
     assert systemd_service.StopUnit.calls == [
-        ("mnt-data-supervisor-mounts-test.mount", "fail")
+        ("mnt-data-supervisor-backup-test.mount", "fail"),
+        ("mnt-data-supervisor-mounts-test.mount", "fail"),
     ]
     coresys.mounts.save_data.assert_called_once()