Mirror of https://github.com/home-assistant/supervisor.git (synced 2025-09-09 13:09:29 +00:00)

Compare commits: 64 commits

a390500309, 7c576da32c, 6d021c1659, 37c1c89d44, 010043f116, b1010c3c61,
7f0204bfc3, a508cc5efd, 65c90696d5, b9f47898d6, 26f554e46a, b57889c84f,
77fd1b4017, ab6745bc99, a5ea3cae72, 8bcd1b4efd, a24657e565, b7721420fa,
6c564fe4fd, 012bfd7e6c, a70f81aa01, 1376a38de5, 1827ecda65, 994c981228,
5bbfbf44ae, ace58ba735, f9840306a0, 322b3bbb4e, 501318f468, 0234f38b23,
8743e0072f, a79e06afa7, 682b8e0535, d70aa5f9a9, 1c815dcad1, afa467a32b,
274218d48e, 7e73df26ab, ef8fc80c95, 05c39144e3, f5cd35af47, c69ecdafd0,
fa90c247ec, 0cd7bd47bb, 36d48d19fc, 9322b68d47, e11ff64b15, 3776dabfcf,
d4e5831f0f, 7b3b478e88, f5afe13e91, 49ce468d83, b26551c812, 394ba580d2,
2f7a54f5fd, 360e085926, 042921925d, dcf024387b, e1232bc9e7, d96598b5dd,
2605f85668, 2c8e6ca0cd, 0225f574be, 34090bf2eb
.github/workflows/builder.yml (vendored, 20 changed lines)

```diff
@@ -53,7 +53,7 @@ jobs:
       requirements: ${{ steps.requirements.outputs.changed }}
     steps:
       - name: Checkout the repository
-        uses: actions/checkout@v4.0.0
+        uses: actions/checkout@v4.1.1
        with:
          fetch-depth: 0

@@ -92,7 +92,7 @@ jobs:
        arch: ${{ fromJson(needs.init.outputs.architectures) }}
    steps:
      - name: Checkout the repository
-       uses: actions/checkout@v4.0.0
+       uses: actions/checkout@v4.1.1
        with:
          fetch-depth: 0

@@ -106,13 +106,13 @@ jobs:

      - name: Build wheels
        if: needs.init.outputs.requirements == 'true'
-       uses: home-assistant/wheels@2023.04.0
+       uses: home-assistant/wheels@2023.10.5
        with:
          abi: cp311
          tag: musllinux_1_2
          arch: ${{ matrix.arch }}
          wheels-key: ${{ secrets.WHEELS_KEY }}
-         apk: "libffi-dev;openssl-dev"
+         apk: "libffi-dev;openssl-dev;yaml-dev"
          skip-binary: aiohttp
          env-file: true
          requirements: "requirements.txt"

@@ -125,7 +125,7 @@ jobs:

      - name: Set up Python ${{ env.DEFAULT_PYTHON }}
        if: needs.init.outputs.publish == 'true'
-       uses: actions/setup-python@v4.7.0
+       uses: actions/setup-python@v4.7.1
        with:
          python-version: ${{ env.DEFAULT_PYTHON }}

@@ -149,7 +149,7 @@ jobs:

      - name: Login to GitHub Container Registry
        if: needs.init.outputs.publish == 'true'
-       uses: docker/login-action@v2.2.0
+       uses: docker/login-action@v3.0.0
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}

@@ -160,7 +160,7 @@ jobs:
        run: echo "BUILD_ARGS=--test" >> $GITHUB_ENV

      - name: Build supervisor
-       uses: home-assistant/builder@2023.08.0
+       uses: home-assistant/builder@2023.09.0
        with:
          args: |
            $BUILD_ARGS \

@@ -178,7 +178,7 @@ jobs:
    steps:
      - name: Checkout the repository
        if: needs.init.outputs.publish == 'true'
-       uses: actions/checkout@v4.0.0
+       uses: actions/checkout@v4.1.1

      - name: Initialize git
        if: needs.init.outputs.publish == 'true'

@@ -203,11 +203,11 @@ jobs:
    timeout-minutes: 60
    steps:
      - name: Checkout the repository
-       uses: actions/checkout@v4.0.0
+       uses: actions/checkout@v4.1.1

      - name: Build the Supervisor
        if: needs.init.outputs.publish != 'true'
-       uses: home-assistant/builder@2023.08.0
+       uses: home-assistant/builder@2023.09.0
        with:
          args: |
            --test \
```
.github/workflows/ci.yaml (vendored, 42 changed lines)

```diff
@@ -25,10 +25,10 @@ jobs:
    name: Prepare Python dependencies
    steps:
      - name: Check out code from GitHub
-       uses: actions/checkout@v4.0.0
+       uses: actions/checkout@v4.1.1
      - name: Set up Python
        id: python
-       uses: actions/setup-python@v4.7.0
+       uses: actions/setup-python@v4.7.1
        with:
          python-version: ${{ env.DEFAULT_PYTHON }}
      - name: Restore Python virtual environment

@@ -66,9 +66,9 @@ jobs:
    needs: prepare
    steps:
      - name: Check out code from GitHub
-       uses: actions/checkout@v4.0.0
+       uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-       uses: actions/setup-python@v4.7.0
+       uses: actions/setup-python@v4.7.1
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}

@@ -95,7 +95,7 @@ jobs:
    needs: prepare
    steps:
      - name: Check out code from GitHub
-       uses: actions/checkout@v4.0.0
+       uses: actions/checkout@v4.1.1
      - name: Register hadolint problem matcher
        run: |
          echo "::add-matcher::.github/workflows/matchers/hadolint.json"

@@ -110,9 +110,9 @@ jobs:
    needs: prepare
    steps:
      - name: Check out code from GitHub
-       uses: actions/checkout@v4.0.0
+       uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-       uses: actions/setup-python@v4.7.0
+       uses: actions/setup-python@v4.7.1
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}

@@ -154,9 +154,9 @@ jobs:
    needs: prepare
    steps:
      - name: Check out code from GitHub
-       uses: actions/checkout@v4.0.0
+       uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-       uses: actions/setup-python@v4.7.0
+       uses: actions/setup-python@v4.7.1
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}

@@ -186,9 +186,9 @@ jobs:
    needs: prepare
    steps:
      - name: Check out code from GitHub
-       uses: actions/checkout@v4.0.0
+       uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-       uses: actions/setup-python@v4.7.0
+       uses: actions/setup-python@v4.7.1
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}

@@ -227,9 +227,9 @@ jobs:
    needs: prepare
    steps:
      - name: Check out code from GitHub
-       uses: actions/checkout@v4.0.0
+       uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-       uses: actions/setup-python@v4.7.0
+       uses: actions/setup-python@v4.7.1
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}

@@ -271,9 +271,9 @@ jobs:
    needs: prepare
    steps:
      - name: Check out code from GitHub
-       uses: actions/checkout@v4.0.0
+       uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-       uses: actions/setup-python@v4.7.0
+       uses: actions/setup-python@v4.7.1
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}

@@ -303,9 +303,9 @@ jobs:
    needs: prepare
    steps:
      - name: Check out code from GitHub
-       uses: actions/checkout@v4.0.0
+       uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-       uses: actions/setup-python@v4.7.0
+       uses: actions/setup-python@v4.7.1
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}

@@ -344,9 +344,9 @@ jobs:
    name: Run tests Python ${{ needs.prepare.outputs.python-version }}
    steps:
      - name: Check out code from GitHub
-       uses: actions/checkout@v4.0.0
+       uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-       uses: actions/setup-python@v4.7.0
+       uses: actions/setup-python@v4.7.1
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}

@@ -402,9 +402,9 @@ jobs:
    needs: ["pytest", "prepare"]
    steps:
      - name: Check out code from GitHub
-       uses: actions/checkout@v4.0.0
+       uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-       uses: actions/setup-python@v4.7.0
+       uses: actions/setup-python@v4.7.1
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
```
.github/workflows/release-drafter.yml (vendored, 4 changed lines)

```diff
@@ -11,7 +11,7 @@ jobs:
    name: Release Drafter
    steps:
      - name: Checkout the repository
-       uses: actions/checkout@v4.0.0
+       uses: actions/checkout@v4.1.1
        with:
          fetch-depth: 0

@@ -36,7 +36,7 @@ jobs:
          echo "::set-output name=version::$datepre.$newpost"

      - name: Run Release Drafter
-       uses: release-drafter/release-drafter@v5.24.0
+       uses: release-drafter/release-drafter@v5.25.0
        with:
          tag: ${{ steps.version.outputs.version }}
          name: ${{ steps.version.outputs.version }}
```
.github/workflows/sentry.yaml (vendored, 2 changed lines)

```diff
@@ -10,7 +10,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code from GitHub
-       uses: actions/checkout@v4.0.0
+       uses: actions/checkout@v4.1.1
      - name: Sentry Release
        uses: getsentry/action-release@v1.4.1
        env:
```
```diff
@@ -28,7 +28,7 @@ repos:
    hooks:
      - id: isort
  - repo: https://github.com/asottile/pyupgrade
-   rev: v3.4.0
+   rev: v3.15.0
    hooks:
      - id: pyupgrade
        args: [--py310-plus]
```
```diff
@@ -22,6 +22,7 @@ RUN \
        libpulse \
        musl \
        openssl \
+       yaml \
        \
    && curl -Lso /usr/bin/cosign "https://github.com/home-assistant/cosign/releases/download/${COSIGN_VERSION}/cosign_${BUILD_ARCH}" \
    && chmod a+x /usr/bin/cosign
```
```diff
@@ -1,5 +1,5 @@
-aiodns==3.0.0
-aiohttp==3.8.5
+aiodns==3.1.1
+aiohttp==3.8.6
 async_timeout==4.0.3
 atomicwrites-homeassistant==1.4.1
 attrs==23.1.0
@@ -8,19 +8,19 @@ brotli==1.1.0
 ciso8601==2.3.0
 colorlog==6.7.0
 cpe==1.2.1
-cryptography==41.0.3
-debugpy==1.7.0
+cryptography==41.0.4
+debugpy==1.8.0
 deepmerge==1.1.0
 dirhash==0.2.1
 docker==6.1.3
 faust-cchardet==2.1.19
-gitpython==3.1.36
+gitpython==3.1.40
 jinja2==3.1.2
 pulsectl==23.5.2
 pyudev==0.24.1
 ruamel.yaml==0.17.21
 PyYAML==6.0.1
 securetar==2023.3.0
-sentry-sdk==1.30.0
+sentry-sdk==1.32.0
 voluptuous==0.13.1
-dbus-fast==2.2.0
-typing_extensions==4.7.1
+dbus-fast==2.11.1
+typing_extensions==4.8.0
```
```diff
@@ -1,16 +1,16 @@
-black==23.9.1
-coverage==7.3.1
+black==23.10.0
+coverage==7.3.2
 flake8-docstrings==1.7.0
 flake8==6.1.0
-pre-commit==3.4.0
+pre-commit==3.5.0
 pydocstyle==6.3.0
-pylint==2.17.5
+pylint==3.0.2
 pytest-aiohttp==1.0.5
 pytest-asyncio==0.18.3
 pytest-cov==4.1.0
-pytest-timeout==2.1.0
+pytest-timeout==2.2.0
 pytest==7.4.2
-pyupgrade==3.10.1
-time-machine==2.12.0
-typing_extensions==4.7.1
-urllib3==2.0.4
+pyupgrade==3.15.0
+time-machine==2.13.0
+typing_extensions==4.8.0
+urllib3==2.0.7
```
setup.py (12 changed lines)

```diff
@@ -15,18 +15,6 @@ setup(
         "A maintainless private cloud operator system that"
         "setup a Home-Assistant instance. Based on HassOS"
     ),
-    classifiers=[
-        "Intended Audience :: End Users/Desktop",
-        "Intended Audience :: Developers",
-        "License :: OSI Approved :: Apache Software License",
-        "Operating System :: OS Independent",
-        "Topic :: Home Automation",
-        "Topic :: Software Development :: Libraries :: Python Modules",
-        "Topic :: Scientific/Engineering :: Atmospheric Science",
-        "Development Status :: 5 - Production/Stable",
-        "Intended Audience :: Developers",
-        "Programming Language :: Python :: 3.8",
-    ],
     keywords=["docker", "home-assistant", "api"],
     zip_safe=False,
     platforms="any",
```
```diff
@@ -288,29 +288,40 @@ class AddonManager(CoreSysAttributes):
             )

         # Update instance
-        last_state: AddonState = addon.state
         old_image = addon.image
+        # Cache data to prevent races with other updates to global
+        store = store.clone()

         try:
             await addon.instance.update(store.version, store.image)
         except DockerError as err:
             raise AddonsError() from err

-        _LOGGER.info("Add-on '%s' successfully updated", slug)
-        self.data.update(store)
+        # Stop the addon if running
+        if (last_state := addon.state) in {AddonState.STARTED, AddonState.STARTUP}:
+            await addon.stop()

-        # Cleanup
-        with suppress(DockerError):
-            await addon.instance.cleanup(old_image=old_image)
+        try:
+            _LOGGER.info("Add-on '%s' successfully updated", slug)
+            self.data.update(store)

-        # Setup/Fix AppArmor profile
-        await addon.install_apparmor()
+            # Cleanup
+            with suppress(DockerError):
+                await addon.instance.cleanup(
+                    old_image=old_image, image=store.image, version=store.version
+                )

-        # restore state
-        return (
-            await addon.start()
-            if last_state in [AddonState.STARTED, AddonState.STARTUP]
-            else None
-        )
+            # Setup/Fix AppArmor profile
+            await addon.install_apparmor()
+
+        finally:
+            # restore state. Return awaitable for caller if no exception
+            out = (
+                await addon.start()
+                if last_state in {AddonState.STARTED, AddonState.STARTUP}
+                else None
+            )
+            return out

     @Job(
         name="addon_manager_rebuild",
```

```diff
@@ -389,9 +400,11 @@ class AddonManager(CoreSysAttributes):
         if slug not in self.local:
             _LOGGER.debug("Add-on %s is not local available for restore", slug)
             addon = Addon(self.coresys, slug)
+            had_ingress = False
         else:
             _LOGGER.debug("Add-on %s is local available for restore", slug)
             addon = self.local[slug]
+            had_ingress = addon.ingress_panel

         wait_for_start = await addon.restore(tar_file)
```

```diff
@@ -401,7 +414,7 @@ class AddonManager(CoreSysAttributes):
         self.local[slug] = addon

         # Update ingress
-        if addon.with_ingress:
+        if had_ingress != addon.ingress_panel:
             await self.sys_ingress.reload()
             with suppress(HomeAssistantAPIError):
                 await self.sys_ingress.update_hass_panel(addon)
```
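The hunks above share one pattern: capture the add-on's run state before touching it, stop it, do the risky work, and restore the previous state in a `finally:` block so a failure cannot leave the add-on silently stopped. A minimal runnable sketch of that pattern, with a stand-in `FakeAddon` class (hypothetical, not Supervisor's API):

```python
# Sketch of the stop/update/restore-state pattern from the hunk above.
import asyncio


class FakeAddon:
    """Stand-in add-on; only state transitions matter here."""

    def __init__(self) -> None:
        self.state = "started"

    async def stop(self) -> None:
        self.state = "stopped"

    async def start(self) -> None:
        self.state = "started"

    async def apply_update(self) -> None:
        await asyncio.sleep(0)  # pretend to pull and swap the image


async def update_with_state_restore(addon: FakeAddon) -> None:
    was_running = addon.state in {"started", "startup"}  # capture state first
    if was_running:
        await addon.stop()  # stop before touching image/data
    try:
        await addon.apply_update()  # may raise; finally still runs
    finally:
        if was_running:
            await addon.start()  # restore the previous run state


asyncio.run(update_with_state_restore(FakeAddon()))
```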
```diff
@@ -662,7 +662,7 @@ class Addon(AddonModel):
             await asyncio.wait_for(self._startup_task, STARTUP_TIMEOUT)
         except asyncio.TimeoutError:
             _LOGGER.warning(
-                "Timeout while waiting for addon %s to start, took more then %s seconds",
+                "Timeout while waiting for addon %s to start, took more than %s seconds",
                 self.name,
                 STARTUP_TIMEOUT,
             )
```

```diff
@@ -782,10 +782,7 @@ class Addon(AddonModel):

         if self.backup_mode == AddonBackupMode.COLD:
             _LOGGER.info("Shutdown add-on %s for cold backup", self.slug)
-            try:
-                await self.instance.stop()
-            except DockerError as err:
-                raise AddonsError() from err
+            await self.stop()

         elif self.backup_pre is not None:
             await self._backup_command(self.backup_pre)
```

```diff
@@ -933,64 +930,67 @@ class Addon(AddonModel):

             # Stop it first if its running
             if await self.instance.is_running():
-                with suppress(DockerError):
-                    await self.instance.stop()
+                await self.stop()

-            # Check version / restore image
-            version = data[ATTR_VERSION]
-            if not await self.instance.exists():
-                _LOGGER.info("Restore/Install of image for addon %s", self.slug)
-
-                image_file = Path(temp, "image.tar")
-                if image_file.is_file():
-                    with suppress(DockerError):
-                        await self.instance.import_image(image_file)
-                else:
-                    with suppress(DockerError):
-                        await self.instance.install(version, restore_image)
-                        await self.instance.cleanup()
-            elif self.instance.version != version or self.legacy:
-                _LOGGER.info("Restore/Update of image for addon %s", self.slug)
-                with suppress(DockerError):
-                    await self.instance.update(version, restore_image)
-
-            # Restore data
-            def _restore_data():
-                """Restore data."""
-                temp_data = Path(temp, "data")
-                if temp_data.is_dir():
-                    shutil.copytree(temp_data, self.path_data, symlinks=True)
-                else:
-                    self.path_data.mkdir()
-
-            _LOGGER.info("Restoring data for addon %s", self.slug)
-            if self.path_data.is_dir():
-                await remove_data(self.path_data)
             try:
-                await self.sys_run_in_executor(_restore_data)
-            except shutil.Error as err:
-                raise AddonsError(
-                    f"Can't restore origin data: {err}", _LOGGER.error
-                ) from err
+                # Check version / restore image
+                version = data[ATTR_VERSION]
+                if not await self.instance.exists():
+                    _LOGGER.info("Restore/Install of image for addon %s", self.slug)

-            # Restore AppArmor
-            profile_file = Path(temp, "apparmor.txt")
-            if profile_file.exists():
-                try:
-                    await self.sys_host.apparmor.load_profile(self.slug, profile_file)
-                except HostAppArmorError as err:
-                    _LOGGER.error(
-                        "Can't restore AppArmor profile for add-on %s", self.slug
-                    )
-                    raise AddonsError() from err
+                    image_file = Path(temp, "image.tar")
+                    if image_file.is_file():
+                        with suppress(DockerError):
+                            await self.instance.import_image(image_file)
+                    else:
+                        with suppress(DockerError):
+                            await self.instance.install(version, restore_image)
+                            await self.instance.cleanup()
+                elif self.instance.version != version or self.legacy:
+                    _LOGGER.info("Restore/Update of image for addon %s", self.slug)
+                    with suppress(DockerError):
+                        await self.instance.update(version, restore_image)

-            # Is add-on loaded
-            if not self.loaded:
-                await self.load()
+                # Restore data
+                def _restore_data():
+                    """Restore data."""
+                    temp_data = Path(temp, "data")
+                    if temp_data.is_dir():
+                        shutil.copytree(temp_data, self.path_data, symlinks=True)
+                    else:
+                        self.path_data.mkdir()

-            # Run add-on
-            if data[ATTR_STATE] == AddonState.STARTED:
-                wait_for_start = await self.start()
+                _LOGGER.info("Restoring data for addon %s", self.slug)
+                if self.path_data.is_dir():
+                    await remove_data(self.path_data)
+                try:
+                    await self.sys_run_in_executor(_restore_data)
+                except shutil.Error as err:
+                    raise AddonsError(
+                        f"Can't restore origin data: {err}", _LOGGER.error
+                    ) from err
+
+                # Restore AppArmor
+                profile_file = Path(temp, "apparmor.txt")
+                if profile_file.exists():
+                    try:
+                        await self.sys_host.apparmor.load_profile(
+                            self.slug, profile_file
+                        )
+                    except HostAppArmorError as err:
+                        _LOGGER.error(
+                            "Can't restore AppArmor profile for add-on %s", self.slug
+                        )
+                        raise AddonsError() from err
+
+                # Is add-on loaded
+                if not self.loaded:
+                    await self.load()
+
+            finally:
+                # Run add-on
+                if data[ATTR_STATE] == AddonState.STARTED:
+                    wait_for_start = await self.start()

         _LOGGER.info("Finished restore for add-on %s", self.slug)
         return wait_for_start
```
```diff
@@ -186,6 +186,8 @@ class RestAPI(CoreSysAttributes):
         # Boards endpoints
         self.webapp.add_routes(
             [
+                web.get("/os/boards/green", api_os.boards_green_info),
+                web.post("/os/boards/green", api_os.boards_green_options),
                 web.get("/os/boards/yellow", api_os.boards_yellow_info),
                 web.post("/os/boards/yellow", api_os.boards_yellow_options),
                 web.get("/os/boards/{board}", api_os.boards_other_info),
```
```diff
@@ -20,6 +20,7 @@ from ..const import (
     ATTR_DAYS_UNTIL_STALE,
     ATTR_FOLDERS,
     ATTR_HOMEASSISTANT,
+    ATTR_HOMEASSISTANT_EXCLUDE_DATABASE,
     ATTR_LOCATON,
     ATTR_NAME,
     ATTR_PASSWORD,
@@ -64,6 +65,7 @@ SCHEMA_BACKUP_FULL = vol.Schema(
         vol.Optional(ATTR_PASSWORD): vol.Maybe(str),
         vol.Optional(ATTR_COMPRESSED): vol.Maybe(vol.Boolean()),
         vol.Optional(ATTR_LOCATON): vol.Maybe(str),
+        vol.Optional(ATTR_HOMEASSISTANT_EXCLUDE_DATABASE): vol.Boolean(),
     }
 )

@@ -184,6 +186,7 @@ class APIBackups(CoreSysAttributes):
             ATTR_ADDONS: data_addons,
             ATTR_REPOSITORIES: backup.repositories,
             ATTR_FOLDERS: backup.folders,
+            ATTR_HOMEASSISTANT_EXCLUDE_DATABASE: backup.homeassistant_exclude_database,
         }

     def _location_to_mount(self, body: dict[str, Any]) -> dict[str, Any]:
```
```diff
@@ -23,7 +23,6 @@ ATTR_CONNECTION_BUS = "connection_bus"
 ATTR_DATA_DISK = "data_disk"
 ATTR_DEVICE = "device"
 ATTR_DEV_PATH = "dev_path"
-ATTR_DISK_LED = "disk_led"
 ATTR_DISKS = "disks"
 ATTR_DRIVES = "drives"
 ATTR_DT_SYNCHRONIZED = "dt_synchronized"
@@ -31,7 +30,6 @@ ATTR_DT_UTC = "dt_utc"
 ATTR_EJECTABLE = "ejectable"
 ATTR_FALLBACK = "fallback"
 ATTR_FILESYSTEMS = "filesystems"
-ATTR_HEARTBEAT_LED = "heartbeat_led"
 ATTR_IDENTIFIERS = "identifiers"
 ATTR_JOBS = "jobs"
 ATTR_LLMNR = "llmnr"
@@ -41,7 +39,6 @@ ATTR_MODEL = "model"
 ATTR_MOUNTS = "mounts"
 ATTR_MOUNT_POINTS = "mount_points"
 ATTR_PANEL_PATH = "panel_path"
-ATTR_POWER_LED = "power_led"
 ATTR_REMOVABLE = "removable"
 ATTR_REVISION = "revision"
 ATTR_SEAT = "seat"
@@ -49,6 +46,7 @@ ATTR_SIGNED = "signed"
 ATTR_STARTUP_TIME = "startup_time"
 ATTR_SUBSYSTEM = "subsystem"
 ATTR_SYSFS = "sysfs"
+ATTR_SYSTEM_HEALTH_LED = "system_health_led"
 ATTR_TIME_DETECTED = "time_detected"
 ATTR_UPDATE_TYPE = "update_type"
 ATTR_USE_NTP = "use_ntp"
```
```diff
@@ -12,6 +12,7 @@ from ..const import (
     ATTR_AUDIO_INPUT,
     ATTR_AUDIO_OUTPUT,
     ATTR_BACKUP,
+    ATTR_BACKUPS_EXCLUDE_DATABASE,
     ATTR_BLK_READ,
     ATTR_BLK_WRITE,
     ATTR_BOOT,
@@ -51,6 +52,7 @@ SCHEMA_OPTIONS = vol.Schema(
         vol.Optional(ATTR_REFRESH_TOKEN): vol.Maybe(str),
         vol.Optional(ATTR_AUDIO_OUTPUT): vol.Maybe(str),
         vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(str),
+        vol.Optional(ATTR_BACKUPS_EXCLUDE_DATABASE): vol.Boolean(),
     }
 )

@@ -82,6 +84,7 @@ class APIHomeAssistant(CoreSysAttributes):
             ATTR_WATCHDOG: self.sys_homeassistant.watchdog,
             ATTR_AUDIO_INPUT: self.sys_homeassistant.audio_input,
             ATTR_AUDIO_OUTPUT: self.sys_homeassistant.audio_output,
+            ATTR_BACKUPS_EXCLUDE_DATABASE: self.sys_homeassistant.backups_exclude_database,
         }

     @api_process
@@ -113,6 +116,11 @@ class APIHomeAssistant(CoreSysAttributes):
         if ATTR_AUDIO_OUTPUT in body:
             self.sys_homeassistant.audio_output = body[ATTR_AUDIO_OUTPUT]

+        if ATTR_BACKUPS_EXCLUDE_DATABASE in body:
+            self.sys_homeassistant.backups_exclude_database = body[
+                ATTR_BACKUPS_EXCLUDE_DATABASE
+            ]
+
         self.sys_homeassistant.save_data()

     @api_process
```
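The options handler above follows a partial-update idiom: each setting is applied only if its key appears in the validated body, then the whole config is persisted once. A tiny sketch of that idiom with hypothetical names:

```python
# Sketch of the partial-update idiom used by the options handler above.
def apply_options(config: dict, body: dict) -> None:
    for key in ("audio_input", "audio_output", "backups_exclude_database"):
        if key in body:  # only touch keys the caller actually sent
            config[key] = body[key]
    # a save_data() equivalent would persist `config` here


cfg = {"audio_input": None, "backups_exclude_database": False}
apply_options(cfg, {"backups_exclude_database": True})
assert cfg["backups_exclude_database"] is True
```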
```diff
@@ -8,11 +8,15 @@ from aiohttp import web
 import voluptuous as vol

 from ..const import (
+    ATTR_ACTIVITY_LED,
     ATTR_BOARD,
     ATTR_BOOT,
     ATTR_DEVICES,
+    ATTR_DISK_LED,
+    ATTR_HEARTBEAT_LED,
     ATTR_ID,
     ATTR_NAME,
+    ATTR_POWER_LED,
     ATTR_SERIAL,
     ATTR_SIZE,
     ATTR_UPDATE_AVAILABLE,
@@ -27,21 +31,19 @@ from .const import (
     ATTR_DATA_DISK,
     ATTR_DEV_PATH,
     ATTR_DEVICE,
-    ATTR_DISK_LED,
     ATTR_DISKS,
-    ATTR_HEARTBEAT_LED,
     ATTR_MODEL,
-    ATTR_POWER_LED,
+    ATTR_SYSTEM_HEALTH_LED,
     ATTR_VENDOR,
 )
 from .utils import api_process, api_validate

 _LOGGER: logging.Logger = logging.getLogger(__name__)

-# pylint: disable=no-value-for-parameter
 SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): version_tag})
 SCHEMA_DISK = vol.Schema({vol.Required(ATTR_DEVICE): str})

+# pylint: disable=no-value-for-parameter
 SCHEMA_YELLOW_OPTIONS = vol.Schema(
     {
         vol.Optional(ATTR_DISK_LED): vol.Boolean(),
@@ -49,6 +51,14 @@ SCHEMA_YELLOW_OPTIONS = vol.Schema(
         vol.Optional(ATTR_POWER_LED): vol.Boolean(),
     }
 )
+SCHEMA_GREEN_OPTIONS = vol.Schema(
+    {
+        vol.Optional(ATTR_ACTIVITY_LED): vol.Boolean(),
+        vol.Optional(ATTR_POWER_LED): vol.Boolean(),
+        vol.Optional(ATTR_SYSTEM_HEALTH_LED): vol.Boolean(),
+    }
+)
+# pylint: enable=no-value-for-parameter


 class APIOS(CoreSysAttributes):
@@ -105,6 +115,31 @@ class APIOS(CoreSysAttributes):
             ],
         }

+    @api_process
+    async def boards_green_info(self, request: web.Request) -> dict[str, Any]:
+        """Get green board settings."""
+        return {
+            ATTR_ACTIVITY_LED: self.sys_dbus.agent.board.green.activity_led,
+            ATTR_POWER_LED: self.sys_dbus.agent.board.green.power_led,
+            ATTR_SYSTEM_HEALTH_LED: self.sys_dbus.agent.board.green.user_led,
+        }
+
+    @api_process
+    async def boards_green_options(self, request: web.Request) -> None:
+        """Update green board settings."""
+        body = await api_validate(SCHEMA_GREEN_OPTIONS, request)
+
+        if ATTR_ACTIVITY_LED in body:
+            self.sys_dbus.agent.board.green.activity_led = body[ATTR_ACTIVITY_LED]
+
+        if ATTR_POWER_LED in body:
+            self.sys_dbus.agent.board.green.power_led = body[ATTR_POWER_LED]
+
+        if ATTR_SYSTEM_HEALTH_LED in body:
+            self.sys_dbus.agent.board.green.user_led = body[ATTR_SYSTEM_HEALTH_LED]
+
+        self.sys_dbus.agent.board.green.save_data()
+
     @api_process
     async def boards_yellow_info(self, request: web.Request) -> dict[str, Any]:
         """Get yellow board settings."""
@@ -128,6 +163,7 @@ class APIOS(CoreSysAttributes):
         if ATTR_POWER_LED in body:
             self.sys_dbus.agent.board.yellow.power_led = body[ATTR_POWER_LED]

+        self.sys_dbus.agent.board.yellow.save_data()
         self.sys_resolution.create_issue(
             IssueType.REBOOT_REQUIRED,
             ContextType.SYSTEM,
```
```diff
@@ -6,7 +6,10 @@ import logging
 import aiohttp
 from aiohttp import web
 from aiohttp.client_exceptions import ClientConnectorError
+from aiohttp.client_ws import ClientWebSocketResponse
 from aiohttp.hdrs import AUTHORIZATION, CONTENT_TYPE
+from aiohttp.http import WSMessage
+from aiohttp.http_websocket import WSMsgType
 from aiohttp.web_exceptions import HTTPBadGateway, HTTPUnauthorized

 from ..coresys import CoreSysAttributes
@@ -114,7 +117,7 @@ class APIProxy(CoreSysAttributes):
                 body=data, status=client.status, content_type=client.content_type
             )

-    async def _websocket_client(self):
+    async def _websocket_client(self) -> ClientWebSocketResponse:
         """Initialize a WebSocket API connection."""
         url = f"{self.sys_homeassistant.api_url}/api/websocket"

@@ -167,6 +170,25 @@ class APIProxy(CoreSysAttributes):

         raise APIError()

+    async def _proxy_message(
+        self,
+        read_task: asyncio.Task,
+        target: web.WebSocketResponse | ClientWebSocketResponse,
+    ) -> None:
+        """Proxy a message from client to server or vice versa."""
+        if read_task.exception():
+            raise read_task.exception()
+
+        msg: WSMessage = read_task.result()
+        if msg.type == WSMsgType.TEXT:
+            return await target.send_str(msg.data)
+        if msg.type == WSMsgType.BINARY:
+            return await target.send_bytes(msg.data)
+
+        raise TypeError(
+            f"Cannot proxy websocket message of unsupported type: {msg.type}"
+        )
+
     async def websocket(self, request: web.Request):
         """Initialize a WebSocket API connection."""
         if not await self.sys_homeassistant.api.check_api_state():
@@ -214,13 +236,13 @@ class APIProxy(CoreSysAttributes):

         _LOGGER.info("Home Assistant WebSocket API request running")
         try:
-            client_read = None
-            server_read = None
+            client_read: asyncio.Task | None = None
+            server_read: asyncio.Task | None = None
             while not server.closed and not client.closed:
                 if not client_read:
-                    client_read = self.sys_create_task(client.receive_str())
+                    client_read = self.sys_create_task(client.receive())
                 if not server_read:
-                    server_read = self.sys_create_task(server.receive_str())
+                    server_read = self.sys_create_task(server.receive())

                 # wait until data need to be processed
                 await asyncio.wait(
@@ -229,14 +251,12 @@ class APIProxy(CoreSysAttributes):

                 # server
                 if server_read.done() and not client.closed:
-                    server_read.exception()
-                    await client.send_str(server_read.result())
+                    await self._proxy_message(server_read, client)
                     server_read = None

                 # client
                 if client_read.done() and not server.closed:
-                    client_read.exception()
-                    await server.send_str(client_read.result())
+                    await self._proxy_message(client_read, server)
                     client_read = None

         except asyncio.CancelledError:
@@ -246,9 +266,9 @@ class APIProxy(CoreSysAttributes):
             _LOGGER.info("Home Assistant WebSocket API error: %s", err)

         finally:
-            if client_read:
+            if client_read and not client_read.done():
                 client_read.cancel()
-            if server_read:
+            if server_read and not server_read.done():
                 server_read.cancel()

             # close connections
```
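The `_proxy_message` hunk above switches the proxy from `receive_str()` (text only) to `receive()`, which yields a typed `WSMessage`, so text and binary frames can each be forwarded with the matching send call. A minimal sketch of that dispatch, assuming any aiohttp websocket object as `target`:

```python
# Sketch of typed websocket-frame forwarding, as in _proxy_message above.
from aiohttp import WSMsgType
from aiohttp.http import WSMessage


async def forward(msg: WSMessage, target) -> None:
    """Forward one frame; `target` is a client or server aiohttp websocket."""
    if msg.type == WSMsgType.TEXT:
        await target.send_str(msg.data)
    elif msg.type == WSMsgType.BINARY:
        await target.send_bytes(msg.data)
    else:
        # CLOSE/PING/etc. need explicit handling, not silent forwarding
        raise TypeError(f"Cannot proxy websocket message of type {msg.type}")
```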
```diff
@@ -186,12 +186,14 @@ class APIStore(CoreSysAttributes):
         }

     @api_process
-    async def addons_list(self, request: web.Request) -> list[dict[str, Any]]:
+    async def addons_list(self, request: web.Request) -> dict[str, Any]:
         """Return all store add-ons."""
-        return [
-            self._generate_addon_information(self.sys_addons.store[addon])
-            for addon in self.sys_addons.store
-        ]
+        return {
+            ATTR_ADDONS: [
+                self._generate_addon_information(self.sys_addons.store[addon])
+                for addon in self.sys_addons.store
+            ]
+        }

     @api_process
     def addons_addon_install(self, request: web.Request) -> Awaitable[None]:
```
```diff
@@ -26,6 +26,7 @@ from ..const import (
     ATTR_CRYPTO,
     ATTR_DATE,
     ATTR_DOCKER,
+    ATTR_EXCLUDE_DATABASE,
     ATTR_FOLDERS,
     ATTR_HOMEASSISTANT,
     ATTR_NAME,
@@ -130,7 +131,14 @@ class Backup(CoreSysAttributes):
         """Return backup Home Assistant version."""
         if self.homeassistant is None:
             return None
-        return self._data[ATTR_HOMEASSISTANT][ATTR_VERSION]
+        return self.homeassistant[ATTR_VERSION]
+
+    @property
+    def homeassistant_exclude_database(self) -> bool:
+        """Return whether database was excluded from Home Assistant backup."""
+        if self.homeassistant is None:
+            return None
+        return self.homeassistant[ATTR_EXCLUDE_DATABASE]

     @property
     def homeassistant(self):
@@ -539,9 +547,12 @@ class Backup(CoreSysAttributes):
         except Exception as err:  # pylint: disable=broad-except
             _LOGGER.warning("Can't restore folder %s: %s", folder, err)

-    async def store_homeassistant(self):
-        """Backup Home Assitant Core configuration folder."""
-        self._data[ATTR_HOMEASSISTANT] = {ATTR_VERSION: self.sys_homeassistant.version}
+    async def store_homeassistant(self, exclude_database: bool = False):
+        """Backup Home Assistant Core configuration folder."""
+        self._data[ATTR_HOMEASSISTANT] = {
+            ATTR_VERSION: self.sys_homeassistant.version,
+            ATTR_EXCLUDE_DATABASE: exclude_database,
+        }

         # Backup Home Assistant Core config directory
         tar_name = Path(
@@ -551,13 +562,13 @@ class Backup(CoreSysAttributes):
             tar_name, "w", key=self._key, gzip=self.compressed, bufsize=BUF_SIZE
         )

-        await self.sys_homeassistant.backup(homeassistant_file)
+        await self.sys_homeassistant.backup(homeassistant_file, exclude_database)

         # Store size
         self.homeassistant[ATTR_SIZE] = homeassistant_file.size

     async def restore_homeassistant(self) -> Awaitable[None]:
-        """Restore Home Assitant Core configuration folder."""
+        """Restore Home Assistant Core configuration folder."""
         await self.sys_homeassistant.core.stop()

         # Restore Home Assistant Core config directory
@@ -568,7 +579,9 @@ class Backup(CoreSysAttributes):
             tar_name, "r", key=self._key, gzip=self.compressed, bufsize=BUF_SIZE
         )

-        await self.sys_homeassistant.restore(homeassistant_file)
+        await self.sys_homeassistant.restore(
+            homeassistant_file, self.homeassistant_exclude_database
+        )

         # Generate restore task
         async def _core_update():
```
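The hunks above make the exclude-database choice round-trip through the backup's own metadata: it is written at backup time and read back at restore time, so both sides agree without the caller re-supplying the flag. A trivial sketch of that round-trip with hypothetical key names:

```python
# Sketch: the exclude-database flag travels inside the backup metadata.
def store_metadata(data: dict, version: str, exclude_database: bool) -> None:
    data["homeassistant"] = {
        "version": version,
        "exclude_database": exclude_database,  # persisted with the backup
    }


def restore_flag(data: dict) -> bool:
    return data["homeassistant"]["exclude_database"]  # read back on restore


meta: dict = {}
store_metadata(meta, "2023.10.1", exclude_database=True)
assert restore_flag(meta) is True
```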
```diff
@@ -226,6 +226,7 @@ class BackupManager(FileConfiguration, JobGroup):
         addon_list: list[Addon],
         folder_list: list[str],
         homeassistant: bool,
+        homeassistant_exclude_database: bool | None,
     ) -> Backup | None:
         """Create a backup.

@@ -245,7 +246,11 @@ class BackupManager(FileConfiguration, JobGroup):
             # HomeAssistant Folder is for v1
             if homeassistant:
                 self._change_stage(BackupJobStage.HOME_ASSISTANT, backup)
-                await backup.store_homeassistant()
+                await backup.store_homeassistant(
+                    self.sys_homeassistant.backups_exclude_database
+                    if homeassistant_exclude_database is None
+                    else homeassistant_exclude_database
+                )

             # Backup folders
             if folder_list:
@@ -272,7 +277,7 @@ class BackupManager(FileConfiguration, JobGroup):

     @Job(
         name="backup_manager_full_backup",
-        conditions=[JobCondition.FREE_SPACE, JobCondition.RUNNING],
+        conditions=[JobCondition.RUNNING],
         limit=JobExecutionLimit.GROUP_ONCE,
         on_condition=BackupJobError,
     )
@@ -282,15 +287,25 @@ class BackupManager(FileConfiguration, JobGroup):
         password: str | None = None,
         compressed: bool = True,
         location: Mount | type[DEFAULT] | None = DEFAULT,
+        homeassistant_exclude_database: bool | None = None,
     ) -> Backup | None:
         """Create a full backup."""
+        if self._get_base_path(location) == self.sys_config.path_backup:
+            await Job.check_conditions(
+                self, {JobCondition.FREE_SPACE}, "BackupManager.do_backup_full"
+            )
+
         backup = self._create_backup(
             name, BackupType.FULL, password, compressed, location
         )

         _LOGGER.info("Creating new full backup with slug %s", backup.slug)
         backup = await self._do_backup(
-            backup, self.sys_addons.installed, ALL_FOLDERS, True
+            backup,
+            self.sys_addons.installed,
+            ALL_FOLDERS,
+            True,
+            homeassistant_exclude_database,
         )
         if backup:
             _LOGGER.info("Creating full backup with slug %s completed", backup.slug)
@@ -298,7 +313,7 @@ class BackupManager(FileConfiguration, JobGroup):

     @Job(
         name="backup_manager_partial_backup",
-        conditions=[JobCondition.FREE_SPACE, JobCondition.RUNNING],
+        conditions=[JobCondition.RUNNING],
         limit=JobExecutionLimit.GROUP_ONCE,
         on_condition=BackupJobError,
     )
@@ -311,8 +326,14 @@ class BackupManager(FileConfiguration, JobGroup):
         homeassistant: bool = False,
         compressed: bool = True,
         location: Mount | type[DEFAULT] | None = DEFAULT,
+        homeassistant_exclude_database: bool | None = None,
     ) -> Backup | None:
         """Create a partial backup."""
+        if self._get_base_path(location) == self.sys_config.path_backup:
+            await Job.check_conditions(
+                self, {JobCondition.FREE_SPACE}, "BackupManager.do_backup_partial"
+            )
+
         addons = addons or []
         folders = folders or []

@@ -337,7 +358,9 @@ class BackupManager(FileConfiguration, JobGroup):
                 continue
             _LOGGER.warning("Add-on %s not found/installed", addon_slug)

-        backup = await self._do_backup(backup, addon_list, folders, homeassistant)
+        backup = await self._do_backup(
+            backup, addon_list, folders, homeassistant, homeassistant_exclude_database
+        )
         if backup:
             _LOGGER.info("Creating partial backup with slug %s completed", backup.slug)
         return backup
```
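Note what the two `@Job(...)` hunks above do: `FREE_SPACE` moves from the decorator's unconditional condition list to a runtime check that fires only when the backup target is the local backup path, so backups written to external mounts are no longer blocked by low space on the data disk. A sketch of that conditional guard with hypothetical helper names:

```python
# Sketch of the conditional free-space guard introduced above.
import shutil
from pathlib import Path


def check_free_space(path: Path, minimum_bytes: int = 1 << 30) -> None:
    """Raise if the filesystem holding `path` has too little free space."""
    if shutil.disk_usage(path).free < minimum_bytes:
        raise RuntimeError(f"Not enough free space on {path}")


def do_backup(target: Path, local_backup_path: Path) -> None:
    # Only guard the local data disk; external mounts are not checked here.
    if target == local_backup_path:
        check_free_space(local_backup_path)
    # ... write the backup archive to `target` ...
```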
```diff
@@ -14,6 +14,7 @@ from ..const import (
     ATTR_DATE,
     ATTR_DAYS_UNTIL_STALE,
     ATTR_DOCKER,
+    ATTR_EXCLUDE_DATABASE,
     ATTR_FOLDERS,
     ATTR_HOMEASSISTANT,
     ATTR_NAME,
@@ -103,6 +104,9 @@ SCHEMA_BACKUP = vol.Schema(
             {
                 vol.Required(ATTR_VERSION): version_tag,
                 vol.Optional(ATTR_SIZE, default=0): vol.Coerce(float),
+                vol.Optional(
+                    ATTR_EXCLUDE_DATABASE, default=False
+                ): vol.Boolean(),
             },
             extra=vol.REMOVE_EXTRA,
         )
```
```diff
@@ -19,6 +19,7 @@ SUPERVISOR_DATA = Path("/data")
 FILE_HASSIO_ADDONS = Path(SUPERVISOR_DATA, "addons.json")
 FILE_HASSIO_AUTH = Path(SUPERVISOR_DATA, "auth.json")
 FILE_HASSIO_BACKUPS = Path(SUPERVISOR_DATA, "backups.json")
+FILE_HASSIO_BOARD = Path(SUPERVISOR_DATA, "board.json")
 FILE_HASSIO_CONFIG = Path(SUPERVISOR_DATA, "config.json")
 FILE_HASSIO_DISCOVERY = Path(SUPERVISOR_DATA, "discovery.json")
 FILE_HASSIO_DOCKER = Path(SUPERVISOR_DATA, "docker.json")
@@ -88,6 +89,7 @@ REQUEST_FROM = "HASSIO_FROM"
 ATTR_ACCESS_TOKEN = "access_token"
 ATTR_ACCESSPOINTS = "accesspoints"
 ATTR_ACTIVE = "active"
+ATTR_ACTIVITY_LED = "activity_led"
 ATTR_ADDON = "addon"
 ATTR_ADDONS = "addons"
 ATTR_ADDONS_CUSTOM_LIST = "addons_custom_list"
@@ -113,6 +115,7 @@ ATTR_BACKUP_EXCLUDE = "backup_exclude"
 ATTR_BACKUP_POST = "backup_post"
 ATTR_BACKUP_PRE = "backup_pre"
 ATTR_BACKUPS = "backups"
+ATTR_BACKUPS_EXCLUDE_DATABASE = "backups_exclude_database"
 ATTR_BLK_READ = "blk_read"
 ATTR_BLK_WRITE = "blk_write"
 ATTR_BOARD = "board"
@@ -152,6 +155,7 @@ ATTR_DIAGNOSTICS = "diagnostics"
 ATTR_DISCOVERY = "discovery"
 ATTR_DISK = "disk"
 ATTR_DISK_FREE = "disk_free"
+ATTR_DISK_LED = "disk_led"
 ATTR_DISK_LIFE_TIME = "disk_life_time"
 ATTR_DISK_TOTAL = "disk_total"
 ATTR_DISK_USED = "disk_used"
@@ -164,6 +168,7 @@ ATTR_ENABLE = "enable"
 ATTR_ENABLED = "enabled"
 ATTR_ENVIRONMENT = "environment"
 ATTR_EVENT = "event"
+ATTR_EXCLUDE_DATABASE = "exclude_database"
 ATTR_FEATURES = "features"
 ATTR_FILENAME = "filename"
 ATTR_FLAGS = "flags"
@@ -177,7 +182,9 @@ ATTR_HASSIO_API = "hassio_api"
 ATTR_HASSIO_ROLE = "hassio_role"
 ATTR_HASSOS = "hassos"
 ATTR_HEALTHY = "healthy"
+ATTR_HEARTBEAT_LED = "heartbeat_led"
 ATTR_HOMEASSISTANT = "homeassistant"
+ATTR_HOMEASSISTANT_EXCLUDE_DATABASE = "homeassistant_exclude_database"
 ATTR_HOMEASSISTANT_API = "homeassistant_api"
 ATTR_HOST = "host"
 ATTR_HOST_DBUS = "host_dbus"
@@ -252,6 +259,7 @@ ATTR_PLUGINS = "plugins"
 ATTR_PORT = "port"
 ATTR_PORTS = "ports"
 ATTR_PORTS_DESCRIPTION = "ports_description"
+ATTR_POWER_LED = "power_led"
 ATTR_PREFIX = "prefix"
 ATTR_PRIMARY = "primary"
 ATTR_PRIORITY = "priority"
@@ -315,6 +323,7 @@ ATTR_UPDATE_KEY = "update_key"
 ATTR_URL = "url"
 ATTR_USB = "usb"
 ATTR_USER = "user"
+ATTR_USER_LED = "user_led"
 ATTR_USERNAME = "username"
 ATTR_UUID = "uuid"
 ATTR_VALID = "valid"
```
```diff
@@ -250,7 +250,7 @@ class Core(CoreSysAttributes):
                 except HomeAssistantError as err:
                     capture_exception(err)
             else:
-                _LOGGER.info("Skiping start of Home Assistant")
+                _LOGGER.info("Skipping start of Home Assistant")

         # Core is not running
         if self.sys_homeassistant.core.error_state:
```
```diff
@@ -725,7 +725,7 @@ class CoreSysAttributes:
     def sys_run_in_executor(
         self, funct: Callable[..., T], *args: tuple[Any], **kwargs: dict[str, Any]
     ) -> Coroutine[Any, Any, T]:
-        """Add an job to the executor pool."""
+        """Add a job to the executor pool."""
         return self.coresys.run_in_executor(funct, *args, **kwargs)

     def sys_create_task(self, coroutine: Coroutine) -> asyncio.Task:
```
```diff
@@ -11,7 +11,8 @@ from ...const import (
     DBUS_OBJECT_HAOS_BOARDS,
 )
 from ...interface import DBusInterfaceProxy, dbus_property
-from .const import BOARD_NAME_SUPERVISED, BOARD_NAME_YELLOW
+from .const import BOARD_NAME_GREEN, BOARD_NAME_SUPERVISED, BOARD_NAME_YELLOW
+from .green import Green
 from .interface import BoardProxy
 from .supervised import Supervised
 from .yellow import Yellow
@@ -39,6 +40,14 @@ class BoardManager(DBusInterfaceProxy):
         """Get board name."""
         return self.properties[DBUS_ATTR_BOARD]

+    @property
+    def green(self) -> Green:
+        """Get Green board."""
+        if self.board != BOARD_NAME_GREEN:
+            raise BoardInvalidError("Green board is not in use", _LOGGER.error)
+
+        return self._board_proxy
+
     @property
     def supervised(self) -> Supervised:
         """Get Supervised board."""
@@ -61,6 +70,8 @@ class BoardManager(DBusInterfaceProxy):

         if self.board == BOARD_NAME_YELLOW:
             self._board_proxy = Yellow()
+        elif self.board == BOARD_NAME_GREEN:
+            self._board_proxy = Green()
         elif self.board == BOARD_NAME_SUPERVISED:
             self._board_proxy = Supervised()
```

```diff
@@ -1,4 +1,5 @@
 """Constants for boards."""

+BOARD_NAME_GREEN = "Green"
 BOARD_NAME_SUPERVISED = "Supervised"
 BOARD_NAME_YELLOW = "Yellow"
```
supervisor/dbus/agent/boards/green.py (new file, 65 lines)

```python
"""Green board management."""

import asyncio

from dbus_fast.aio.message_bus import MessageBus

from ....const import ATTR_ACTIVITY_LED, ATTR_POWER_LED, ATTR_USER_LED
from ...const import DBUS_ATTR_ACTIVITY_LED, DBUS_ATTR_POWER_LED, DBUS_ATTR_USER_LED
from ...interface import dbus_property
from .const import BOARD_NAME_GREEN
from .interface import BoardProxy
from .validate import SCHEMA_GREEN_BOARD


class Green(BoardProxy):
    """Green board manager object."""

    def __init__(self) -> None:
        """Initialize properties."""
        super().__init__(BOARD_NAME_GREEN, SCHEMA_GREEN_BOARD)

    @property
    @dbus_property
    def activity_led(self) -> bool:
        """Get activity LED enabled."""
        return self.properties[DBUS_ATTR_ACTIVITY_LED]

    @activity_led.setter
    def activity_led(self, enabled: bool) -> None:
        """Enable/disable activity LED."""
        self._data[ATTR_ACTIVITY_LED] = enabled
        asyncio.create_task(self.dbus.Boards.Green.set_activity_led(enabled))

    @property
    @dbus_property
    def power_led(self) -> bool:
        """Get power LED enabled."""
        return self.properties[DBUS_ATTR_POWER_LED]

    @power_led.setter
    def power_led(self, enabled: bool) -> None:
        """Enable/disable power LED."""
        self._data[ATTR_POWER_LED] = enabled
        asyncio.create_task(self.dbus.Boards.Green.set_power_led(enabled))

    @property
    @dbus_property
    def user_led(self) -> bool:
        """Get user LED enabled."""
        return self.properties[DBUS_ATTR_USER_LED]

    @user_led.setter
    def user_led(self, enabled: bool) -> None:
        """Enable/disable disk LED."""
        self._data[ATTR_USER_LED] = enabled
        asyncio.create_task(self.dbus.Boards.Green.set_user_led(enabled))

    async def connect(self, bus: MessageBus) -> None:
        """Connect to D-Bus."""
        await super().connect(bus)

        # Set LEDs based on settings on connect
        self.activity_led = self._data[ATTR_ACTIVITY_LED]
        self.power_led = self._data[ATTR_POWER_LED]
        self.user_led = self._data[ATTR_USER_LED]
```
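Each LED setter in the new file records the value in local data, then schedules the asynchronous D-Bus call with `asyncio.create_task` rather than awaiting it, which lets a plain property assignment trigger async work. A minimal runnable sketch of that setter pattern (stand-in class, not the real `BoardProxy`):

```python
# Sketch of the property-setter plus fire-and-forget D-Bus call pattern above.
import asyncio


class LedProxy:
    """Stand-in for the board proxy; not Supervisor's real class."""

    def __init__(self) -> None:
        self._data: dict[str, bool] = {"power_led": True}
        self.dbus_calls: list[bool] = []

    async def _set_power_led(self, enabled: bool) -> None:
        self.dbus_calls.append(enabled)  # stand-in for the D-Bus method call

    @property
    def power_led(self) -> bool:
        return self._data["power_led"]

    @power_led.setter
    def power_led(self, enabled: bool) -> None:
        self._data["power_led"] = enabled  # persist locally first
        # fire-and-forget: requires a running event loop
        asyncio.create_task(self._set_power_led(enabled))


async def main() -> None:
    proxy = LedProxy()
    proxy.power_led = False  # synchronous assignment schedules async work
    await asyncio.sleep(0)   # give the scheduled task a chance to run
    assert proxy.dbus_calls == [False]


asyncio.run(main())
```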
```diff
@@ -1,17 +1,23 @@
 """Board dbus proxy interface."""

+from voluptuous import Schema
+
+from ....const import FILE_HASSIO_BOARD
+from ....utils.common import FileConfiguration
 from ...const import DBUS_IFACE_HAOS_BOARDS, DBUS_NAME_HAOS, DBUS_OBJECT_HAOS_BOARDS
 from ...interface import DBusInterfaceProxy
+from .validate import SCHEMA_BASE_BOARD


-class BoardProxy(DBusInterfaceProxy):
+class BoardProxy(FileConfiguration, DBusInterfaceProxy):
     """DBus interface proxy for os board."""

     bus_name: str = DBUS_NAME_HAOS

-    def __init__(self, name: str) -> None:
+    def __init__(self, name: str, file_schema: Schema | None = None) -> None:
         """Initialize properties."""
-        super().__init__()
+        super().__init__(FILE_HASSIO_BOARD, file_schema or SCHEMA_BASE_BOARD)
+        super(FileConfiguration, self).__init__()

         self._name: str = name
         self.object_path: str = f"{DBUS_OBJECT_HAOS_BOARDS}/{name}"
```
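The two `super()` calls in the new `__init__` are worth unpacking: with multiple bases, `super().__init__(...)` reaches the first base in the MRO (`FileConfiguration`), and `super(FileConfiguration, self).__init__()` skips past it to the next base, so each parent receives its own arguments even though neither cooperatively chains to the other. A minimal sketch with simplified stand-in classes:

```python
# Sketch of the two-step super() initialization in the BoardProxy hunk above.
class FileConfig:
    def __init__(self, path: str) -> None:
        self.path = path  # does not chain to super().__init__()


class DBusProxy:
    def __init__(self) -> None:
        self.connected = False


class BoardProxy(FileConfig, DBusProxy):
    def __init__(self, name: str) -> None:
        super().__init__(f"/data/{name}.json")  # -> FileConfig.__init__
        super(FileConfig, self).__init__()      # -> DBusProxy.__init__
        self.name = name


board = BoardProxy("Green")
assert board.path == "/data/Green.json" and board.connected is False
```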
supervisor/dbus/agent/boards/validate.py (new file, 32 lines)

```python
"""Validation for board config."""

import voluptuous as vol

from ....const import (
    ATTR_ACTIVITY_LED,
    ATTR_DISK_LED,
    ATTR_HEARTBEAT_LED,
    ATTR_POWER_LED,
    ATTR_USER_LED,
)

# pylint: disable=no-value-for-parameter
SCHEMA_BASE_BOARD = vol.Schema({}, extra=vol.REMOVE_EXTRA)

SCHEMA_GREEN_BOARD = vol.Schema(
    {
        vol.Optional(ATTR_ACTIVITY_LED, default=True): vol.Boolean(),
        vol.Optional(ATTR_POWER_LED, default=True): vol.Boolean(),
        vol.Optional(ATTR_USER_LED, default=True): vol.Boolean(),
    },
    extra=vol.REMOVE_EXTRA,
)

SCHEMA_YELLOW_BOARD = vol.Schema(
    {
        vol.Optional(ATTR_DISK_LED, default=True): vol.Boolean(),
        vol.Optional(ATTR_HEARTBEAT_LED, default=True): vol.Boolean(),
        vol.Optional(ATTR_POWER_LED, default=True): vol.Boolean(),
    },
    extra=vol.REMOVE_EXTRA,
)
```
```diff
@@ -2,10 +2,14 @@

 import asyncio

+from dbus_fast.aio.message_bus import MessageBus
+
+from ....const import ATTR_DISK_LED, ATTR_HEARTBEAT_LED, ATTR_POWER_LED
 from ...const import DBUS_ATTR_DISK_LED, DBUS_ATTR_HEARTBEAT_LED, DBUS_ATTR_POWER_LED
 from ...interface import dbus_property
 from .const import BOARD_NAME_YELLOW
 from .interface import BoardProxy
+from .validate import SCHEMA_YELLOW_BOARD


 class Yellow(BoardProxy):
@@ -13,7 +17,7 @@ class Yellow(BoardProxy):

     def __init__(self) -> None:
         """Initialize properties."""
-        super().__init__(BOARD_NAME_YELLOW)
+        super().__init__(BOARD_NAME_YELLOW, SCHEMA_YELLOW_BOARD)

     @property
     @dbus_property
@@ -24,6 +28,7 @@ class Yellow(BoardProxy):
     @heartbeat_led.setter
     def heartbeat_led(self, enabled: bool) -> None:
         """Enable/disable heartbeat LED."""
+        self._data[ATTR_HEARTBEAT_LED] = enabled
         asyncio.create_task(self.dbus.Boards.Yellow.set_heartbeat_led(enabled))

     @property
@@ -35,6 +40,7 @@ class Yellow(BoardProxy):
     @power_led.setter
     def power_led(self, enabled: bool) -> None:
         """Enable/disable power LED."""
+        self._data[ATTR_POWER_LED] = enabled
         asyncio.create_task(self.dbus.Boards.Yellow.set_power_led(enabled))

     @property
@@ -46,4 +52,14 @@ class Yellow(BoardProxy):
     @disk_led.setter
     def disk_led(self, enabled: bool) -> None:
         """Enable/disable disk LED."""
+        self._data[ATTR_DISK_LED] = enabled
         asyncio.create_task(self.dbus.Boards.Yellow.set_disk_led(enabled))
+
+    async def connect(self, bus: MessageBus) -> None:
+        """Connect to D-Bus."""
+        await super().connect(bus)
+
+        # Set LEDs based on settings on connect
+        self.disk_led = self._data[ATTR_DISK_LED]
+        self.heartbeat_led = self._data[ATTR_HEARTBEAT_LED]
+        self.power_led = self._data[ATTR_POWER_LED]
```
```diff
@@ -64,6 +64,7 @@ DBUS_OBJECT_UDISKS2 = "/org/freedesktop/UDisks2/Manager"
 DBUS_ATTR_ACTIVE_ACCESSPOINT = "ActiveAccessPoint"
 DBUS_ATTR_ACTIVE_CONNECTION = "ActiveConnection"
 DBUS_ATTR_ACTIVE_CONNECTIONS = "ActiveConnections"
+DBUS_ATTR_ACTIVITY_LED = "ActivityLED"
 DBUS_ATTR_ADDRESS_DATA = "AddressData"
 DBUS_ATTR_BITRATE = "Bitrate"
 DBUS_ATTR_BOARD = "Board"
@@ -169,6 +170,7 @@ DBUS_ATTR_TIMEUSEC = "TimeUSec"
 DBUS_ATTR_TIMEZONE = "Timezone"
 DBUS_ATTR_TRANSACTION_STATISTICS = "TransactionStatistics"
 DBUS_ATTR_TYPE = "Type"
+DBUS_ATTR_USER_LED = "UserLED"
 DBUS_ATTR_USERSPACE_TIMESTAMP_MONOTONIC = "UserspaceTimestampMonotonic"
 DBUS_ATTR_UUID_UPPERCASE = "UUID"
 DBUS_ATTR_UUID = "Uuid"
```
```diff
@@ -2,7 +2,7 @@
 from __future__ import annotations

 import socket
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING
 from uuid import uuid4

 from dbus_fast import Variant
@@ -19,6 +19,7 @@ from . import (
     CONF_ATTR_PATH,
     CONF_ATTR_VLAN,
 )
+from .. import NetworkManager
 from ....host.const import InterfaceMethod, InterfaceType

 if TYPE_CHECKING:
@@ -26,8 +27,11 @@ if TYPE_CHECKING:


 def get_connection_from_interface(
-    interface: Interface, name: str | None = None, uuid: str | None = None
-) -> Any:
+    interface: Interface,
+    network_manager: NetworkManager,
+    name: str | None = None,
+    uuid: str | None = None,
+) -> dict[str, dict[str, Variant]]:
     """Generate message argument for network interface update."""

     # Generate/Update ID/name
@@ -121,9 +125,15 @@ def get_connection_from_interface(
     if interface.type == InterfaceType.ETHERNET:
         conn[CONF_ATTR_802_ETHERNET] = {ATTR_ASSIGNED_MAC: Variant("s", "preserve")}
     elif interface.type == "vlan":
+        parent = interface.vlan.interface
+        if parent in network_manager and (
+            parent_connection := network_manager.get(parent).connection
+        ):
+            parent = parent_connection.uuid
+
         conn[CONF_ATTR_VLAN] = {
             "id": Variant("u", interface.vlan.id),
-            "parent": Variant("s", interface.vlan.interface),
+            "parent": Variant("s", parent),
         }
     elif interface.type == InterfaceType.WIRELESS:
         wireless = {
```
```diff
@@ -501,24 +501,16 @@ class DockerAddon(DockerInterface):
     )
     async def run(self) -> None:
         """Run Docker image."""
-        if await self.is_running():
-            return
-
         # Security check
         if not self.addon.protected:
             _LOGGER.warning("%s running with disabled protected mode!", self.addon.name)

-        # Cleanup
-        await self.stop()
-
         # Don't set a hostname if no separate UTS namespace is used
         hostname = None if self.uts_mode else self.addon.hostname

         # Create & Run container
         try:
-            docker_container = await self.sys_run_in_executor(
-                self.sys_docker.run,
-                self.image,
+            await self._run(
                 tag=str(self.addon.version),
                 name=self.name,
                 hostname=hostname,
@@ -549,7 +541,6 @@ class DockerAddon(DockerInterface):
             )
             raise

-        self._meta = docker_container.attrs
         _LOGGER.info(
             "Starting Docker add-on %s with version %s", self.image, self.version
         )
@@ -589,10 +580,6 @@ class DockerAddon(DockerInterface):
             version, image=image, latest=latest, need_build=self.addon.latest_need_build
         )

-        # Stop container & cleanup
-        with suppress(DockerError):
-            await self.stop()
-
     @Job(
         name="docker_addon_install",
         limit=JobExecutionLimit.GROUP_ONCE,
```
```diff
@@ -92,16 +92,7 @@ class DockerAudio(DockerInterface, CoreSysAttributes):
     )
     async def run(self) -> None:
         """Run Docker image."""
-        if await self.is_running():
-            return
-
-        # Cleanup
-        await self.stop()
-
-        # Create & Run container
-        docker_container = await self.sys_run_in_executor(
-            self.sys_docker.run,
-            self.image,
+        await self._run(
             tag=str(self.sys_plugins.audio.version),
             init=False,
             ipv4=self.sys_docker.network.audio,
@@ -118,8 +109,6 @@ class DockerAudio(DockerInterface, CoreSysAttributes):
             },
             mounts=self.mounts,
         )

-        self._meta = docker_container.attrs
         _LOGGER.info(
             "Starting Audio %s with version %s - %s",
             self.image,
```
```diff
@@ -33,16 +33,7 @@ class DockerCli(DockerInterface, CoreSysAttributes):
     )
     async def run(self) -> None:
         """Run Docker image."""
-        if await self.is_running():
-            return
-
-        # Cleanup
-        await self.stop()
-
-        # Create & Run container
-        docker_container = await self.sys_run_in_executor(
-            self.sys_docker.run,
-            self.image,
+        await self._run(
             entrypoint=["/init"],
             tag=str(self.sys_plugins.cli.version),
             init=False,
@@ -60,8 +51,6 @@ class DockerCli(DockerInterface, CoreSysAttributes):
                 ENV_TOKEN: self.sys_plugins.cli.supervisor_token,
             },
         )

-        self._meta = docker_container.attrs
         _LOGGER.info(
             "Starting CLI %s with version %s - %s",
             self.image,
```
```diff
@@ -35,16 +35,7 @@ class DockerDNS(DockerInterface, CoreSysAttributes):
     )
     async def run(self) -> None:
         """Run Docker image."""
-        if await self.is_running():
-            return
-
-        # Cleanup
-        await self.stop()
-
-        # Create & Run container
-        docker_container = await self.sys_run_in_executor(
-            self.sys_docker.run,
-            self.image,
+        await self._run(
             tag=str(self.sys_plugins.dns.version),
             init=False,
             dns=False,
@@ -65,8 +56,6 @@ class DockerDNS(DockerInterface, CoreSysAttributes):
             ],
             oom_score_adj=-300,
         )

-        self._meta = docker_container.attrs
         _LOGGER.info(
             "Starting DNS %s with version %s - %s",
             self.image,
```
```diff
@@ -152,16 +152,7 @@ class DockerHomeAssistant(DockerInterface):
     )
     async def run(self) -> None:
         """Run Docker image."""
-        if await self.is_running():
-            return
-
-        # Cleanup
-        await self.stop()
-
-        # Create & Run container
-        docker_container = await self.sys_run_in_executor(
-            self.sys_docker.run,
-            self.image,
+        await self._run(
             tag=(self.sys_homeassistant.version),
             name=self.name,
             hostname=self.name,
@@ -186,8 +177,6 @@ class DockerHomeAssistant(DockerInterface):
             tmpfs={"/tmp": ""},
             oom_score_adj=-300,
         )

-        self._meta = docker_container.attrs
         _LOGGER.info(
             "Starting Home Assistant %s with version %s", self.image, self.version
         )
```
@@ -1,7 +1,6 @@
"""Interface class for Supervisor Docker object."""
from __future__ import annotations

import asyncio
from collections import defaultdict
from collections.abc import Awaitable
from contextlib import suppress
@@ -92,7 +91,6 @@ class DockerInterface(JobGroup):
)
self.coresys: CoreSys = coresys
self._meta: dict[str, Any] | None = None
self.lock: asyncio.Lock = asyncio.Lock()

@property
def timeout(self) -> int:
@@ -153,7 +151,7 @@ class DockerInterface(JobGroup):
@property
def in_progress(self) -> bool:
"""Return True if a task is in progress."""
return self.lock.locked()
return self.active_job

@property
def restart_policy(self) -> RestartPolicy | None:
@@ -379,6 +377,27 @@ class DockerInterface(JobGroup):
"""Run Docker image."""
raise NotImplementedError()

async def _run(self, **kwargs) -> None:
"""Run Docker image with retry if necessary."""
if await self.is_running():
return

# Cleanup
await self.stop()

# Create & Run container
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.run, self.image, **kwargs
)
except DockerNotFound as err:
# If image is missing, capture the exception as this shouldn't happen
capture_exception(err)
raise

# Store metadata
self._meta = docker_container.attrs

@Job(
name="docker_interface_stop",
limit=JobExecutionLimit.GROUP_ONCE,
@@ -451,12 +470,17 @@ class DockerInterface(JobGroup):
return b""

@Job(name="docker_interface_cleanup", limit=JobExecutionLimit.GROUP_WAIT)
def cleanup(self, old_image: str | None = None) -> Awaitable[None]:
def cleanup(
self,
old_image: str | None = None,
image: str | None = None,
version: AwesomeVersion | None = None,
) -> Awaitable[None]:
"""Check if old version exists and cleanup."""
return self.sys_run_in_executor(
self.sys_docker.cleanup_old_images,
self.image,
self.version,
image or self.image,
version or self.version,
{old_image} if old_image else None,
)
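The hunks above consolidate the duplicated start-up logic (the is_running check, the stop call, the executor call, and metadata capture) into DockerInterface._run, while cleanup() gains optional image/version overrides so callers can pin values captured before a concurrent store reload. A minimal sketch of how a subclass delegates after this change — illustrative only, with an assumed class name and tag:

class DockerExample(DockerInterface):
    async def run(self) -> None:
        """Run Docker image."""
        # The shared helper handles is_running(), stop() and metadata capture.
        await self._run(
            tag="latest",  # assumed tag for this sketch
            init=False,
            name=self.name,
        )
        _LOGGER.info("Starting Example %s with version %s", self.image, self.version)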
@@ -38,16 +38,7 @@ class DockerMulticast(DockerInterface, CoreSysAttributes):
)
async def run(self) -> None:
"""Run Docker image."""
if await self.is_running():
return

# Cleanup
await self.stop()

# Create & Run container
docker_container = await self.sys_run_in_executor(
self.sys_docker.run,
self.image,
await self._run(
tag=str(self.sys_plugins.multicast.version),
init=False,
name=self.name,
@@ -59,8 +50,6 @@ class DockerMulticast(DockerInterface, CoreSysAttributes):
extra_hosts={"supervisor": self.sys_docker.network.supervisor},
environment={ENV_TIME: self.sys_timezone},
)

self._meta = docker_container.attrs
_LOGGER.info(
"Starting Multicast %s with version %s - Host", self.image, self.version
)
@@ -35,16 +35,7 @@ class DockerObserver(DockerInterface, CoreSysAttributes):
)
async def run(self) -> None:
"""Run Docker image."""
if await self.is_running():
return

# Cleanup
await self.stop()

# Create & Run container
docker_container = await self.sys_run_in_executor(
self.sys_docker.run,
self.image,
await self._run(
tag=str(self.sys_plugins.observer.version),
init=False,
ipv4=self.sys_docker.network.observer,
@@ -63,8 +54,6 @@ class DockerObserver(DockerInterface, CoreSysAttributes):
ports={"80/tcp": 4357},
oom_score_adj=-300,
)

self._meta = docker_container.attrs
_LOGGER.info(
"Starting Observer %s with version %s - %s",
self.image,
@@ -67,6 +67,10 @@ class HomeAssistantCrashError(HomeAssistantError):
"""Error on crash of a Home Assistant startup."""


class HomeAssistantStartupTimeout(HomeAssistantCrashError):
"""Timeout waiting for Home Assistant successful startup."""


class HomeAssistantAPIError(HomeAssistantError):
"""Home Assistant API exception."""
@@ -2,12 +2,14 @@
import asyncio
from collections.abc import Awaitable
from contextlib import suppress
from dataclasses import dataclass
from datetime import datetime, timedelta
import logging
import re
import secrets
import shutil
from typing import Final

import attr
from awesomeversion import AwesomeVersion

from ..const import ATTR_HOMEASSISTANT, BusEvent
@@ -21,6 +23,7 @@ from ..exceptions import (
HomeAssistantCrashError,
HomeAssistantError,
HomeAssistantJobError,
HomeAssistantStartupTimeout,
HomeAssistantUpdateError,
JobException,
)
@@ -40,15 +43,17 @@ from .const import (

_LOGGER: logging.Logger = logging.getLogger(__name__)

SECONDS_BETWEEN_API_CHECKS: Final[int] = 5
STARTUP_API_CHECK_TIMEOUT: Final[timedelta] = timedelta(minutes=5)
RE_YAML_ERROR = re.compile(r"homeassistant\.util\.yaml")


@attr.s(frozen=True)
@dataclass
class ConfigResult:
"""Return object from config check."""

valid = attr.ib()
log = attr.ib()
valid: bool
log: str


class HomeAssistantCore(JobGroup):
@@ -58,7 +63,6 @@ class HomeAssistantCore(JobGroup):
"""Initialize Home Assistant object."""
super().__init__(coresys, JOB_GROUP_HOME_ASSISTANT_CORE)
self.instance: DockerHomeAssistant = DockerHomeAssistant(coresys)
self.lock: asyncio.Lock = asyncio.Lock()
self._error_state: bool = False

@property
@@ -402,7 +406,7 @@ class HomeAssistantCore(JobGroup):
@property
def in_progress(self) -> bool:
"""Return True if a task is in progress."""
return self.instance.in_progress or self.lock.locked()
return self.instance.in_progress or self.active_job

async def check_config(self) -> ConfigResult:
"""Run Home Assistant config check."""
@@ -436,8 +440,9 @@ class HomeAssistantCore(JobGroup):
return
_LOGGER.info("Wait until Home Assistant is ready")

while True:
await asyncio.sleep(5)
start = datetime.now()
while not (timeout := datetime.now() >= start + STARTUP_API_CHECK_TIMEOUT):
await asyncio.sleep(SECONDS_BETWEEN_API_CHECKS)

# 1: Check if Container is is_running
if not await self.instance.is_running():
@@ -451,6 +456,11 @@ class HomeAssistantCore(JobGroup):
return

self._error_state = True
if timeout:
raise HomeAssistantStartupTimeout(
"No API response in 5 minutes, assuming core has had a fatal startup error",
_LOGGER.error,
)
raise HomeAssistantCrashError()

@Job(
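The startup check above replaces an unbounded `while True` poll with a deadline: the walrus expression records whether the loop ended because the five-minute budget ran out, and only that path raises the new HomeAssistantStartupTimeout. A standalone sketch of the same pattern, with an assumed check_api coroutine standing in for the real API probe:

import asyncio
from datetime import datetime, timedelta

SECONDS_BETWEEN_API_CHECKS = 5
STARTUP_API_CHECK_TIMEOUT = timedelta(minutes=5)

async def wait_until_ready(check_api) -> None:
    start = datetime.now()
    # timeout becomes True exactly when the deadline is crossed
    while not (timeout := datetime.now() >= start + STARTUP_API_CHECK_TIMEOUT):
        await asyncio.sleep(SECONDS_BETWEEN_API_CHECKS)
        if await check_api():  # assumed coroutine returning True once the API answers
            return
    if timeout:
        raise TimeoutError("No API response in 5 minutes")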
@@ -18,6 +18,7 @@ from ..const import (
ATTR_ACCESS_TOKEN,
ATTR_AUDIO_INPUT,
ATTR_AUDIO_OUTPUT,
ATTR_BACKUPS_EXCLUDE_DATABASE,
ATTR_BOOT,
ATTR_IMAGE,
ATTR_PORT,
@@ -62,6 +63,10 @@ HOMEASSISTANT_BACKUP_EXCLUDE = [
"*.log.*",
"OZW_Log.txt",
]
HOMEASSISTANT_BACKUP_EXCLUDE_DATABASE = [
"home-assistant_v?.db",
"home-assistant_v?.db-wal",
]


class HomeAssistant(FileConfiguration, CoreSysAttributes):
@@ -258,6 +263,16 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
except (AwesomeVersionException, TypeError):
return False

@property
def backups_exclude_database(self) -> bool:
"""Exclude database from core backups by default."""
return self._data[ATTR_BACKUPS_EXCLUDE_DATABASE]

@backups_exclude_database.setter
def backups_exclude_database(self, value: bool) -> None:
"""Set whether backups should exclude database by default."""
self._data[ATTR_BACKUPS_EXCLUDE_DATABASE] = value

async def load(self) -> None:
"""Prepare Home Assistant object."""
await asyncio.wait(
@@ -327,7 +342,9 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
)

@Job(name="home_assistant_module_backup")
async def backup(self, tar_file: tarfile.TarFile) -> None:
async def backup(
self, tar_file: tarfile.TarFile, exclude_database: bool = False
) -> None:
"""Backup Home Assistant Core config/ directory."""
await self.begin_backup()
try:
@@ -351,11 +368,16 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
# Backup metadata
backup.add(temp, arcname=".")

# Set excludes
excludes = HOMEASSISTANT_BACKUP_EXCLUDE.copy()
if exclude_database:
excludes += HOMEASSISTANT_BACKUP_EXCLUDE_DATABASE

# Backup data
atomic_contents_add(
backup,
self.sys_config.path_homeassistant,
excludes=HOMEASSISTANT_BACKUP_EXCLUDE,
excludes=excludes,
arcname="data",
)

@@ -371,7 +393,10 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
finally:
await self.end_backup()

async def restore(self, tar_file: tarfile.TarFile) -> None:
@Job(name="home_assistant_module_restore")
async def restore(
self, tar_file: tarfile.TarFile, exclude_database: bool = False
) -> None:
"""Restore Home Assistant Core config/ directory."""
with TemporaryDirectory(dir=self.sys_config.path_tmp) as temp:
temp_path = Path(temp)
@@ -399,11 +424,22 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
def _restore_data():
"""Restore data."""
shutil.copytree(
temp_data, self.sys_config.path_homeassistant, symlinks=True
temp_data,
self.sys_config.path_homeassistant,
symlinks=True,
dirs_exist_ok=bool(excludes),
)

_LOGGER.info("Restore Home Assistant Core config folder")
await remove_folder(self.sys_config.path_homeassistant)
excludes = (
HOMEASSISTANT_BACKUP_EXCLUDE_DATABASE if exclude_database else None
)
await remove_folder(
self.sys_config.path_homeassistant,
content_only=bool(excludes),
excludes=excludes,
tmp_dir=self.sys_config.path_tmp,
)
try:
await self.sys_run_in_executor(_restore_data)
except shutil.Error as err:
@@ -455,11 +491,13 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
{ATTR_TYPE: "config/auth/list"}
)

return [
IngressSessionDataUser(
id=data["id"],
username=data.get("username"),
display_name=data.get("name"),
)
for data in list_of_users
]
if list_of_users:
return [
IngressSessionDataUser(
id=data["id"],
username=data.get("username"),
display_name=data.get("name"),
)
for data in list_of_users
]
return []
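The exclude lists introduced above are shell-style glob patterns; `home-assistant_v?.db` matches the versioned SQLite file and the write-ahead log is covered by a separate pattern. A quick illustration with Python's fnmatch (a sketch of the matching semantics only; the Supervisor applies these patterns through its tar and path helpers):

from fnmatch import fnmatch

assert fnmatch("home-assistant_v2.db", "home-assistant_v?.db")
assert fnmatch("home-assistant_v2.db-wal", "home-assistant_v?.db-wal")
assert not fnmatch("configuration.yaml", "home-assistant_v?.db")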
@@ -7,6 +7,7 @@ from ..const import (
ATTR_ACCESS_TOKEN,
ATTR_AUDIO_INPUT,
ATTR_AUDIO_OUTPUT,
ATTR_BACKUPS_EXCLUDE_DATABASE,
ATTR_BOOT,
ATTR_IMAGE,
ATTR_PORT,
@@ -32,6 +33,7 @@ SCHEMA_HASS_CONFIG = vol.Schema(
vol.Optional(ATTR_WATCHDOG, default=True): vol.Boolean(),
vol.Optional(ATTR_AUDIO_OUTPUT, default=None): vol.Maybe(str),
vol.Optional(ATTR_AUDIO_INPUT, default=None): vol.Maybe(str),
vol.Optional(ATTR_BACKUPS_EXCLUDE_DATABASE, default=False): vol.Boolean(),
},
extra=vol.REMOVE_EXTRA,
)
@@ -154,7 +154,7 @@ class WSClient:
@classmethod
async def connect_with_auth(
cls, session: aiohttp.ClientSession, loop, url: str, token: str
) -> "WSClient":
) -> WSClient:
"""Create an authenticated websocket client."""
try:
client = await session.ws_connect(url, ssl=False)
@@ -175,4 +175,4 @@ class HostManager(CoreSysAttributes):
async def _hardware_events(self, device: Device) -> None:
"""Process hardware requests."""
if self.sys_hardware.policy.is_match_cgroup(PolicyGroup.AUDIO, device):
await self.sound.update()
await self.sound.update(reload_pulse=True)
@@ -189,6 +189,7 @@ class NetworkManager(CoreSysAttributes):
_LOGGER.debug("Updating existing configuration for %s", interface.name)
settings = get_connection_from_interface(
interface,
self.sys_dbus.network,
name=inet.settings.connection.id,
uuid=inet.settings.connection.uuid,
)
@@ -217,7 +218,7 @@ class NetworkManager(CoreSysAttributes):
# Create new configuration and activate interface
elif inet and interface.enabled:
_LOGGER.debug("Create new configuration for %s", interface.name)
settings = get_connection_from_interface(interface)
settings = get_connection_from_interface(interface, self.sys_dbus.network)

try:
settings, con = await self.sys_dbus.network.add_and_activate_connection(
@@ -244,7 +245,7 @@ class NetworkManager(CoreSysAttributes):

# Create new interface (like vlan)
elif not inet:
settings = get_connection_from_interface(interface)
settings = get_connection_from_interface(interface, self.sys_dbus.network)

try:
await self.sys_dbus.network.settings.add_connection(settings)
@@ -15,6 +15,9 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
PULSE_NAME = "supervisor"

PULSE_ALSA_MODULE = "module-alsa-card"
PULSE_UDEV_MODULE = "module-udev-detect"


class StreamType(StrEnum):
"""INPUT/OUTPUT type of source."""
@@ -235,9 +238,9 @@ class SoundControl(CoreSysAttributes):
@Job(
name="sound_control_update",
limit=JobExecutionLimit.THROTTLE_WAIT,
throttle_period=timedelta(seconds=10),
throttle_period=timedelta(seconds=2),
)
async def update(self):
async def update(self, reload_pulse: bool = False):
"""Update properties over dbus."""
_LOGGER.info("Updating PulseAudio information")

@@ -348,11 +351,32 @@ class SoundControl(CoreSysAttributes):
f"Error while processing pulse update: {err}", _LOGGER.error
) from err
except PulseError as err:
_LOGGER.debug("Can't update PulseAudio data: %s", err)
_LOGGER.warning("Can't update PulseAudio data: %s", err)

return data

def _reload_pulse_modules():
try:
with Pulse(PULSE_NAME) as pulse:
modules = pulse.module_list()
for alsa_module in filter(
lambda x: x.name == PULSE_ALSA_MODULE, modules
):
pulse.module_unload(alsa_module.index)
udev_module = next(
filter(lambda x: x.name == PULSE_UDEV_MODULE, modules)
)
pulse.module_unload(udev_module.index)
# And now reload
pulse.module_load(PULSE_UDEV_MODULE)
except StopIteration:
_LOGGER.warning("Can't reload PulseAudio modules.")
except PulseError as err:
_LOGGER.warning("Can't reload PulseAudio modules: %s", err)

# Update data from pulse server
if reload_pulse:
await self.sys_run_in_executor(_reload_pulse_modules)
data: PulseData = await self.sys_run_in_executor(_get_pulse_data)
self._applications = data.applications
self._cards = data.cards
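The reload path above unloads every module-alsa-card instance plus the single module-udev-detect instance, then loads module-udev-detect again so PulseAudio re-enumerates the ALSA cards. A hedged usage sketch of the underlying pulsectl calls (requires the pulsectl package and a reachable PulseAudio server; "supervisor" is the client name used above):

from pulsectl import Pulse

with Pulse("supervisor") as pulse:
    for module in pulse.module_list():
        # e.g. module-udev-detect, module-alsa-card, module-native-protocol-unix
        print(module.index, module.name)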
@@ -86,7 +86,7 @@ class SupervisorJob:
}

@contextmanager
def start(self, *, on_done: Callable[["SupervisorJob"], None] | None = None):
def start(self):
"""Start the job in the current task.

This can only be called if the parent ID matches the job running in the current task.
@@ -107,8 +107,6 @@ class SupervisorJob:
self.done = True
if token:
_CURRENT_JOB.reset(token)
if on_done:
on_done(self)


class JobManager(FileConfiguration, CoreSysAttributes):
@@ -192,7 +190,7 @@ class JobManager(FileConfiguration, CoreSysAttributes):
if job.uuid not in self._jobs:
raise JobNotFound(f"Could not find job {job.name}", _LOGGER.error)

if not job.done:
if job.done is False:
_LOGGER.warning("Removing incomplete job %s from job manager", job.name)

del self._jobs[job.uuid]
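The `job.done is False` comparison above is deliberate: `done` is tri-state (None until the job starts, False once started but incomplete, True when finished), and only the started-but-incomplete case deserves a warning before removal. A small sketch of the distinction:

def should_warn(done: bool | None) -> bool:
    # Warn only for jobs that started and never completed
    return done is False

assert should_warn(None) is False   # never started: removed silently
assert should_warn(False) is True   # started but incomplete: warn
assert should_warn(True) is False   # finished normally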
@@ -174,6 +174,14 @@ class Job(CoreSysAttributes):
return obj
return None

def _handle_job_condition_exception(self, err: JobConditionException) -> None:
"""Handle a job condition failure."""
error_msg = str(err)
if self.on_condition is None:
_LOGGER.info(error_msg)
return
raise self.on_condition(error_msg, _LOGGER.warning) from None

def __call__(self, method):
"""Call the wrapper logic."""
self._method = method
@@ -193,99 +201,125 @@ class Job(CoreSysAttributes):
internal=self._internal,
)

# Handle condition
if self.conditions:
try:
await self._check_conditions()
except JobConditionException as err:
error_msg = str(err)
if self.on_condition is None:
_LOGGER.info(error_msg)
try:
# Handle condition
if self.conditions:
try:
await Job.check_conditions(
self, set(self.conditions), self._method.__qualname__
)
except JobConditionException as err:
return self._handle_job_condition_exception(err)

# Handle exection limits
if self.limit in (
JobExecutionLimit.SINGLE_WAIT,
JobExecutionLimit.ONCE,
):
await self._acquire_exection_limit()
elif self.limit in (
JobExecutionLimit.GROUP_ONCE,
JobExecutionLimit.GROUP_WAIT,
):
try:
await obj.acquire(
job, self.limit == JobExecutionLimit.GROUP_WAIT
)
except JobGroupExecutionLimitExceeded as err:
if self.on_condition:
raise self.on_condition(str(err)) from err
raise err
elif self.limit in (
JobExecutionLimit.THROTTLE,
JobExecutionLimit.GROUP_THROTTLE,
):
time_since_last_call = datetime.now() - self.last_call(group_name)
if time_since_last_call < self.throttle_period(group_name):
return
raise self.on_condition(error_msg, _LOGGER.warning) from None

# Handle exection limits
if self.limit in (JobExecutionLimit.SINGLE_WAIT, JobExecutionLimit.ONCE):
await self._acquire_exection_limit()
elif self.limit in (
JobExecutionLimit.GROUP_ONCE,
JobExecutionLimit.GROUP_WAIT,
):
try:
await obj.acquire(job, self.limit == JobExecutionLimit.GROUP_WAIT)
except JobGroupExecutionLimitExceeded as err:
if self.on_condition:
raise self.on_condition(str(err)) from err
raise err
elif self.limit in (
JobExecutionLimit.THROTTLE,
JobExecutionLimit.GROUP_THROTTLE,
):
time_since_last_call = datetime.now() - self.last_call(group_name)
if time_since_last_call < self.throttle_period(group_name):
return
elif self.limit in (
JobExecutionLimit.THROTTLE_WAIT,
JobExecutionLimit.GROUP_THROTTLE_WAIT,
):
await self._acquire_exection_limit()
time_since_last_call = datetime.now() - self.last_call(group_name)
if time_since_last_call < self.throttle_period(group_name):
self._release_exception_limits()
return
elif self.limit in (
JobExecutionLimit.THROTTLE_RATE_LIMIT,
JobExecutionLimit.GROUP_THROTTLE_RATE_LIMIT,
):
# Only reprocess array when necessary (at limit)
if len(self.rate_limited_calls(group_name)) >= self.throttle_max_calls:
self.set_rate_limited_calls(
[
call
for call in self.rate_limited_calls(group_name)
if call > datetime.now() - self.throttle_period(group_name)
],
group_name,
)

if len(self.rate_limited_calls(group_name)) >= self.throttle_max_calls:
on_condition = (
JobException if self.on_condition is None else self.on_condition
)
raise on_condition(
f"Rate limit exceeded, more then {self.throttle_max_calls} calls in {self.throttle_period(group_name)}",
)

# Execute Job
with job.start(on_done=self.sys_jobs.remove_job if self.cleanup else None):
try:
self.set_last_call(datetime.now(), group_name)
if self.rate_limited_calls(group_name) is not None:
self.add_rate_limited_call(
self.last_call(group_name), group_name
elif self.limit in (
JobExecutionLimit.THROTTLE_WAIT,
JobExecutionLimit.GROUP_THROTTLE_WAIT,
):
await self._acquire_exection_limit()
time_since_last_call = datetime.now() - self.last_call(group_name)
if time_since_last_call < self.throttle_period(group_name):
self._release_exception_limits()
return
elif self.limit in (
JobExecutionLimit.THROTTLE_RATE_LIMIT,
JobExecutionLimit.GROUP_THROTTLE_RATE_LIMIT,
):
# Only reprocess array when necessary (at limit)
if (
len(self.rate_limited_calls(group_name))
>= self.throttle_max_calls
):
self.set_rate_limited_calls(
[
call
for call in self.rate_limited_calls(group_name)
if call
> datetime.now() - self.throttle_period(group_name)
],
group_name,
)

return await self._method(obj, *args, **kwargs)
except HassioError as err:
raise err
except Exception as err:
_LOGGER.exception("Unhandled exception: %s", err)
capture_exception(err)
raise JobException() from err
finally:
self._release_exception_limits()
if self.limit in (
JobExecutionLimit.GROUP_ONCE,
JobExecutionLimit.GROUP_WAIT,
if (
len(self.rate_limited_calls(group_name))
>= self.throttle_max_calls
):
obj.release()
on_condition = (
JobException
if self.on_condition is None
else self.on_condition
)
raise on_condition(
f"Rate limit exceeded, more than {self.throttle_max_calls} calls in {self.throttle_period(group_name)}",
)

# Execute Job
with job.start():
try:
self.set_last_call(datetime.now(), group_name)
if self.rate_limited_calls(group_name) is not None:
self.add_rate_limited_call(
self.last_call(group_name), group_name
)

return await self._method(obj, *args, **kwargs)

# If a method has a conditional JobCondition, they must check it in the method
# These should be handled like normal JobConditions as much as possible
except JobConditionException as err:
return self._handle_job_condition_exception(err)
except HassioError as err:
raise err
except Exception as err:
_LOGGER.exception("Unhandled exception: %s", err)
capture_exception(err)
raise JobException() from err
finally:
self._release_exception_limits()
if self.limit in (
JobExecutionLimit.GROUP_ONCE,
JobExecutionLimit.GROUP_WAIT,
):
obj.release()

# Jobs that weren't started are always cleaned up. Also clean up done jobs if required
finally:
if job.done is None or self.cleanup:
self.sys_jobs.remove_job(job)

return wrapper

async def _check_conditions(self):
@staticmethod
async def check_conditions(
coresys: CoreSysAttributes, conditions: set[JobCondition], method_name: str
):
"""Check conditions."""
used_conditions = set(self.conditions) - set(self.sys_jobs.ignore_conditions)
ignored_conditions = set(self.conditions) & set(self.sys_jobs.ignore_conditions)
used_conditions = set(conditions) - set(coresys.sys_jobs.ignore_conditions)
ignored_conditions = set(conditions) & set(coresys.sys_jobs.ignore_conditions)

# Check if somethings is ignored
if ignored_conditions:
@@ -294,93 +328,97 @@ class Job(CoreSysAttributes):
ignored_conditions,
)

if JobCondition.HEALTHY in used_conditions and not self.sys_core.healthy:
if JobCondition.HEALTHY in used_conditions and not coresys.sys_core.healthy:
raise JobConditionException(
f"'{self._method.__qualname__}' blocked from execution, system is not healthy - {', '.join(self.sys_resolution.unhealthy)}"
f"'{method_name}' blocked from execution, system is not healthy - {', '.join(coresys.sys_resolution.unhealthy)}"
)

if (
JobCondition.RUNNING in used_conditions
and self.sys_core.state != CoreState.RUNNING
and coresys.sys_core.state != CoreState.RUNNING
):
raise JobConditionException(
f"'{self._method.__qualname__}' blocked from execution, system is not running - {self.sys_core.state!s}"
f"'{method_name}' blocked from execution, system is not running - {coresys.sys_core.state!s}"
)

if (
JobCondition.FROZEN in used_conditions
and self.sys_core.state != CoreState.FREEZE
and coresys.sys_core.state != CoreState.FREEZE
):
raise JobConditionException(
f"'{self._method.__qualname__}' blocked from execution, system is not frozen - {self.sys_core.state!s}"
f"'{method_name}' blocked from execution, system is not frozen - {coresys.sys_core.state!s}"
)

if (
JobCondition.FREE_SPACE in used_conditions
and self.sys_host.info.free_space < MINIMUM_FREE_SPACE_THRESHOLD
and coresys.sys_host.info.free_space < MINIMUM_FREE_SPACE_THRESHOLD
):
self.sys_resolution.create_issue(IssueType.FREE_SPACE, ContextType.SYSTEM)
coresys.sys_resolution.create_issue(
IssueType.FREE_SPACE, ContextType.SYSTEM
)
raise JobConditionException(
f"'{self._method.__qualname__}' blocked from execution, not enough free space ({self.sys_host.info.free_space}GB) left on the device"
f"'{method_name}' blocked from execution, not enough free space ({coresys.sys_host.info.free_space}GB) left on the device"
)

if JobCondition.INTERNET_SYSTEM in used_conditions:
await self.sys_supervisor.check_connectivity()
if not self.sys_supervisor.connectivity:
await coresys.sys_supervisor.check_connectivity()
if not coresys.sys_supervisor.connectivity:
raise JobConditionException(
f"'{self._method.__qualname__}' blocked from execution, no supervisor internet connection"
f"'{method_name}' blocked from execution, no supervisor internet connection"
)

if JobCondition.INTERNET_HOST in used_conditions:
await self.sys_host.network.check_connectivity()
await coresys.sys_host.network.check_connectivity()
if (
self.sys_host.network.connectivity is not None
and not self.sys_host.network.connectivity
coresys.sys_host.network.connectivity is not None
and not coresys.sys_host.network.connectivity
):
raise JobConditionException(
f"'{self._method.__qualname__}' blocked from execution, no host internet connection"
f"'{method_name}' blocked from execution, no host internet connection"
)

if JobCondition.HAOS in used_conditions and not self.sys_os.available:
if JobCondition.HAOS in used_conditions and not coresys.sys_os.available:
raise JobConditionException(
f"'{self._method.__qualname__}' blocked from execution, no Home Assistant OS available"
f"'{method_name}' blocked from execution, no Home Assistant OS available"
)

if (
JobCondition.OS_AGENT in used_conditions
and HostFeature.OS_AGENT not in self.sys_host.features
and HostFeature.OS_AGENT not in coresys.sys_host.features
):
raise JobConditionException(
f"'{self._method.__qualname__}' blocked from execution, no Home Assistant OS-Agent available"
f"'{method_name}' blocked from execution, no Home Assistant OS-Agent available"
)

if (
JobCondition.HOST_NETWORK in used_conditions
and not self.sys_dbus.network.is_connected
and not coresys.sys_dbus.network.is_connected
):
raise JobConditionException(
f"'{self._method.__qualname__}' blocked from execution, host Network Manager not available"
f"'{method_name}' blocked from execution, host Network Manager not available"
)

if (
JobCondition.AUTO_UPDATE in used_conditions
and not self.sys_updater.auto_update
and not coresys.sys_updater.auto_update
):
raise JobConditionException(
f"'{self._method.__qualname__}' blocked from execution, supervisor auto updates disabled"
f"'{method_name}' blocked from execution, supervisor auto updates disabled"
)

if (
JobCondition.SUPERVISOR_UPDATED in used_conditions
and self.sys_supervisor.need_update
and coresys.sys_supervisor.need_update
):
raise JobConditionException(
f"'{self._method.__qualname__}' blocked from execution, supervisor needs to be updated first"
f"'{method_name}' blocked from execution, supervisor needs to be updated first"
)

if JobCondition.PLUGINS_UPDATED in used_conditions and (
out_of_date := [
plugin for plugin in self.sys_plugins.all_plugins if plugin.need_update
plugin
for plugin in coresys.sys_plugins.all_plugins
if plugin.need_update
]
):
errors = await asyncio.gather(
@@ -391,15 +429,15 @@ class Job(CoreSysAttributes):
out_of_date[i].slug for i in range(len(errors)) if errors[i] is not None
]:
raise JobConditionException(
f"'{self._method.__qualname__}' blocked from execution, was unable to update plugin(s) {', '.join(update_failures)} and all plugins must be up to date first"
f"'{method_name}' blocked from execution, was unable to update plugin(s) {', '.join(update_failures)} and all plugins must be up to date first"
)

if (
JobCondition.MOUNT_AVAILABLE in used_conditions
and HostFeature.MOUNT not in self.sys_host.features
and HostFeature.MOUNT not in coresys.sys_host.features
):
raise JobConditionException(
f"'{self._method.__qualname__}' blocked from execution, mounting not supported on system"
f"'{method_name}' blocked from execution, mounting not supported on system"
)

async def _acquire_exection_limit(self) -> None:
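The THROTTLE_RATE_LIMIT branch above prunes the call history lazily: the list is only rebuilt once it reaches throttle_max_calls, keeping just the timestamps younger than the throttle period, and the job is rejected if it is still at the limit afterwards. A simplified standalone sketch of that logic (assumed names, not the Supervisor implementation):

from datetime import datetime, timedelta

def prune_and_check(calls: list[datetime], period: timedelta, max_calls: int) -> list[datetime]:
    # Only reprocess the array when necessary (at limit)
    if len(calls) >= max_calls:
        calls = [call for call in calls if call > datetime.now() - period]
    if len(calls) >= max_calls:
        raise RuntimeError(f"Rate limit exceeded, more than {max_calls} calls in {period}")
    return calls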
@@ -7,6 +7,7 @@ from ..addons.const import ADDON_UPDATE_CONDITIONS
from ..const import AddonState
from ..coresys import CoreSysAttributes
from ..exceptions import AddonsError, HomeAssistantError, ObserverError
from ..homeassistant.const import LANDINGPAGE
from ..jobs.decorator import Job, JobCondition
from ..plugins.const import PLUGIN_UPDATE_CONDITIONS
from ..utils.sentry import capture_exception
@@ -142,6 +143,9 @@ class Tasks(CoreSysAttributes):
if self.sys_homeassistant.error_state:
# Home Assistant is in an error state, this is handled by the rollback feature
return
if self.sys_homeassistant.version == LANDINGPAGE:
# Skip watchdog for landingpage
return
if not await self.sys_homeassistant.core.is_running():
# The home assistant container is not running
return
@@ -105,11 +105,7 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
if not (event.name == self.instance.name):
return

if event.state in [
ContainerState.FAILED,
ContainerState.STOPPED,
ContainerState.UNHEALTHY,
]:
if event.state in {ContainerState.FAILED, ContainerState.UNHEALTHY}:
await self._restart_after_problem(event.state)

async def _restart_after_problem(self, state: ContainerState):
@@ -123,10 +119,7 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
state,
)
try:
if state == ContainerState.STOPPED and attempts == 0:
await self.start()
else:
await self.rebuild()
await self.rebuild()
except PluginError as err:
attempts = attempts + 1
_LOGGER.error("Watchdog restart of %s plugin failed!", self.slug)
57  supervisor/resolution/fixups/addon_execute_repair.py  Normal file
@@ -0,0 +1,57 @@
"""Helper to fix missing image for addon."""

import logging

from ...coresys import CoreSys
from ..const import ContextType, IssueType, SuggestionType
from .base import FixupBase

_LOGGER: logging.Logger = logging.getLogger(__name__)


def setup(coresys: CoreSys) -> FixupBase:
"""Check setup function."""
return FixupAddonExecuteRepair(coresys)


class FixupAddonExecuteRepair(FixupBase):
"""Storage class for fixup."""

async def process_fixup(self, reference: str | None = None) -> None:
"""Pull the addons image."""
addon = self.sys_addons.get(reference, local_only=True)
if not addon:
_LOGGER.info(
"Cannot repair addon %s as it is not installed, dismissing suggestion",
reference,
)
return

if await addon.instance.exists():
_LOGGER.info(
"Addon %s does not need repair, dismissing suggestion", reference
)
return

_LOGGER.info("Installing image for addon %s", reference)
await addon.instance.install(addon.version)

@property
def suggestion(self) -> SuggestionType:
"""Return a SuggestionType enum."""
return SuggestionType.EXECUTE_REPAIR

@property
def context(self) -> ContextType:
"""Return a ContextType enum."""
return ContextType.ADDON

@property
def issues(self) -> list[IssueType]:
"""Return a IssueType enum list."""
return [IssueType.MISSING_IMAGE]

@property
def auto(self) -> bool:
"""Return if a fixup can be apply as auto fix."""
return True
@@ -24,6 +24,8 @@ class FixupMountExecuteRemove(FixupBase):
await self.sys_mounts.remove_mount(reference)
except MountNotFound:
_LOGGER.warning("Can't find mount %s for fixup", reference)
else:
self.sys_mounts.save_data()

@property
def suggestion(self) -> SuggestionType:
@@ -1,7 +1,11 @@
"""Init file for Supervisor add-ons."""

from copy import deepcopy
import logging
from typing import Self

from ..addons.model import AddonModel, Data
from ..coresys import CoreSys

_LOGGER: logging.Logger = logging.getLogger(__name__)

@@ -9,6 +13,11 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
class AddonStore(AddonModel):
"""Hold data for add-on inside Supervisor."""

def __init__(self, coresys: CoreSys, slug: str, data: Data | None = None):
"""Initialize object."""
super().__init__(coresys, slug)
self._data: Data | None = data

def __repr__(self) -> str:
"""Return internal representation."""
return f"<Store: {self.slug}>"
@@ -16,7 +25,7 @@ class AddonStore(AddonModel):
@property
def data(self) -> Data:
"""Return add-on data/config."""
return self.sys_store.data.addons[self.slug]
return self._data or self.sys_store.data.addons[self.slug]

@property
def is_installed(self) -> bool:
@@ -27,3 +36,7 @@ class AddonStore(AddonModel):
def is_detached(self) -> bool:
"""Return True if add-on is detached."""
return False

def clone(self) -> Self:
"""Return a copy that includes data and does not use global store data."""
return type(self)(self.coresys, self.slug, deepcopy(self.data))
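AddonStore.clone() above exists so an update can work from a snapshot: data normally resolves live from the global store, and a store reload mid-update would otherwise swap version and image underneath the running job. A toy model of the same idea (illustrative names only):

from copy import deepcopy

class Entry:
    shared = {"version": "1.1.1"}  # stands in for the global store data

    def __init__(self, data=None):
        self._data = data

    @property
    def data(self):
        return self._data or Entry.shared

    def clone(self):
        return Entry(deepcopy(self.data))

pinned = Entry().clone()
Entry.shared["version"] = "9.2.1"  # simulated store reload during the update
assert pinned.data["version"] == "1.1.1"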
@@ -6,6 +6,7 @@ import os
from pathlib import Path
import re
import socket
from tempfile import TemporaryDirectory
from typing import Any

_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -76,13 +77,31 @@ def get_message_from_exception_chain(err: Exception) -> str:
return get_message_from_exception_chain(err.__context__)


async def remove_folder(folder: Path, content_only: bool = False) -> None:
async def remove_folder(
folder: Path,
content_only: bool = False,
excludes: list[str] | None = None,
tmp_dir: Path | None = None,
) -> None:
"""Remove folder and reset privileged.

Is needed to avoid issue with:
- CAP_DAC_OVERRIDE
- CAP_DAC_READ_SEARCH
"""
if excludes:
if not tmp_dir:
raise ValueError("tmp_dir is required if excludes are provided")
if not content_only:
raise ValueError("Cannot delete the folder if excludes are provided")

temp = TemporaryDirectory(dir=tmp_dir)
temp_path = Path(temp.name)
moved_files: list[Path] = []
for item in folder.iterdir():
if any(item.match(exclude) for exclude in excludes):
moved_files.append(item.rename(temp_path / item.name))

del_folder = f"{folder}" + "/{,.[!.],..?}*" if content_only else f"{folder}"
try:
proc = await asyncio.create_subprocess_exec(
@@ -99,6 +118,11 @@ async def remove_folder(folder: Path, content_only: bool = False) -> None:
else:
if proc.returncode == 0:
return
finally:
if excludes:
for item in moved_files:
item.rename(folder / item.name)
temp.cleanup()

_LOGGER.error("Can't remove folder %s: %s", folder, error_msg)
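remove_folder above gains its exclude support through a move-aside: matching files are renamed into a TemporaryDirectory (which must live on the same filesystem, hence the tmp_dir argument), the folder content is deleted, and the finally block moves the files back even when deletion fails. A self-contained sketch of that pattern, using a plain Python delete instead of the subprocess call (assumed helper, not the Supervisor implementation):

import shutil
from pathlib import Path
from tempfile import TemporaryDirectory

def delete_content_except(folder: Path, excludes: list[str], tmp_dir: Path) -> None:
    temp = TemporaryDirectory(dir=tmp_dir)
    temp_path = Path(temp.name)
    # Move excluded files out of harm's way first
    moved = [
        item.rename(temp_path / item.name)
        for item in folder.iterdir()
        if any(item.match(pattern) for pattern in excludes)
    ]
    try:
        for item in list(folder.iterdir()):
            shutil.rmtree(item) if item.is_dir() else item.unlink()
    finally:
        # Restore excluded files even if the delete failed halfway
        for item in moved:
            item.rename(folder / item.name)
        temp.cleanup()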
@@ -3,20 +3,23 @@ import logging
from pathlib import Path

from atomicwrites import atomic_write
from ruamel.yaml import YAML, YAMLError
from yaml import YAMLError, dump, load

try:
from yaml import CDumper as Dumper, CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader, Dumper

from ..exceptions import YamlFileError

_YAML = YAML(typ="safe")
_YAML.allow_duplicate_keys = True

_LOGGER: logging.Logger = logging.getLogger(__name__)


def read_yaml_file(path: Path) -> dict:
"""Read YAML file from path."""
try:
return _YAML.load(path) or {}
with open(path, encoding="utf-8") as yaml_file:
return load(yaml_file, Loader=SafeLoader) or {}

except (YAMLError, AttributeError, OSError) as err:
raise YamlFileError(
@@ -28,7 +31,7 @@ def write_yaml_file(path: Path, data: dict) -> None:
"""Write a YAML file."""
try:
with atomic_write(path, overwrite=True) as fp:
_YAML.dump(data, fp)
dump(data, fp, Dumper=Dumper)
path.chmod(0o600)
except (YAMLError, OSError, ValueError, TypeError) as err:
raise YamlFileError(f"Can't write {path!s}: {err!s}", _LOGGER.error) from err
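The try/except import above switches the Supervisor from ruamel.yaml to PyYAML and prefers libyaml's C-accelerated loader and dumper when the binding is available, falling back to the pure-Python implementations otherwise. The same fallback can be checked directly (a sketch; the printed result depends on how PyYAML was built):

import yaml

print("C loader available:", hasattr(yaml, "CSafeLoader"))
loader = getattr(yaml, "CSafeLoader", yaml.SafeLoader)
assert yaml.load("a: 1\nb: [2, 3]", Loader=loader) == {"a": 1, "b": [2, 3]}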
@@ -39,10 +39,6 @@ def _fire_test_event(coresys: CoreSys, name: str, state: ContainerState):
)


async def mock_stop() -> None:
"""Mock for stop method."""


def test_options_merge(coresys: CoreSys, install_addon_ssh: Addon) -> None:
"""Test options merge."""
addon = coresys.addons.get(TEST_ADDON_SLUG)
@@ -148,7 +144,7 @@ async def test_addon_watchdog(coresys: CoreSys, install_addon_ssh: Addon) -> Non

# Rebuild if it failed
current_state.return_value = ContainerState.FAILED
with patch.object(DockerAddon, "stop", return_value=mock_stop()) as stop:
with patch.object(DockerAddon, "stop") as stop:
_fire_test_event(coresys, f"addon_{TEST_ADDON_SLUG}", ContainerState.FAILED)
await asyncio.sleep(0)
stop.assert_called_once_with(remove_container=True)
@@ -183,7 +179,7 @@ async def test_watchdog_on_stop(coresys: CoreSys, install_addon_ssh: Addon) -> N
DockerAddon,
"current_state",
return_value=ContainerState.STOPPED,
), patch.object(DockerAddon, "stop", return_value=mock_stop()):
), patch.object(DockerAddon, "stop"):
# Do not restart when addon stopped by user
_fire_test_event(coresys, f"addon_{TEST_ADDON_SLUG}", ContainerState.RUNNING)
await asyncio.sleep(0)
@@ -515,6 +511,42 @@ async def test_backup_cold_mode(
assert bool(start_task) is (status == "running")


async def test_backup_cold_mode_with_watchdog(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
tmp_supervisor_data,
path_extern,
):
"""Test backing up an addon in cold mode with watchdog active."""
container.status = "running"
install_addon_ssh.watchdog = True
install_addon_ssh.path_data.mkdir()
await install_addon_ssh.load()

# Simulate stop firing the docker event for stopped container like it normally would
async def mock_stop(*args, **kwargs):
container.status = "stopped"
_fire_test_event(coresys, f"addon_{TEST_ADDON_SLUG}", ContainerState.STOPPED)

# Patching out the normal end of backup process leaves the container in a stopped state
# Watchdog should still not try to restart it though, it should remain this way
tarfile = SecureTarFile(coresys.config.path_tmp / "test.tar.gz", "w")
with patch.object(Addon, "start") as start, patch.object(
Addon, "restart"
) as restart, patch.object(Addon, "end_backup"), patch.object(
DockerAddon, "stop", new=mock_stop
), patch.object(
AddonModel,
"backup_mode",
new=PropertyMock(return_value=AddonBackupMode.COLD),
):
await install_addon_ssh.backup(tarfile)
await asyncio.sleep(0)
start.assert_not_called()
restart.assert_not_called()


@pytest.mark.parametrize("status", ["running", "stopped"])
async def test_restore(
coresys: CoreSys,
@@ -532,11 +564,9 @@ async def test_restore(
tarfile = SecureTarFile(get_fixture_path(f"backup_local_ssh_{status}.tar.gz"), "r")
with patch.object(DockerAddon, "is_running", return_value=False), patch.object(
CpuArch, "supported", new=PropertyMock(return_value=["aarch64"])
), patch.object(Ingress, "update_hass_panel") as update_hass_panel:
):
start_task = await coresys.addons.restore(TEST_ADDON_SLUG, tarfile)

update_hass_panel.assert_called_once()

assert bool(start_task) is (status == "running")


@@ -563,6 +593,41 @@ async def test_restore_while_running(
container.stop.assert_called_once()


async def test_restore_while_running_with_watchdog(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
tmp_supervisor_data,
path_extern,
):
"""Test restore of a running addon with watchdog interference."""
container.status = "running"
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
install_addon_ssh.path_data.mkdir()
install_addon_ssh.watchdog = True
await install_addon_ssh.load()

# Simulate stop firing the docker event for stopped container like it normally would
async def mock_stop(*args, **kwargs):
container.status = "stopped"
_fire_test_event(coresys, f"addon_{TEST_ADDON_SLUG}", ContainerState.STOPPED)

# We restore a stopped backup so restore will not restart it
# Watchdog will see it stop and should not attempt reanimation either
tarfile = SecureTarFile(get_fixture_path("backup_local_ssh_stopped.tar.gz"), "r")
with patch.object(Addon, "start") as start, patch.object(
Addon, "restart"
) as restart, patch.object(DockerAddon, "stop", new=mock_stop), patch.object(
CpuArch, "supported", new=PropertyMock(return_value=["aarch64"])
), patch.object(
Ingress, "update_hass_panel"
):
await coresys.addons.restore(TEST_ADDON_SLUG, tarfile)
await asyncio.sleep(0)
start.assert_not_called()
restart.assert_not_called()


async def test_start_when_running(
coresys: CoreSys,
install_addon_ssh: Addon,
@@ -15,6 +15,7 @@ from supervisor.coresys import CoreSys
from supervisor.docker.addon import DockerAddon
from supervisor.docker.const import ContainerState
from supervisor.docker.interface import DockerInterface
from supervisor.docker.manager import DockerAPI
from supervisor.docker.monitor import DockerContainerStateEvent
from supervisor.exceptions import (
AddonConfigurationError,
@@ -23,6 +24,7 @@ from supervisor.exceptions import (
DockerNotFound,
)
from supervisor.plugins.dns import PluginDns
from supervisor.store.repository import Repository
from supervisor.utils import check_exception_chain
from supervisor.utils.common import write_json_file

@@ -364,3 +366,78 @@ async def test_repository_file_error(
write_json_file(repo_file, {"invalid": "bad"})
await coresys.store.data.update()
assert f"Repository parse error {repo_dir.as_posix()}" in caplog.text


async def test_store_data_changes_during_update(
coresys: CoreSys, install_addon_ssh: Addon
):
"""Test store data changing for an addon during an update does not cause errors."""
event = asyncio.Event()
coresys.store.data.addons["local_ssh"]["image"] = "test_image"
coresys.store.data.addons["local_ssh"]["version"] = AwesomeVersion("1.1.1")

async def simulate_update():
async def mock_update(_, version, image, *args, **kwargs):
assert version == AwesomeVersion("1.1.1")
assert image == "test_image"
await event.wait()

with patch.object(DockerAddon, "update", new=mock_update), patch.object(
DockerAPI, "cleanup_old_images"
) as cleanup:
await coresys.addons.update("local_ssh")
cleanup.assert_called_once_with(
"test_image", AwesomeVersion("1.1.1"), {"local/amd64-addon-ssh"}
)

update_task = coresys.create_task(simulate_update())
await asyncio.sleep(0)

with patch.object(Repository, "update"):
await coresys.store.reload()

assert "image" not in coresys.store.data.addons["local_ssh"]
assert coresys.store.data.addons["local_ssh"]["version"] == AwesomeVersion("9.2.1")

event.set()
await update_task

assert install_addon_ssh.image == "test_image"
assert install_addon_ssh.version == AwesomeVersion("1.1.1")


async def test_watchdog_runs_during_update(
coresys: CoreSys, install_addon_ssh: Addon, container: MagicMock
):
"""Test watchdog running during a long update."""
container.status = "running"
install_addon_ssh.watchdog = True
coresys.store.data.addons["local_ssh"]["image"] = "test_image"
coresys.store.data.addons["local_ssh"]["version"] = AwesomeVersion("1.1.1")
await install_addon_ssh.load()

# Simulate stop firing the docker event for stopped container like it normally would
async def mock_stop(*args, **kwargs):
container.status = "stopped"
coresys.bus.fire_event(
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
DockerContainerStateEvent(
name=f"addon_{TEST_ADDON_SLUG}",
state=ContainerState.STOPPED,
id="abc123",
time=1,
),
)

# Mock update to just wait and let other tasks run as if it is long running
async def mock_update(*args, **kwargs):
await asyncio.sleep(0)

# Start should be called exactly once by update itself. Restart should never be called
with patch.object(DockerAddon, "stop", new=mock_stop), patch.object(
DockerAddon, "update", new=mock_update
), patch.object(Addon, "start") as start, patch.object(Addon, "restart") as restart:
await coresys.addons.update("local_ssh")
await asyncio.sleep(0)
start.assert_called_once()
restart.assert_not_called()
@@ -2,7 +2,7 @@

import asyncio
from pathlib import Path, PurePath
from unittest.mock import AsyncMock, patch
from unittest.mock import ANY, AsyncMock, patch

from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
@@ -11,6 +11,7 @@ import pytest
from supervisor.backups.backup import Backup
from supervisor.const import CoreState
from supervisor.coresys import CoreSys
from supervisor.homeassistant.module import HomeAssistant
from supervisor.mounts.mount import Mount


@@ -167,3 +168,34 @@ async def test_api_freeze_thaw(
call.args[0] == {"type": "backup/end"}
for call in ha_ws_client.async_send_command.call_args_list
)


@pytest.mark.parametrize(
"partial_backup,exclude_db_setting",
[(False, True), (True, True), (False, False), (True, False)],
)
async def test_api_backup_exclude_database(
api_client: TestClient,
coresys: CoreSys,
partial_backup: bool,
exclude_db_setting: bool,
tmp_supervisor_data,
path_extern,
):
"""Test backups exclude the database when specified."""
coresys.core.state = CoreState.RUNNING
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
coresys.homeassistant.version = AwesomeVersion("2023.09.0")
coresys.homeassistant.backups_exclude_database = exclude_db_setting

json = {} if exclude_db_setting else {"homeassistant_exclude_database": True}
with patch.object(HomeAssistant, "backup") as backup:
if partial_backup:
resp = await api_client.post(
"/backups/new/partial", json={"homeassistant": True} | json
)
else:
resp = await api_client.post("/backups/new/full", json=json)

backup.assert_awaited_once_with(ANY, True)
assert resp.status == 200
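The assertion `backup.assert_awaited_once_with(ANY, True)` above holds for all four parametrized cases because every combination resolves to an effective exclude flag of True: either the request passes homeassistant_exclude_database or the stored backups_exclude_database default is already on. The resolution rule, as a small sketch of the assumed precedence (per-request option over stored default):

def resolve_exclude_database(request_flag: bool | None, stored_default: bool) -> bool:
    # Per-request option wins; otherwise fall back to the stored default
    return stored_default if request_flag is None else request_flag

assert resolve_exclude_database(None, True) is True    # default on, request silent
assert resolve_exclude_database(True, False) is True   # default off, request opts in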
@@ -1,11 +1,12 @@
"""Test homeassistant api."""

from unittest.mock import MagicMock
from unittest.mock import MagicMock, patch

from aiohttp.test_utils import TestClient
import pytest

from supervisor.coresys import CoreSys
from supervisor.homeassistant.module import HomeAssistant

from tests.common import load_json_fixture

@@ -39,3 +40,26 @@ async def test_api_stats(api_client: TestClient, coresys: CoreSys):
assert result["data"]["memory_usage"] == 59700000
assert result["data"]["memory_limit"] == 4000000000
assert result["data"]["memory_percent"] == 1.49


async def test_api_set_options(api_client: TestClient, coresys: CoreSys):
"""Test setting options for homeassistant."""
resp = await api_client.get("/homeassistant/info")
assert resp.status == 200
result = await resp.json()
assert result["data"]["watchdog"] is True
assert result["data"]["backups_exclude_database"] is False

with patch.object(HomeAssistant, "save_data") as save_data:
resp = await api_client.post(
"/homeassistant/options",
json={"backups_exclude_database": True, "watchdog": False},
)
assert resp.status == 200
save_data.assert_called_once()

resp = await api_client.get("/homeassistant/info")
assert resp.status == 200
result = await resp.json()
assert result["data"]["watchdog"] is False
assert result["data"]["backups_exclude_database"] is True
@@ -253,5 +253,5 @@ async def test_api_network_vlan(
assert connection["ipv6"] == {"method": Variant("s", "auto")}
assert connection["vlan"] == {
"id": Variant("u", 1),
"parent": Variant("s", "eth0"),
"parent": Variant("s", "0c23631e-2118-355c-bbb0-8943229cb0d6"),
}
@@ -6,6 +6,7 @@ from aiohttp.test_utils import TestClient
import pytest

from supervisor.coresys import CoreSys
from supervisor.dbus.agent.boards.interface import BoardProxy
from supervisor.host.control import SystemControl
from supervisor.os.manager import OSManager
from supervisor.resolution.const import ContextType, IssueType, SuggestionType
@@ -13,6 +14,7 @@ from supervisor.resolution.data import Issue, Suggestion

from tests.common import mock_dbus_services
from tests.dbus_service_mocks.agent_boards import Boards as BoardsService
from tests.dbus_service_mocks.agent_boards_green import Green as GreenService
from tests.dbus_service_mocks.agent_boards_yellow import Yellow as YellowService
from tests.dbus_service_mocks.agent_datadisk import DataDisk as DataDiskService
from tests.dbus_service_mocks.base import DBusServiceMock
@@ -121,6 +123,7 @@ async def test_api_board_yellow_info(api_client: TestClient, coresys: CoreSys):
assert result["data"]["heartbeat_led"] is True
assert result["data"]["power_led"] is True

assert (await api_client.get("/os/boards/green")).status == 400
assert (await api_client.get("/os/boards/supervised")).status == 400
assert (await api_client.get("/os/boards/not-real")).status == 400

@@ -137,11 +140,13 @@ async def test_api_board_yellow_options(
assert coresys.dbus.agent.board.yellow.heartbeat_led is True
assert coresys.dbus.agent.board.yellow.power_led is True
assert len(coresys.resolution.issues) == 0
resp = await api_client.post(
"/os/boards/yellow",
json={"disk_led": False, "heartbeat_led": False, "power_led": False},
)
assert resp.status == 200
with patch.object(BoardProxy, "save_data") as save_data:
resp = await api_client.post(
"/os/boards/yellow",
json={"disk_led": False, "heartbeat_led": False, "power_led": False},
)
assert resp.status == 200
save_data.assert_called_once()

await yellow_service.ping()
assert coresys.dbus.agent.board.yellow.disk_led is False
@@ -158,13 +163,69 @@ async def test_api_board_yellow_options(
)


async def test_api_board_green_info(
api_client: TestClient, coresys: CoreSys, boards_service: BoardsService
):
"""Test green board info."""
await mock_dbus_services({"agent_boards_green": None}, coresys.dbus.bus)
boards_service.board = "Green"
await coresys.dbus.agent.board.connect(coresys.dbus.bus)

resp = await api_client.get("/os/boards/green")
assert resp.status == 200

result = await resp.json()
assert result["data"]["activity_led"] is True
assert result["data"]["power_led"] is True
assert result["data"]["system_health_led"] is True

assert (await api_client.get("/os/boards/yellow")).status == 400
assert (await api_client.get("/os/boards/supervised")).status == 400
assert (await api_client.get("/os/boards/not-real")).status == 400


async def test_api_board_green_options(
api_client: TestClient,
coresys: CoreSys,
boards_service: BoardsService,
):
"""Test yellow board options."""
green_service: GreenService = (
await mock_dbus_services({"agent_boards_green": None}, coresys.dbus.bus)
)["agent_boards_green"]
boards_service.board = "Green"
await coresys.dbus.agent.board.connect(coresys.dbus.bus)

assert coresys.dbus.agent.board.green.activity_led is True
assert coresys.dbus.agent.board.green.power_led is True
assert coresys.dbus.agent.board.green.user_led is True
assert len(coresys.resolution.issues) == 0
with patch.object(BoardProxy, "save_data") as save_data:
resp = await api_client.post(
"/os/boards/green",
json={
"activity_led": False,
"power_led": False,
"system_health_led": False,
},
)
assert resp.status == 200
save_data.assert_called_once()

await green_service.ping()
assert coresys.dbus.agent.board.green.activity_led is False
assert coresys.dbus.agent.board.green.power_led is False
assert coresys.dbus.agent.board.green.user_led is False
assert len(coresys.resolution.issues) == 0


async def test_api_board_supervised_info(
api_client: TestClient, coresys: CoreSys, boards_service: BoardsService
):
"""Test supervised board info."""
await mock_dbus_services({"agent_boards_supervised": None}, coresys.dbus.bus)
boards_service.board = "Supervised"
await coresys.dbus.agent.board.update()
await coresys.dbus.agent.board.connect(coresys.dbus.bus)

with patch("supervisor.os.manager.CPE.get_product", return_value=["not-hassos"]):
await coresys.os.load()
@@ -180,7 +241,7 @@ async def test_api_board_other_info(
):
"""Test info for other board without dbus object."""
boards_service.board = "not-real"
await coresys.dbus.agent.board.update()
await coresys.dbus.agent.board.connect(coresys.dbus.bus)

with patch.object(OSManager, "board", new=PropertyMock(return_value="not-real")):
assert (await api_client.get("/os/boards/not-real")).status == 200
tests/api/test_proxy.py (new file, 177 lines)
@@ -0,0 +1,177 @@
+"""Test Home Assistant proxy."""
+
+from __future__ import annotations
+
+import asyncio
+from collections.abc import Awaitable, Callable, Coroutine, Generator
+from json import dumps
+from typing import Any, cast
+from unittest.mock import patch
+
+from aiohttp import ClientWebSocketResponse
+from aiohttp.http_websocket import WSMessage, WSMsgType
+from aiohttp.test_utils import TestClient
+import pytest
+
+from supervisor.addons.addon import Addon
+from supervisor.api.proxy import APIProxy
+from supervisor.const import ATTR_ACCESS_TOKEN
+
+
+def id_generator() -> Generator[int, None, None]:
+    """Generate IDs for WS messages."""
+    i = 0
+    while True:
+        yield (i := i + 1)
+
+
+class MockHAClientWebSocket(ClientWebSocketResponse):
+    """Protocol for a wrapped ClientWebSocketResponse."""
+
+    client: TestClient
+    send_json_auto_id: Callable[[dict[str, Any]], Coroutine[Any, Any, None]]
+
+
+class MockHAServerWebSocket:
+    """Mock of HA Websocket server."""
+
+    closed: bool = False
+
+    def __init__(self) -> None:
+        """Initialize object."""
+        self.outgoing: asyncio.Queue[WSMessage] = asyncio.Queue()
+        self.incoming: asyncio.Queue[WSMessage] = asyncio.Queue()
+        self._id_generator = id_generator()
+
+    def receive(self) -> Awaitable[WSMessage]:
+        """Receive next message."""
+        return self.outgoing.get()
+
+    def send_str(self, data: str) -> Awaitable[None]:
+        """Incoming string message."""
+        return self.incoming.put(WSMessage(WSMsgType.TEXT, data, None))
+
+    def send_bytes(self, data: bytes) -> Awaitable[None]:
+        """Incoming binary message."""
+        return self.incoming.put(WSMessage(WSMsgType.BINARY, data, None))
+
+    def respond_json(self, data: dict[str, Any]) -> Awaitable[None]:
+        """Respond with JSON."""
+        return self.outgoing.put(
+            WSMessage(
+                WSMsgType.TEXT, dumps(data | {"id": next(self._id_generator)}), None
+            )
+        )
+
+    def respond_bytes(self, data: bytes) -> Awaitable[None]:
+        """Respond with binary."""
+        return self.outgoing.put(WSMessage(WSMsgType.BINARY, data, None))
+
+    async def close(self) -> None:
+        """Close connection."""
+        self.closed = True
+
+
+WebSocketGenerator = Callable[..., Coroutine[Any, Any, MockHAClientWebSocket]]
+
+
+@pytest.fixture(name="ha_ws_server")
+async def fixture_ha_ws_server() -> MockHAServerWebSocket:
+    """Mock HA WS server for testing."""
+    with patch.object(
+        APIProxy,
+        "_websocket_client",
+        return_value=(mock_server := MockHAServerWebSocket()),
+    ):
+        yield mock_server
+
+
+@pytest.fixture(name="proxy_ws_client")
+def fixture_proxy_ws_client(
+    api_client: TestClient, ha_ws_server: MockHAServerWebSocket
+) -> WebSocketGenerator:
+    """Websocket client fixture connected to websocket server."""
+
+    async def create_client(auth_token: str) -> MockHAClientWebSocket:
+        """Create a websocket client."""
+        websocket = await api_client.ws_connect("/core/websocket")
+        auth_resp = await websocket.receive_json()
+        assert auth_resp["type"] == "auth_required"
+        await websocket.send_json({"type": "auth", "access_token": auth_token})
+
+        auth_ok = await websocket.receive_json()
+        assert auth_ok["type"] == "auth_ok"
+
+        _id_generator = id_generator()
+
+        def _send_json_auto_id(data: dict[str, Any]) -> Coroutine[Any, Any, None]:
+            data["id"] = next(_id_generator)
+            return websocket.send_json(data)
+
+        # wrap in client
+        wrapped_websocket = cast(MockHAClientWebSocket, websocket)
+        wrapped_websocket.client = api_client
+        wrapped_websocket.send_json_auto_id = _send_json_auto_id
+        return wrapped_websocket
+
+    return create_client
+
+
+async def test_proxy_message(
+    proxy_ws_client: WebSocketGenerator,
+    ha_ws_server: MockHAServerWebSocket,
+    install_addon_ssh: Addon,
+):
+    """Test proxy a message to and from Home Assistant."""
+    install_addon_ssh.persist[ATTR_ACCESS_TOKEN] = "abc123"
+    client: MockHAClientWebSocket = await proxy_ws_client(
+        install_addon_ssh.supervisor_token
+    )
+
+    await client.send_json_auto_id({"hello": "world"})
+    proxied_msg = await ha_ws_server.incoming.get()
+    assert proxied_msg.type == WSMsgType.TEXT
+    assert proxied_msg.data == '{"hello": "world", "id": 1}'
+
+    await ha_ws_server.respond_json({"world": "received"})
+    assert await client.receive_json() == {"world": "received", "id": 1}
+
+    assert await client.close()
+
+
+async def test_proxy_binary_message(
+    proxy_ws_client: WebSocketGenerator,
+    ha_ws_server: MockHAServerWebSocket,
+    install_addon_ssh: Addon,
+):
+    """Test proxy a binary message to and from Home Assistant."""
+    install_addon_ssh.persist[ATTR_ACCESS_TOKEN] = "abc123"
+    client: MockHAClientWebSocket = await proxy_ws_client(
+        install_addon_ssh.supervisor_token
+    )
+
+    await client.send_bytes(b"hello world")
+    proxied_msg = await ha_ws_server.incoming.get()
+    assert proxied_msg.type == WSMsgType.BINARY
+    assert proxied_msg.data == b"hello world"
+
+    await ha_ws_server.respond_bytes(b"world received")
+    assert await client.receive_bytes() == b"world received"
+
+    assert await client.close()
+
+
+@pytest.mark.parametrize("auth_token", ["abc123", "bad"])
+async def test_proxy_invalid_auth(
+    api_client: TestClient, install_addon_example: Addon, auth_token: str
+):
+    """Test invalid access token or addon with no access."""
+    install_addon_example.persist[ATTR_ACCESS_TOKEN] = "abc123"
+    websocket = await api_client.ws_connect("/core/websocket")
+    auth_resp = await websocket.receive_json()
+    assert auth_resp["type"] == "auth_required"
+    await websocket.send_json({"type": "auth", "access_token": auth_token})
+
+    auth_not_ok = await websocket.receive_json()
+    assert auth_not_ok["type"] == "auth_invalid"
+    assert auth_not_ok["message"] == "Invalid access"
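A note on the tests above: they drive the same handshake Home Assistant's websocket API uses. The server opens with "auth_required", the client answers with an "auth" message carrying its token, and receives "auth_ok" or "auth_invalid". A minimal client-side sketch of that exchange, using only the public aiohttp API (the authenticate helper and URL are illustrative, not part of the diff):

import aiohttp


async def authenticate(
    session: aiohttp.ClientSession, url: str, token: str
) -> aiohttp.ClientWebSocketResponse:
    """Open the proxied websocket and complete the auth exchange."""
    ws = await session.ws_connect(url)  # e.g. the /core/websocket endpoint
    msg = await ws.receive_json()
    assert msg["type"] == "auth_required"  # server always asks first
    await ws.send_json({"type": "auth", "access_token": token})
    reply = await ws.receive_json()
    if reply["type"] != "auth_ok":  # "auth_invalid" on a bad token
        await ws.close()
        raise PermissionError(reply.get("message", "Invalid access"))
    return ws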
@@ -50,7 +50,7 @@ async def test_api_store_addons(api_client: TestClient, store_addon: AddonStore)
     result = await resp.json()
     print(result)

-    assert result["data"][-1]["slug"] == store_addon.slug
+    assert result["data"]["addons"][-1]["slug"] == store_addon.slug


 @pytest.mark.asyncio
@@ -20,10 +20,13 @@ from supervisor.docker.addon import DockerAddon
 from supervisor.docker.const import ContainerState
 from supervisor.docker.homeassistant import DockerHomeAssistant
 from supervisor.docker.monitor import DockerContainerStateEvent
-from supervisor.exceptions import AddonsError, BackupError, DockerError
+from supervisor.exceptions import AddonsError, BackupError, BackupJobError, DockerError
+from supervisor.homeassistant.api import HomeAssistantAPI
 from supervisor.homeassistant.core import HomeAssistantCore
 from supervisor.homeassistant.module import HomeAssistant
+from supervisor.jobs.const import JobCondition
 from supervisor.mounts.mount import Mount
 from supervisor.utils.json import read_json_file, write_json_file

 from tests.const import TEST_ADDON_SLUG
+from tests.dbus_service_mocks.base import DBusServiceMock
@@ -1307,3 +1310,192 @@ async def test_cannot_manually_thaw_normal_freeze(coresys: CoreSys):
     coresys.core.state = CoreState.FREEZE
     with pytest.raises(BackupError):
         await coresys.backups.thaw_all()
+
+
+async def test_restore_only_reloads_ingress_on_change(
+    coresys: CoreSys,
+    install_addon_ssh: Addon,
+    tmp_supervisor_data,
+    path_extern,
+):
+    """Test restore only tells core to reload ingress when something has changed."""
+    install_addon_ssh.path_data.mkdir()
+    coresys.core.state = CoreState.RUNNING
+    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
+
+    backup_no_ingress: Backup = await coresys.backups.do_backup_partial(
+        addons=["local_ssh"]
+    )
+
+    install_addon_ssh.ingress_panel = True
+    install_addon_ssh.save_persist()
+    backup_with_ingress: Backup = await coresys.backups.do_backup_partial(
+        addons=["local_ssh"]
+    )
+
+    async def mock_is_running(*_) -> bool:
+        return True
+
+    with patch.object(
+        HomeAssistantCore, "is_running", new=mock_is_running
+    ), patch.object(AddonModel, "_validate_availability"), patch.object(
+        DockerAddon, "attach"
+    ), patch.object(
+        HomeAssistantAPI, "make_request"
+    ) as make_request:
+        make_request.return_value.__aenter__.return_value.status = 200
+
+        # Has ingress before and after - not called
+        await coresys.backups.do_restore_partial(
+            backup_with_ingress, addons=["local_ssh"]
+        )
+        make_request.assert_not_called()
+
+        # Restore removes ingress - tell Home Assistant
+        await coresys.backups.do_restore_partial(
+            backup_no_ingress, addons=["local_ssh"]
+        )
+        make_request.assert_called_once_with(
+            "delete", "api/hassio_push/panel/local_ssh"
+        )
+
+        # No ingress before or after - not called
+        make_request.reset_mock()
+        await coresys.backups.do_restore_partial(
+            backup_no_ingress, addons=["local_ssh"]
+        )
+        make_request.assert_not_called()
+
+        # Restore adds ingress - tell Home Assistant
+        await coresys.backups.do_restore_partial(
+            backup_with_ingress, addons=["local_ssh"]
+        )
+        make_request.assert_called_once_with("post", "api/hassio_push/panel/local_ssh")
+
+
+async def test_restore_new_addon(
+    coresys: CoreSys,
+    install_addon_ssh: Addon,
+    container: MagicMock,
+    tmp_supervisor_data,
+    path_extern,
+):
+    """Test restore installing new addon."""
+    install_addon_ssh.path_data.mkdir()
+    coresys.core.state = CoreState.RUNNING
+    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
+
+    backup: Backup = await coresys.backups.do_backup_partial(addons=["local_ssh"])
+    await coresys.addons.uninstall("local_ssh")
+    assert "local_ssh" not in coresys.addons.local
+
+    with patch.object(AddonModel, "_validate_availability"), patch.object(
+        DockerAddon, "attach"
+    ):
+        assert await coresys.backups.do_restore_partial(backup, addons=["local_ssh"])
+
+    assert "local_ssh" in coresys.addons.local
+
+
+async def test_backup_to_mount_bypasses_free_space_condition(
+    coresys: CoreSys,
+    all_dbus_services: dict[str, DBusServiceMock],
+    tmp_supervisor_data,
+    path_extern,
+    mount_propagation,
+):
+    """Test backing up to a mount bypasses the check on local free space."""
+    coresys.core.state = CoreState.RUNNING
+    coresys.hardware.disk.get_disk_free_space = lambda _: 0.1
+
+    # These fail due to lack of local free space
+    with pytest.raises(BackupJobError):
+        await coresys.backups.do_backup_full()
+    with pytest.raises(BackupJobError):
+        await coresys.backups.do_backup_partial(folders=["media"])
+
+    systemd_service: SystemdService = all_dbus_services["systemd"]
+    systemd_service.response_get_unit = [
+        DBusError("org.freedesktop.systemd1.NoSuchUnit", "error"),
+        "/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
+        DBusError("org.freedesktop.systemd1.NoSuchUnit", "error"),
+        "/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
+        "/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
+        "/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount",
+    ]
+
+    # Add a backup mount
+    await coresys.mounts.load()
+    await coresys.mounts.create_mount(
+        Mount.from_dict(
+            coresys,
+            {
+                "name": "backup_test",
+                "usage": "backup",
+                "type": "cifs",
+                "server": "test.local",
+                "share": "test",
+            },
+        )
+    )
+    mount = coresys.mounts.get("backup_test")
+
+    # These succeed because local free space does not matter when using a mount
+    await coresys.backups.do_backup_full(location=mount)
+    await coresys.backups.do_backup_partial(folders=["media"], location=mount)
+
+
+@pytest.mark.parametrize(
+    "partial_backup,exclude_db_setting",
+    [(False, True), (True, True), (False, False), (True, False)],
+)
+async def test_skip_homeassistant_database(
+    coresys: CoreSys,
+    container: MagicMock,
+    partial_backup: bool,
+    exclude_db_setting: bool | None,
+    tmp_supervisor_data,
+    path_extern,
+):
+    """Test exclude database option skips database in backup."""
+    coresys.core.state = CoreState.RUNNING
+    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
+    coresys.jobs.ignore_conditions = [
+        JobCondition.INTERNET_HOST,
+        JobCondition.INTERNET_SYSTEM,
+    ]
+    coresys.homeassistant.version = AwesomeVersion("2023.09.0")
+    coresys.homeassistant.backups_exclude_database = exclude_db_setting
+
+    test_file = coresys.config.path_homeassistant / "configuration.yaml"
+    (test_db := coresys.config.path_homeassistant / "home-assistant_v2.db").touch()
+    (
+        test_db_wal := coresys.config.path_homeassistant / "home-assistant_v2.db-wal"
+    ).touch()
+    (
+        test_db_shm := coresys.config.path_homeassistant / "home-assistant_v2.db-shm"
+    ).touch()
+
+    write_json_file(test_file, {"default_config": {}})
+
+    kwargs = {} if exclude_db_setting else {"homeassistant_exclude_database": True}
+    if partial_backup:
+        backup: Backup = await coresys.backups.do_backup_partial(
+            homeassistant=True, **kwargs
+        )
+    else:
+        backup: Backup = await coresys.backups.do_backup_full(**kwargs)
+
+    test_file.unlink()
+    write_json_file(test_db, {"hello": "world"})
+    write_json_file(test_db_wal, {"hello": "world"})
+
+    with patch.object(HomeAssistantCore, "update"), patch.object(
+        HomeAssistantCore, "start"
+    ):
+        await coresys.backups.do_restore_partial(backup, homeassistant=True)
+
+    assert read_json_file(test_file) == {"default_config": {}}
+    assert read_json_file(test_db) == {"hello": "world"}
+    assert read_json_file(test_db_wal) == {"hello": "world"}
+    assert not test_db_shm.exists()
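A note on test_skip_homeassistant_database above: it pins down that the SQLite database plus its -wal/-shm sidecar files are left out of the archive when the exclude option is set. One way to express such a filter, sketched here as an assumption rather than the Supervisor's actual code:

from fnmatch import fnmatch
from pathlib import Path

HOMEASSISTANT_DATABASE = "home-assistant_v2.db*"  # matches .db, .db-wal, .db-shm


def excluded_from_backup(path: Path, exclude_database: bool) -> bool:
    """Return True if the file should be skipped while archiving."""
    return exclude_database and fnmatch(path.name, HOMEASSISTANT_DATABASE)


assert excluded_from_backup(Path("home-assistant_v2.db-wal"), True)
assert not excluded_from_backup(Path("configuration.yaml"), True)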
@@ -30,6 +30,27 @@ async def test_dbus_board(dbus_session_bus: MessageBus):

     with pytest.raises(BoardInvalidError):
         assert not board.supervised
+    with pytest.raises(BoardInvalidError):
+        assert not board.green
+
+
+async def test_dbus_board_green(
+    boards_service: BoardsService, dbus_session_bus: MessageBus
+):
+    """Test DBus Board load with Green board."""
+    await mock_dbus_services({"agent_boards_green": None}, dbus_session_bus)
+    boards_service.board = "Green"
+
+    board = BoardManager()
+    await board.connect(dbus_session_bus)
+
+    assert board.board == "Green"
+    assert board.green.activity_led is True
+
+    with pytest.raises(BoardInvalidError):
+        assert not board.supervised
+    with pytest.raises(BoardInvalidError):
+        assert not board.yellow


 async def test_dbus_board_supervised(
@@ -47,6 +68,8 @@ async def test_dbus_board_supervised(

     with pytest.raises(BoardInvalidError):
         assert not board.yellow
+    with pytest.raises(BoardInvalidError):
+        assert not board.green


 async def test_dbus_board_other(
@@ -64,3 +87,5 @@ async def test_dbus_board_other(
         assert not board.yellow
     with pytest.raises(BoardInvalidError):
         assert not board.supervised
+    with pytest.raises(BoardInvalidError):
+        assert not board.green
tests/dbus/agent/boards/test_green.py (new file, 81 lines)
@@ -0,0 +1,81 @@
+"""Test Green board."""
+# pylint: disable=import-error
+import asyncio
+from unittest.mock import patch
+
+from dbus_fast.aio.message_bus import MessageBus
+import pytest
+
+from supervisor.dbus.agent.boards.green import Green
+
+from tests.common import mock_dbus_services
+from tests.dbus_service_mocks.agent_boards_green import Green as GreenService
+
+
+@pytest.fixture(name="green_service", autouse=True)
+async def fixture_green_service(dbus_session_bus: MessageBus) -> GreenService:
+    """Mock Green Board dbus service."""
+    yield (await mock_dbus_services({"agent_boards_green": None}, dbus_session_bus))[
+        "agent_boards_green"
+    ]
+
+
+async def test_dbus_green(green_service: GreenService, dbus_session_bus: MessageBus):
+    """Test Green board load."""
+    with patch("supervisor.utils.common.Path.is_file", return_value=True), patch(
+        "supervisor.utils.common.read_json_file",
+        return_value={"activity_led": False, "user_led": False},
+    ):
+        green = Green()
+
+    await green.connect(dbus_session_bus)
+
+    assert green.name == "Green"
+    assert green.activity_led is True
+    assert green.power_led is True
+    assert green.user_led is True
+
+    await asyncio.sleep(0)
+    await green_service.ping()
+
+    assert green.activity_led is False
+    assert green.user_led is False
+
+
+async def test_dbus_green_set_activity_led(
+    green_service: GreenService, dbus_session_bus: MessageBus
+):
+    """Test setting activity led for Green board."""
+    green = Green()
+    await green.connect(dbus_session_bus)
+
+    green.activity_led = False
+    await asyncio.sleep(0)  # Set property via dbus is separate async task
+    await green_service.ping()
+    assert green.activity_led is False
+
+
+async def test_dbus_green_set_power_led(
+    green_service: GreenService, dbus_session_bus: MessageBus
+):
+    """Test setting power led for Green board."""
+    green = Green()
+    await green.connect(dbus_session_bus)
+
+    green.power_led = False
+    await asyncio.sleep(0)  # Set property via dbus is separate async task
+    await green_service.ping()
+    assert green.power_led is False
+
+
+async def test_dbus_green_set_user_led(
+    green_service: GreenService, dbus_session_bus: MessageBus
+):
+    """Test setting user led for Green board."""
+    green = Green()
+    await green.connect(dbus_session_bus)
+
+    green.user_led = False
+    await asyncio.sleep(0)  # Set property via dbus is separate async task
+    await green_service.ping()
+    assert green.user_led is False
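A note on the await asyncio.sleep(0) calls in these board tests: assigning a property only schedules the D-Bus write on a separate task, so the test has to yield once (and ping the mock service) before asserting. A self-contained illustration of that scheduling detail:

import asyncio


async def main() -> None:
    written = asyncio.Event()

    async def dbus_set_property() -> None:  # stands in for the D-Bus Set call
        written.set()

    asyncio.get_running_loop().create_task(dbus_set_property())
    assert not written.is_set()  # nothing ran yet, only scheduled
    await asyncio.sleep(0)  # yield once so the task gets a turn
    assert written.is_set()


asyncio.run(main())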
@@ -1,6 +1,7 @@
 """Test Yellow board."""
 # pylint: disable=import-error
+import asyncio
 from unittest.mock import patch

 from dbus_fast.aio.message_bus import MessageBus
 import pytest
@@ -19,9 +20,13 @@ async def fixture_yellow_service(dbus_session_bus: MessageBus) -> YellowService:
     ]


-async def test_dbus_yellow(dbus_session_bus: MessageBus):
+async def test_dbus_yellow(yellow_service: YellowService, dbus_session_bus: MessageBus):
     """Test Yellow board load."""
-    yellow = Yellow()
+    with patch("supervisor.utils.common.Path.is_file", return_value=True), patch(
+        "supervisor.utils.common.read_json_file",
+        return_value={"disk_led": False, "heartbeat_led": False},
+    ):
+        yellow = Yellow()
     await yellow.connect(dbus_session_bus)

     assert yellow.name == "Yellow"
@@ -29,6 +34,12 @@ async def test_dbus_yellow(dbus_session_bus: MessageBus):
     assert yellow.heartbeat_led is True
     assert yellow.power_led is True

+    await asyncio.sleep(0)
+    await yellow_service.ping()
+
+    assert yellow.disk_led is False
+    assert yellow.heartbeat_led is False
+

 async def test_dbus_yellow_set_disk_led(
     yellow_service: YellowService, dbus_session_bus: MessageBus
@@ -5,6 +5,8 @@ from unittest.mock import PropertyMock, patch
 from supervisor.dbus.network import NetworkManager
 from supervisor.dbus.network.interface import NetworkInterface
 from supervisor.dbus.network.setting.generate import get_connection_from_interface
+from supervisor.host.configuration import IpConfig, VlanConfig
+from supervisor.host.const import InterfaceMethod, InterfaceType
 from supervisor.host.network import Interface

 from tests.const import TEST_INTERFACE
@@ -14,7 +16,7 @@ async def test_get_connection_from_interface(network_manager: NetworkManager):
     """Test network interface."""
     dbus_interface = network_manager.get(TEST_INTERFACE)
     interface = Interface.from_dbus_interface(dbus_interface)
-    connection_payload = get_connection_from_interface(interface)
+    connection_payload = get_connection_from_interface(interface, network_manager)

     assert "connection" in connection_payload

@@ -35,9 +37,48 @@ async def test_get_connection_no_path(network_manager: NetworkManager):
     with patch.object(NetworkInterface, "path", new=PropertyMock(return_value=None)):
         interface = Interface.from_dbus_interface(dbus_interface)

-    connection_payload = get_connection_from_interface(interface)
+    connection_payload = get_connection_from_interface(interface, network_manager)

     assert "connection" in connection_payload
     assert "match" not in connection_payload

     assert connection_payload["connection"]["interface-name"].value == "eth0"
+
+
+async def test_generate_from_vlan(network_manager: NetworkManager):
+    """Test generate from a vlan interface."""
+    vlan_interface = Interface(
+        name="",
+        mac="",
+        path="",
+        enabled=True,
+        connected=True,
+        primary=False,
+        type=InterfaceType.VLAN,
+        ipv4=IpConfig(InterfaceMethod.AUTO, [], None, [], None),
+        ipv6=None,
+        wifi=None,
+        vlan=VlanConfig(1, "eth0"),
+    )
+
+    connection_payload = get_connection_from_interface(vlan_interface, network_manager)
+    assert connection_payload["connection"]["id"].value == "Supervisor .1"
+    assert connection_payload["connection"]["type"].value == "vlan"
+    assert "uuid" in connection_payload["connection"]
+    assert "match" not in connection_payload["connection"]
+    assert "interface-name" not in connection_payload["connection"]
+    assert connection_payload["ipv4"]["method"].value == "auto"
+
+    assert connection_payload["vlan"]["id"].value == 1
+    assert (
+        connection_payload["vlan"]["parent"].value
+        == "0c23631e-2118-355c-bbb0-8943229cb0d6"
+    )
+
+    # Ensure value remains if parent interface is already a UUID
+    vlan_interface.vlan.interface = "0c23631e-2118-355c-bbb0-8943229cb0d6"
+    connection_payload = get_connection_from_interface(vlan_interface, network_manager)
+    assert (
+        connection_payload["vlan"]["parent"].value
+        == "0c23631e-2118-355c-bbb0-8943229cb0d6"
+    )
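A note on test_generate_from_vlan above: it asserts that the generated vlan settings carry the parent as a NetworkManager connection UUID. An interface name like "eth0" gets resolved, while a value that is already a UUID passes through untouched. A sketch of that normalization, with the lookup dict standing in for NetworkManager state (not the Supervisor implementation):

from uuid import UUID


def resolve_vlan_parent(parent: str, uuid_by_interface: dict[str, str]) -> str:
    """Return the vlan parent as a connection UUID."""
    try:
        UUID(parent)
    except ValueError:
        return uuid_by_interface[parent]  # interface name: map to its UUID
    return parent  # already a UUID: keep as-is


assert (
    resolve_vlan_parent("eth0", {"eth0": "0c23631e-2118-355c-bbb0-8943229cb0d6"})
    == "0c23631e-2118-355c-bbb0-8943229cb0d6"
)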
@@ -1,5 +1,7 @@
 """Test Network Manager Connection object."""

+from unittest.mock import MagicMock
+
 from dbus_fast.aio.message_bus import MessageBus
 from dbus_fast.signature import Variant
 import pytest
@@ -42,6 +44,7 @@ async def test_update(
     interface = Interface.from_dbus_interface(dbus_interface)
     conn = get_connection_from_interface(
         interface,
+        MagicMock(),
         name=dbus_interface.settings.connection.id,
         uuid=dbus_interface.settings.connection.uuid,
     )
@@ -105,6 +108,7 @@ async def test_ipv6_disabled_is_link_local(dbus_interface: NetworkInterface):
     interface.ipv6.method = InterfaceMethod.DISABLED
     conn = get_connection_from_interface(
         interface,
+        MagicMock(),
         name=dbus_interface.settings.connection.id,
         uuid=dbus_interface.settings.connection.uuid,
     )
tests/dbus_service_mocks/agent_boards_green.py (new file, 55 lines)
@@ -0,0 +1,55 @@
+"""Mock of OS Agent Boards Green dbus service."""
+
+from dbus_fast.service import dbus_property
+
+from .base import DBusServiceMock
+
+BUS_NAME = "io.hass.os"
+
+
+def setup(object_path: str | None = None) -> DBusServiceMock:
+    """Create dbus mock object."""
+    return Green()
+
+
+# pylint: disable=invalid-name
+
+
+class Green(DBusServiceMock):
+    """Green mock.
+
+    gdbus introspect --system --dest io.hass.os --object-path /io/hass/os/Boards/Green
+    """
+
+    object_path = "/io/hass/os/Boards/Green"
+    interface = "io.hass.os.Boards.Green"
+
+    @dbus_property()
+    def ActivityLED(self) -> "b":
+        """Get Activity LED."""
+        return True
+
+    @ActivityLED.setter
+    def ActivityLED(self, value: "b"):
+        """Set Activity LED."""
+        self.emit_properties_changed({"ActivityLED": value})
+
+    @dbus_property()
+    def PowerLED(self) -> "b":
+        """Get Power LED."""
+        return True
+
+    @PowerLED.setter
+    def PowerLED(self, value: "b"):
+        """Set Power LED."""
+        self.emit_properties_changed({"PowerLED": value})
+
+    @dbus_property()
+    def UserLED(self) -> "b":
+        """Get User LED."""
+        return True
+
+    @UserLED.setter
+    def UserLED(self, value: "b"):
+        """Set User LED."""
+        self.emit_properties_changed({"UserLED": value})
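A note on the mock above: in dbus_fast service classes the "b" annotations are D-Bus type signatures (boolean), not Python types; dbus_fast reads them to marshal the property over the bus. A stripped-down sketch of the same property pattern:

from dbus_fast.service import ServiceInterface, dbus_property


class Demo(ServiceInterface):
    """Tiny service exposing one boolean property."""

    def __init__(self) -> None:
        super().__init__("io.example.Demo")
        self._led = True

    @dbus_property()
    def Led(self) -> "b":
        """Get the LED state."""
        return self._led

    @Led.setter
    def Led(self, value: "b"):
        """Set the LED state."""
        self._led = value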
@@ -39,13 +39,6 @@ def fixture_addonsdata_user() -> dict[str, Data]:
         yield mock


-@pytest.fixture(name="os_environ")
-def fixture_os_environ():
-    """Mock os.environ."""
-    with patch("supervisor.config.os.environ") as mock:
-        yield mock
-
-
 def get_docker_addon(
     coresys: CoreSys, addonsdata_system: dict[str, Data], config_file: str
 ):
@@ -60,7 +53,7 @@ def get_docker_addon(


 def test_base_volumes_included(
-    coresys: CoreSys, addonsdata_system: dict[str, Data], os_environ
+    coresys: CoreSys, addonsdata_system: dict[str, Data], path_extern
 ):
     """Dev and data volumes always included."""
     docker_addon = get_docker_addon(
@@ -86,7 +79,7 @@ def test_base_volumes_included(


 def test_addon_map_folder_defaults(
-    coresys: CoreSys, addonsdata_system: dict[str, Data], os_environ
+    coresys: CoreSys, addonsdata_system: dict[str, Data], path_extern
 ):
     """Validate defaults for mapped folders in addons."""
     docker_addon = get_docker_addon(
@@ -143,7 +136,7 @@ def test_addon_map_folder_defaults(


 def test_journald_addon(
-    coresys: CoreSys, addonsdata_system: dict[str, Data], os_environ
+    coresys: CoreSys, addonsdata_system: dict[str, Data], path_extern
 ):
     """Validate volume for journald option."""
     docker_addon = get_docker_addon(
@@ -171,7 +164,7 @@ def test_journald_addon(


 def test_not_journald_addon(
-    coresys: CoreSys, addonsdata_system: dict[str, Data], os_environ
+    coresys: CoreSys, addonsdata_system: dict[str, Data], path_extern
 ):
     """Validate journald option defaults off."""
     docker_addon = get_docker_addon(
@@ -182,10 +175,7 @@ def test_not_journald_addon(


 async def test_addon_run_docker_error(
-    coresys: CoreSys,
-    addonsdata_system: dict[str, Data],
-    capture_exception: Mock,
-    os_environ,
+    coresys: CoreSys, addonsdata_system: dict[str, Data], path_extern
 ):
     """Test docker error when addon is run."""
     await coresys.dbus.timedate.connect(coresys.dbus.bus)
@@ -203,14 +193,13 @@ async def test_addon_run_docker_error(
         Issue(IssueType.MISSING_IMAGE, ContextType.ADDON, reference="test_addon")
         in coresys.resolution.issues
     )
-    capture_exception.assert_not_called()


 async def test_addon_run_add_host_error(
     coresys: CoreSys,
     addonsdata_system: dict[str, Data],
     capture_exception: Mock,
-    os_environ,
+    path_extern,
 ):
     """Test error adding host when addon is run."""
     await coresys.dbus.timedate.connect(coresys.dbus.bus)
@@ -10,12 +10,18 @@ from docker.models.images import Image
 import pytest
 from requests import RequestException

+from supervisor.addons import Addon
 from supervisor.const import BusEvent, CpuArch
 from supervisor.coresys import CoreSys
 from supervisor.docker.const import ContainerState
 from supervisor.docker.interface import DockerInterface
 from supervisor.docker.monitor import DockerContainerStateEvent
-from supervisor.exceptions import DockerAPIError, DockerError, DockerRequestError
+from supervisor.exceptions import (
+    DockerAPIError,
+    DockerError,
+    DockerNotFound,
+    DockerRequestError,
+)


 @pytest.fixture(autouse=True)
@@ -223,3 +229,21 @@ async def test_image_pull_fail(
     )

     capture_exception.assert_called_once_with(err)
+
+
+async def test_run_missing_image(
+    coresys: CoreSys,
+    install_addon_ssh: Addon,
+    container: MagicMock,
+    capture_exception: Mock,
+    path_extern,
+):
+    """Test run captures the exception when image is missing."""
+    coresys.docker.containers.create.side_effect = [NotFound("missing"), MagicMock()]
+    container.status = "stopped"
+    install_addon_ssh.data["image"] = "test_image"
+
+    with pytest.raises(DockerNotFound):
+        await install_addon_ssh.instance.run()
+
+    capture_exception.assert_called_once()
tests/fixtures/addons/local/ssh/config.yaml (vendored)
@@ -17,6 +17,7 @@ panel_icon: "mdi:console"
 panel_title: Terminal
 hassio_api: true
 hassio_role: manager
+homeassistant_api: true
 audio: true
 uart: true
 ports:
@@ -1,9 +1,12 @@
 """Test Home Assistant core."""

+from datetime import datetime, timedelta
 from unittest.mock import MagicMock, Mock, PropertyMock, patch

 from awesomeversion import AwesomeVersion
 from docker.errors import DockerException, ImageNotFound, NotFound
 import pytest
+from time_machine import travel

+from supervisor.const import CpuArch
 from supervisor.coresys import CoreSys
@@ -14,6 +17,7 @@ from supervisor.exceptions import (
     AudioUpdateError,
     CodeNotaryError,
     DockerError,
+    HomeAssistantCrashError,
     HomeAssistantError,
     HomeAssistantJobError,
 )
@@ -263,3 +267,35 @@ async def test_stats_failures(

     with pytest.raises(HomeAssistantError):
         await coresys.homeassistant.core.stats()
+
+
+async def test_api_check_timeout(
+    coresys: CoreSys, container: MagicMock, caplog: pytest.LogCaptureFixture
+):
+    """Test attempts to contact the API timeout."""
+    container.status = "stopped"
+    coresys.homeassistant.version = AwesomeVersion("2023.9.0")
+    coresys.homeassistant.api.check_api_state.return_value = False
+
+    async def mock_instance_start(*_):
+        container.status = "running"
+
+    with patch.object(
+        DockerHomeAssistant, "start", new=mock_instance_start
+    ), patch.object(DockerAPI, "container_is_initialized", return_value=True), travel(
+        datetime(2023, 10, 2, 0, 0, 0), tick=False
+    ) as traveller:
+
+        async def mock_sleep(*args):
+            traveller.shift(timedelta(minutes=1))
+
+        with patch(
+            "supervisor.homeassistant.core.asyncio.sleep", new=mock_sleep
+        ), pytest.raises(HomeAssistantCrashError):
+            await coresys.homeassistant.core.start()
+
+    assert coresys.homeassistant.api.check_api_state.call_count == 5
+    assert (
+        "No API response in 5 minutes, assuming core has had a fatal startup error"
+        in caplog.text
+    )
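A note on test_api_check_timeout above: it never waits real minutes. time_machine.travel(..., tick=False) freezes the clock, and the patched asyncio.sleep shifts it forward a minute per call, so five "minutes" of API polling run instantly. A self-contained illustration of the technique:

from datetime import datetime, timedelta

from time_machine import travel

with travel(datetime(2023, 10, 2), tick=False) as traveller:
    start = datetime.now()
    traveller.shift(timedelta(minutes=5))
    assert datetime.now() - start == timedelta(minutes=5)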
@@ -2,7 +2,7 @@

 import asyncio
 from pathlib import Path
-from unittest.mock import patch
+from unittest.mock import AsyncMock, patch

 from supervisor.const import CoreState
 from supervisor.coresys import CoreSys
@@ -10,9 +10,10 @@ from supervisor.docker.interface import DockerInterface
 from supervisor.homeassistant.secrets import HomeAssistantSecrets


-async def test_load(coresys: CoreSys, tmp_supervisor_data: Path):
+async def test_load(
+    coresys: CoreSys, tmp_supervisor_data: Path, ha_ws_client: AsyncMock
+):
     """Test homeassistant module load."""
-    client = coresys.homeassistant.websocket._client  # pylint: disable=protected-access
     with open(tmp_supervisor_data / "homeassistant" / "secrets.yaml", "w") as secrets:
         secrets.write("hello: world\n")

@@ -30,8 +31,16 @@ async def test_load(coresys: CoreSys, tmp_supervisor_data: Path):

     coresys.core.state = CoreState.SETUP
     await coresys.homeassistant.websocket.async_send_message({"lorem": "ipsum"})
-    client.async_send_command.assert_not_called()
+    ha_ws_client.async_send_command.assert_not_called()

     coresys.core.state = CoreState.RUNNING
     await asyncio.sleep(0)
-    assert client.async_send_command.call_args_list[0][0][0] == {"lorem": "ipsum"}
+    assert ha_ws_client.async_send_command.call_args_list[0][0][0] == {"lorem": "ipsum"}
+
+
+async def test_get_users_none(coresys: CoreSys, ha_ws_client: AsyncMock):
+    """Test get users returning none does not fail."""
+    ha_ws_client.async_send_command.return_value = None
+    assert [] == await coresys.homeassistant.get_users.__wrapped__(
+        coresys.homeassistant
+    )
@@ -1044,3 +1044,61 @@ async def test_job_starting_separate_task(coresys: CoreSys):
     await test.job_await()
     await test.job_release()
     await task
+
+
+async def test_job_always_removed_on_check_failure(coresys: CoreSys):
+    """Test that the job instance is always removed if the condition or limit check fails."""
+
+    class TestClass:
+        """Test class."""
+
+        event = asyncio.Event()
+        limit_job: Job | None = None
+
+        def __init__(self, coresys: CoreSys) -> None:
+            """Initialize object."""
+            self.coresys = coresys
+
+        @Job(
+            name="test_job_always_removed_on_check_failure_condition",
+            conditions=[JobCondition.HAOS],
+            on_condition=JobException,
+            cleanup=False,
+        )
+        async def condition_check(self):
+            """Job that will fail a condition check."""
+            raise AssertionError("should not run")
+
+        @Job(
+            name="test_job_always_removed_on_check_failure_limit",
+            limit=JobExecutionLimit.ONCE,
+            cleanup=False,
+        )
+        async def limit_check(self):
+            """Job that can fail a limit check."""
+            self.limit_job = self.coresys.jobs.current
+            await self.event.wait()
+
+        def release_limit_check(self):
+            """Release the limit check job."""
+            self.event.set()
+
+    test = TestClass(coresys)
+
+    with pytest.raises(JobException):
+        await test.condition_check()
+    assert coresys.jobs.jobs == []
+
+    task = coresys.create_task(test.limit_check())
+    await asyncio.sleep(0)
+    assert (job := test.limit_job)
+
+    with pytest.raises(JobException):
+        await test.limit_check()
+    assert test.limit_job == job
+    assert coresys.jobs.jobs == [job]
+
+    test.release_limit_check()
+    await task
+    assert job.done
+    assert coresys.jobs.jobs == [job]
@@ -22,12 +22,19 @@ async def test_add_job(coresys: CoreSys):
 async def test_remove_job_directly(coresys: CoreSys, caplog: pytest.LogCaptureFixture):
     """Test removing jobs from manager."""
     job = coresys.jobs.new_job(TEST_JOB)
-
     assert job in coresys.jobs.jobs

     coresys.jobs.remove_job(job)
     assert job not in coresys.jobs.jobs
-    assert f"Removing incomplete job {job.name}" in caplog.text
+    assert f"Removing incomplete job {job.name}" not in caplog.text
+
+    job = coresys.jobs.new_job(TEST_JOB)
+    assert job in coresys.jobs.jobs
+
+    with job.start():
+        coresys.jobs.remove_job(job)
+    assert job not in coresys.jobs.jobs
+    assert f"Removing incomplete job {job.name}" in caplog.text


 async def test_job_done(coresys: CoreSys):
@@ -1,6 +1,6 @@
 """Test base plugin functionality."""
 import asyncio
-from unittest.mock import Mock, PropertyMock, patch
+from unittest.mock import MagicMock, Mock, PropertyMock, patch

 from awesomeversion import AwesomeVersion
 import pytest
@@ -98,7 +98,7 @@ async def test_plugin_watchdog(coresys: CoreSys, plugin: PluginBase) -> None:
     start.assert_not_called()

     rebuild.reset_mock()
-    # Plugins are restarted anytime they stop, not just on failure
+    # Stop should be ignored as it means an update or system shutdown, plugins don't stop otherwise
     current_state.return_value = ContainerState.STOPPED
     coresys.bus.fire_event(
         BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
@@ -111,9 +111,8 @@ async def test_plugin_watchdog(coresys: CoreSys, plugin: PluginBase) -> None:
     )
     await asyncio.sleep(0)
     rebuild.assert_not_called()
-    start.assert_called_once()
+    start.assert_not_called()

-    start.reset_mock()
     # Do not process event if container state has changed since fired
     current_state.return_value = ContainerState.HEALTHY
     coresys.bus.fire_event(
@@ -155,41 +154,38 @@ async def test_plugin_watchdog(coresys: CoreSys, plugin: PluginBase) -> None:
     ],
     indirect=["plugin"],
 )
-async def test_plugin_watchdog_rebuild_on_failure(
-    coresys: CoreSys, capture_exception: Mock, plugin: PluginBase, error: PluginError
+async def test_plugin_watchdog_max_failed_attempts(
+    coresys: CoreSys,
+    capture_exception: Mock,
+    plugin: PluginBase,
+    error: PluginError,
+    container: MagicMock,
+    caplog: pytest.LogCaptureFixture,
 ) -> None:
-    """Test plugin watchdog rebuilds if start fails."""
-    with patch.object(type(plugin.instance), "attach"), patch.object(
-        type(plugin.instance), "is_running", return_value=True
-    ):
+    """Test plugin watchdog gives up after max failed attempts."""
+    with patch.object(type(plugin.instance), "attach"):
         await plugin.load()

+    container.status = "stopped"
+    container.attrs = {"State": {"ExitCode": 1}}
     with patch("supervisor.plugins.base.WATCHDOG_RETRY_SECONDS", 0), patch.object(
-        type(plugin), "rebuild"
-    ) as rebuild, patch.object(
         type(plugin), "start", side_effect=error
-    ) as start, patch.object(
-        type(plugin.instance),
-        "current_state",
-        side_effect=[
-            ContainerState.STOPPED,
-            ContainerState.STOPPED,
-        ],
-    ):
-        coresys.bus.fire_event(
-            BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
+    ) as start:
+        await plugin.watchdog_container(
             DockerContainerStateEvent(
                 name=plugin.instance.name,
-                state=ContainerState.STOPPED,
+                state=ContainerState.FAILED,
                 id="abc123",
                 time=1,
-            ),
+            )
         )
-        await asyncio.sleep(0.1)
-        start.assert_called_once()
-        rebuild.assert_called_once()
+        assert start.call_count == 5

-    capture_exception.assert_called_once_with(error)
+    capture_exception.assert_called_with(error)
+    assert (
+        f"Watchdog cannot restart {plugin.slug} plugin, failed all 5 attempts"
+        in caplog.text
+    )


 @pytest.mark.parametrize(
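A note on the reworked watchdog test above: it expects exactly five start attempts before the watchdog logs that it failed all attempts. A generic retry loop of that shape (names and constants are illustrative, not the Supervisor's implementation):

import asyncio

WATCHDOG_MAX_FAILED_ATTEMPTS = 5
WATCHDOG_RETRY_SECONDS = 0


async def watchdog_restart(start) -> bool:
    """Retry a plugin start, giving up after the max failed attempts."""
    for _attempt in range(WATCHDOG_MAX_FAILED_ATTEMPTS):
        try:
            await start()
            return True
        except Exception:  # the real code would catch its PluginError
            await asyncio.sleep(WATCHDOG_RETRY_SECONDS)
    return False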
tests/resolution/fixup/test_addon_execute_repair.py (new file, 73 lines)
@@ -0,0 +1,73 @@
+"""Test fixup core execute repair."""
+
+from unittest.mock import MagicMock, patch
+
+from docker.errors import NotFound
+
+from supervisor.addons.addon import Addon
+from supervisor.coresys import CoreSys
+from supervisor.docker.addon import DockerAddon
+from supervisor.docker.interface import DockerInterface
+from supervisor.docker.manager import DockerAPI
+from supervisor.resolution.const import ContextType, IssueType, SuggestionType
+from supervisor.resolution.fixups.addon_execute_repair import FixupAddonExecuteRepair
+
+
+async def test_fixup(docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon):
+    """Test fixup rebuilds addon's container."""
+    docker.images.get.side_effect = NotFound("missing")
+    install_addon_ssh.data["image"] = "test_image"
+
+    addon_execute_repair = FixupAddonExecuteRepair(coresys)
+    assert addon_execute_repair.auto is True
+
+    coresys.resolution.create_issue(
+        IssueType.MISSING_IMAGE,
+        ContextType.ADDON,
+        reference="local_ssh",
+        suggestions=[SuggestionType.EXECUTE_REPAIR],
+    )
+    with patch.object(DockerInterface, "install") as install:
+        await addon_execute_repair()
+        install.assert_called_once()
+
+    assert not coresys.resolution.issues
+    assert not coresys.resolution.suggestions
+
+
+async def test_fixup_no_addon(coresys: CoreSys):
+    """Test fixup dismisses if addon is missing."""
+    addon_execute_repair = FixupAddonExecuteRepair(coresys)
+    assert addon_execute_repair.auto is True
+
+    coresys.resolution.create_issue(
+        IssueType.MISSING_IMAGE,
+        ContextType.ADDON,
+        reference="local_ssh",
+        suggestions=[SuggestionType.EXECUTE_REPAIR],
+    )
+
+    with patch.object(DockerAddon, "install") as install:
+        await addon_execute_repair()
+        install.assert_not_called()
+
+
+async def test_fixup_image_exists(
+    docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon
+):
+    """Test fixup dismisses if image exists."""
+    docker.images.get.return_value = MagicMock()
+
+    addon_execute_repair = FixupAddonExecuteRepair(coresys)
+    assert addon_execute_repair.auto is True
+
+    coresys.resolution.create_issue(
+        IssueType.MISSING_IMAGE,
+        ContextType.ADDON,
+        reference="local_ssh",
+        suggestions=[SuggestionType.EXECUTE_REPAIR],
+    )
+
+    with patch.object(DockerAddon, "install") as install:
+        await addon_execute_repair()
+        install.assert_not_called()
@@ -50,3 +50,4 @@ async def test_fixup(
     assert systemd_service.StopUnit.calls == [
         ("mnt-data-supervisor-mounts-test.mount", "fail")
     ]
+    coresys.mounts.save_data.assert_called_once()