Mirror of https://github.com/home-assistant/supervisor.git, synced 2025-11-07 01:50:21 +00:00.

Compare commits: 51 commits, 2025.09.3 ... wip-suppor
| SHA1 |
|---|
| 230f359bf7 |
| f996e60784 |
| c9ceb4a4e3 |
| d33305379f |
| 1448a33dbf |
| 1657769044 |
| a8b7923a42 |
| b3b7bc29fa |
| 2098168d04 |
| 02c4fd4a8c |
| 0bee5c6f37 |
| 9c0174f1fd |
| dc3d8b9266 |
| 06d96db55b |
| 131cc3b6d1 |
| b92f5976a3 |
| 370c961c9e |
| b903e1196f |
| 9f8e8ab15a |
| 56bffc839b |
| 952a553c3b |
| 717f1c85f5 |
| ffd498a515 |
| 35f0645cb9 |
| 15c6547382 |
| adefa242e5 |
| 583a8a82fb |
| 322df15e73 |
| 51490c8e41 |
| 3c21a8b8ef |
| ddb8588d77 |
| 81e46b20b8 |
| 5041a1ed5c |
| 337731a55a |
| 53a8044aff |
| c71553f37d |
| c1eb97d8ab |
| 190b734332 |
| 559b6982a3 |
| 301362e9e5 |
| fc928d294c |
| f42aeb4937 |
| fd21886de9 |
| e4bb415e30 |
| 622dda5382 |
| 78a2e15ebb |
| f3e1e0f423 |
| 5779b567f1 |
| 3c5f4920a0 |
| 64f94a159c |
| ab3b147876 |
.github/workflows/builder.yml (vendored, 35 changed lines)
```diff
@@ -107,7 +107,7 @@ jobs:
       # home-assistant/wheels doesn't support sha pinning
       - name: Build wheels
         if: needs.init.outputs.requirements == 'true'
-        uses: home-assistant/wheels@2025.09.1
+        uses: home-assistant/wheels@2025.10.0
         with:
           abi: cp313
           tag: musllinux_1_2
@@ -132,7 +132,7 @@ jobs:

       - name: Install Cosign
         if: needs.init.outputs.publish == 'true'
-        uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # v3.10.0
+        uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
         with:
           cosign-release: "v2.5.3"

@@ -150,7 +150,7 @@ jobs:

       - name: Login to GitHub Container Registry
         if: needs.init.outputs.publish == 'true'
-        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
@@ -170,8 +170,6 @@ jobs:
            --target /data \
            --cosign \
            --generic ${{ needs.init.outputs.version }}
-        env:
-          CAS_API_KEY: ${{ secrets.CAS_TOKEN }}

  version:
    name: Update version
@@ -293,33 +291,6 @@ jobs:
            exit 1
          fi

-      - name: Check the Supervisor code sign
-        if: needs.init.outputs.publish == 'true'
-        run: |
-          echo "Enable Content-Trust"
-          test=$(docker exec hassio_cli ha security options --content-trust=true --no-progress --raw-json | jq -r '.result')
-          if [ "$test" != "ok" ]; then
-            exit 1
-          fi
-
-          echo "Run supervisor health check"
-          test=$(docker exec hassio_cli ha resolution healthcheck --no-progress --raw-json | jq -r '.result')
-          if [ "$test" != "ok" ]; then
-            exit 1
-          fi
-
-          echo "Check supervisor unhealthy"
-          test=$(docker exec hassio_cli ha resolution info --no-progress --raw-json | jq -r '.data.unhealthy[]')
-          if [ "$test" != "" ]; then
-            exit 1
-          fi
-
-          echo "Check supervisor supported"
-          test=$(docker exec hassio_cli ha resolution info --no-progress --raw-json | jq -r '.data.unsupported[]')
-          if [[ "$test" =~ source_mods ]]; then
-            exit 1
-          fi
-
      - name: Create full backup
        id: backup
        run: |
```
.github/workflows/ci.yaml (vendored, 6 changed lines)
```diff
@@ -346,7 +346,7 @@ jobs:
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Install Cosign
-        uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # v3.10.0
+        uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
        with:
          cosign-release: "v2.5.3"
      - name: Restore Python virtual environment
@@ -386,7 +386,7 @@ jobs:
            -o console_output_style=count \
            tests
      - name: Upload coverage artifact
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
        with:
          name: coverage
          path: .coverage
@@ -417,7 +417,7 @@ jobs:
          echo "Failed to restore Python virtual environment from cache"
          exit 1
      - name: Download all coverage artifacts
-        uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
+        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
        with:
          name: coverage
          path: coverage/
```
.github/workflows/sentry.yaml (vendored, 2 changed lines)
```diff
@@ -12,7 +12,7 @@ jobs:
      - name: Check out code from GitHub
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
      - name: Sentry Release
-        uses: getsentry/action-release@4f502acc1df792390abe36f2dcb03612ef144818 # v3.3.0
+        uses: getsentry/action-release@128c5058bbbe93c8e02147fe0a9c713f166259a6 # v3.4.0
        env:
          SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
          SENTRY_ORG: ${{ secrets.SENTRY_ORG }}
```
.github/workflows/stale.yml (vendored, 3 changed lines)
```diff
@@ -9,13 +9,14 @@ jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/stale@3a9db7e6a41a89f618792c92c0e97cc736e1b13f # v10.0.0
+      - uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          days-before-stale: 30
          days-before-close: 7
          stale-issue-label: "stale"
          exempt-issue-labels: "no-stale,Help%20wanted,help-wanted,pinned,rfc,security"
+          only-issue-types: "bug"
          stale-issue-message: >
            There hasn't been any activity on this issue recently. Due to the
            high number of incoming GitHub notifications, we have to clean some
```
.pre-commit-config.yaml

```diff
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.11.10
+    rev: v0.14.3
     hooks:
       - id: ruff
         args:
```
requirements.txt

```diff
@@ -1,14 +1,14 @@
 aiodns==3.5.0
-aiohttp==3.12.15
+aiohttp==3.13.2
 atomicwrites-homeassistant==1.4.1
-attrs==25.3.0
+attrs==25.4.0
 awesomeversion==25.8.0
 blockbuster==1.5.25
 brotli==1.1.0
 ciso8601==2.3.3
-colorlog==6.9.0
+colorlog==6.10.1
 cpe==1.3.1
-cryptography==46.0.1
+cryptography==46.0.3
 debugpy==1.8.17
 deepmerge==2.0
 dirhash==0.5.0
@@ -17,14 +17,14 @@ faust-cchardet==2.1.19
 gitpython==3.1.45
 jinja2==3.1.6
 log-rate-limit==1.4.2
-orjson==3.11.3
+orjson==3.11.4
 pulsectl==24.12.0
-pyudev==0.24.3
+pyudev==0.24.4
 PyYAML==6.0.3
 requests==2.32.5
 securetar==2025.2.1
-sentry-sdk==2.39.0
+sentry-sdk==2.43.0
 setuptools==80.9.0
 voluptuous==0.15.2
-dbus-fast==2.44.3
+dbus-fast==2.44.5
 zlib-fast==0.2.1
```
requirements_tests.txt

```diff
@@ -1,16 +1,16 @@
-astroid==3.3.11
-coverage==7.10.7
+astroid==4.0.1
+coverage==7.11.0
 mypy==1.18.2
 pre-commit==4.3.0
-pylint==3.3.8
+pylint==4.0.2
 pytest-aiohttp==1.1.0
 pytest-asyncio==0.25.2
 pytest-cov==7.0.0
 pytest-timeout==2.4.0
 pytest==8.4.2
-ruff==0.13.2
+ruff==0.14.3
 time-machine==2.19.0
-types-docker==7.1.0.20250916
+types-docker==7.1.0.20251009
 types-pyyaml==6.0.12.20250915
 types-requests==2.32.4.20250913
 urllib3==2.5.0
```
supervisor/addons/addon.py

```diff
@@ -72,7 +72,6 @@ from ..exceptions import (
     AddonsJobError,
     ConfigurationFileError,
     DockerError,
-    HomeAssistantAPIError,
     HostAppArmorError,
 )
 from ..hardware.data import Device
@@ -227,6 +226,7 @@ class Addon(AddonModel):
         )

+        await self._check_ingress_port()

         default_image = self._image(self.data)
         try:
             await self.instance.attach(version=self.version)
@@ -775,7 +775,6 @@ class Addon(AddonModel):
             raise AddonsError("Missing from store, cannot install!")

         await self.sys_addons.data.install(self.addon_store)
-        await self.load()

         def setup_data():
             if not self.path_data.is_dir():
@@ -798,6 +797,9 @@ class Addon(AddonModel):
             await self.sys_addons.data.uninstall(self)
             raise AddonsError() from err

+        # Finish initialization and set up listeners
+        await self.load()
+
         # Add to addon manager
         self.sys_addons.local[self.slug] = self

@@ -842,8 +844,7 @@ class Addon(AddonModel):
         # Cleanup Ingress panel from sidebar
         if self.ingress_panel:
             self.ingress_panel = False
-            with suppress(HomeAssistantAPIError):
-                await self.sys_ingress.update_hass_panel(self)
+            await self.sys_ingress.update_hass_panel(self)

         # Cleanup Ingress dynamic port assignment
         need_ingress_token_cleanup = False
@@ -1512,13 +1513,6 @@ class Addon(AddonModel):
         _LOGGER.info("Finished restore for add-on %s", self.slug)
         return wait_for_start

-    def check_trust(self) -> Awaitable[None]:
-        """Calculate Addon docker content trust.
-
-        Return Coroutine.
-        """
-        return self.instance.check_trust()
-
     @Job(
         name="addon_restart_after_problem",
         throttle_period=WATCHDOG_THROTTLE_PERIOD,
@@ -1561,7 +1555,15 @@ class Addon(AddonModel):
                 )
                 break

-            await asyncio.sleep(WATCHDOG_RETRY_SECONDS)
+            # Exponential backoff to spread retries over the throttle window
+            delay = WATCHDOG_RETRY_SECONDS * (1 << max(attempts - 1, 0))
+            _LOGGER.debug(
+                "Watchdog will retry addon %s in %s seconds (attempt %s)",
+                self.name,
+                delay,
+                attempts + 1,
+            )
+            await asyncio.sleep(delay)

     async def container_state_changed(self, event: DockerContainerStateEvent) -> None:
         """Set addon state from container state."""
```
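The watchdog change above replaces a fixed retry sleep with exponential backoff. A minimal standalone sketch of the same arithmetic; the constant value of 5 seconds is an assumption for illustration, not taken from the diff:

```python
# Sketch: how the watchdog spaces its retries. WATCHDOG_RETRY_SECONDS is
# assumed to be 5 here purely for illustration; the real value lives in
# the supervisor constants.
WATCHDOG_RETRY_SECONDS = 5

def retry_delay(attempts: int) -> int:
    """Delay before the next watchdog retry, doubling per completed attempt."""
    # 1 << max(attempts - 1, 0) yields 1, 1, 2, 4, 8, ... for attempts 0, 1, 2, 3, 4
    return WATCHDOG_RETRY_SECONDS * (1 << max(attempts - 1, 0))

# attempts 1..4 -> 5s, 10s, 20s, 40s
print([retry_delay(n) for n in range(1, 5)])  # [5, 10, 20, 40]
```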
supervisor/addons/manager.py

```diff
@@ -9,8 +9,6 @@ from typing import Self, Union

 from attr import evolve

-from supervisor.jobs.const import JobConcurrency
-
 from ..const import AddonBoot, AddonStartup, AddonState
 from ..coresys import CoreSys, CoreSysAttributes
 from ..exceptions import (
@@ -20,8 +18,9 @@ from ..exceptions import (
     CoreDNSError,
     DockerError,
     HassioError,
-    HomeAssistantAPIError,
 )
+from ..jobs import ChildJobSyncFilter
+from ..jobs.const import JobConcurrency
 from ..jobs.decorator import Job, JobCondition
 from ..resolution.const import ContextType, IssueType, SuggestionType
 from ..store.addon import AddonStore
@@ -183,6 +182,9 @@ class AddonManager(CoreSysAttributes):
         conditions=ADDON_UPDATE_CONDITIONS,
         on_condition=AddonsJobError,
         concurrency=JobConcurrency.QUEUE,
+        child_job_syncs=[
+            ChildJobSyncFilter("docker_interface_install", progress_allocation=1.0)
+        ],
     )
     async def install(
         self, slug: str, *, validation_complete: asyncio.Event | None = None
@@ -230,6 +232,13 @@ class AddonManager(CoreSysAttributes):
         name="addon_manager_update",
         conditions=ADDON_UPDATE_CONDITIONS,
         on_condition=AddonsJobError,
+        # We assume for now the docker image pull is 100% of this task for progress
+        # allocation. But from a user perspective that isn't true. Other steps
+        # that take time which is not accounted for in progress include:
+        # partial backup, image cleanup, apparmor update, and addon restart
+        child_job_syncs=[
+            ChildJobSyncFilter("docker_interface_install", progress_allocation=1.0)
+        ],
     )
     async def update(
         self,
@@ -272,7 +281,10 @@ class AddonManager(CoreSysAttributes):
             addons=[addon.slug],
         )

-        return await addon.update()
+        task = await addon.update()
+
+        _LOGGER.info("Add-on '%s' successfully updated", slug)
+        return task

     @Job(
         name="addon_manager_rebuild",
@@ -351,8 +363,7 @@ class AddonManager(CoreSysAttributes):
         # Update ingress
         if had_ingress != addon.ingress_panel:
             await self.sys_ingress.reload()
-            with suppress(HomeAssistantAPIError):
-                await self.sys_ingress.update_hass_panel(addon)
+            await self.sys_ingress.update_hass_panel(addon)

         return wait_for_start
```
supervisor/addons/model.py

```diff
@@ -72,6 +72,7 @@ from ..const import (
     ATTR_TYPE,
     ATTR_UART,
     ATTR_UDEV,
+    ATTR_ULIMITS,
     ATTR_URL,
     ATTR_USB,
     ATTR_VERSION,
@@ -102,7 +103,6 @@ from .configuration import FolderMapping
 from .const import (
     ATTR_BACKUP,
     ATTR_BREAKING_VERSIONS,
-    ATTR_CODENOTARY,
     ATTR_PATH,
     ATTR_READ_ONLY,
     AddonBackupMode,
@@ -462,6 +462,11 @@ class AddonModel(JobGroup, ABC):
         """Return True if the add-on have his own udev."""
         return self.data[ATTR_UDEV]

+    @property
+    def ulimits(self) -> dict[str, Any]:
+        """Return ulimits configuration."""
+        return self.data[ATTR_ULIMITS]
+
     @property
     def with_kernel_modules(self) -> bool:
         """Return True if the add-on access to kernel modules."""
@@ -626,13 +631,8 @@ class AddonModel(JobGroup, ABC):

     @property
     def signed(self) -> bool:
-        """Return True if the image is signed."""
-        return ATTR_CODENOTARY in self.data
-
-    @property
-    def codenotary(self) -> str | None:
-        """Return Signer email address for CAS."""
-        return self.data.get(ATTR_CODENOTARY)
+        """Currently no signing support."""
+        return False

     @property
     def breaking_versions(self) -> list[AwesomeVersion]:
```
supervisor/addons/validate.py

```diff
@@ -88,6 +88,7 @@ from ..const import (
     ATTR_TYPE,
     ATTR_UART,
     ATTR_UDEV,
+    ATTR_ULIMITS,
     ATTR_URL,
     ATTR_USB,
     ATTR_USER,
@@ -206,6 +207,12 @@ def _warn_addon_config(config: dict[str, Any]):
             name,
         )

+    if ATTR_CODENOTARY in config:
+        _LOGGER.warning(
+            "Add-on '%s' uses deprecated 'codenotary' field in config. This field is no longer used and will be ignored. Please report this to the maintainer.",
+            name,
+        )
+
     return config

@@ -416,13 +423,26 @@ _SCHEMA_ADDON_CONFIG = vol.Schema(
         vol.Optional(ATTR_BACKUP, default=AddonBackupMode.HOT): vol.Coerce(
             AddonBackupMode
         ),
-        vol.Optional(ATTR_CODENOTARY): vol.Email(),
         vol.Optional(ATTR_OPTIONS, default={}): dict,
         vol.Optional(ATTR_SCHEMA, default={}): vol.Any(
             vol.Schema({str: SCHEMA_ELEMENT}),
             False,
         ),
         vol.Optional(ATTR_IMAGE): docker_image,
+        vol.Optional(ATTR_ULIMITS, default=dict): vol.Any(
+            {str: vol.Coerce(int)},  # Simple format: {name: limit}
+            {
+                str: vol.Any(
+                    vol.Coerce(int),  # Simple format for individual entries
+                    vol.Schema(
+                        {  # Detailed format for individual entries
+                            vol.Required("soft"): vol.Coerce(int),
+                            vol.Required("hard"): vol.Coerce(int),
+                        }
+                    ),
+                )
+            },
+        ),
         vol.Optional(ATTR_TIMEOUT, default=10): vol.All(
             vol.Coerce(int), vol.Range(min=10, max=300)
         ),
```
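The new `ulimits` option accepts either a bare integer per limit or a soft/hard pair. A minimal sketch validating both shapes with voluptuous, using a standalone schema rather than the real `_SCHEMA_ADDON_CONFIG`:

```python
# Sketch: the two ulimit shapes accepted by the new add-on schema, validated
# against a standalone voluptuous schema (not the real _SCHEMA_ADDON_CONFIG).
import voluptuous as vol

ULIMITS_SCHEMA = vol.Schema(
    vol.Any(
        {str: vol.Coerce(int)},  # simple: one number used as both soft and hard
        {
            str: vol.Any(
                vol.Coerce(int),
                vol.Schema(
                    {
                        vol.Required("soft"): vol.Coerce(int),
                        vol.Required("hard"): vol.Coerce(int),
                    }
                ),
            )
        },
    )
)

print(ULIMITS_SCHEMA({"nofile": 65535}))                           # simple format
print(ULIMITS_SCHEMA({"nofile": {"soft": 20000, "hard": 40000}}))  # detailed format
```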
supervisor/api/proxy.py

```diff
@@ -77,10 +77,10 @@ class APIProxy(CoreSysAttributes):
                 yield resp
                 return

-        except HomeAssistantAuthError:
-            _LOGGER.error("Authenticate error on API for request %s", path)
-        except HomeAssistantAPIError:
-            _LOGGER.error("Error on API for request %s", path)
+        except HomeAssistantAuthError as err:
+            _LOGGER.error("Authenticate error on API for request %s: %s", path, err)
+        except HomeAssistantAPIError as err:
+            _LOGGER.error("Error on API for request %s: %s", path, err)
         except aiohttp.ClientError as err:
             _LOGGER.error("Client error on API %s request %s", path, err)
         except TimeoutError:
```
supervisor/api/security.py

```diff
@@ -1,24 +1,20 @@
 """Init file for Supervisor Security RESTful API."""

-import asyncio
 import logging
 from typing import Any

 from aiohttp import web
-import attr
 import voluptuous as vol

-from ..const import ATTR_CONTENT_TRUST, ATTR_FORCE_SECURITY, ATTR_PWNED
+from supervisor.exceptions import APIGone
+
+from ..const import ATTR_FORCE_SECURITY, ATTR_PWNED
 from ..coresys import CoreSysAttributes
 from .utils import api_process, api_validate

 _LOGGER: logging.Logger = logging.getLogger(__name__)

 # pylint: disable=no-value-for-parameter
 SCHEMA_OPTIONS = vol.Schema(
     {
         vol.Optional(ATTR_PWNED): vol.Boolean(),
-        vol.Optional(ATTR_CONTENT_TRUST): vol.Boolean(),
         vol.Optional(ATTR_FORCE_SECURITY): vol.Boolean(),
     }
 )
@@ -31,7 +27,6 @@ class APISecurity(CoreSysAttributes):
     async def info(self, request: web.Request) -> dict[str, Any]:
         """Return Security information."""
         return {
-            ATTR_CONTENT_TRUST: self.sys_security.content_trust,
             ATTR_PWNED: self.sys_security.pwned,
             ATTR_FORCE_SECURITY: self.sys_security.force,
         }
@@ -43,8 +38,6 @@ class APISecurity(CoreSysAttributes):

         if ATTR_PWNED in body:
             self.sys_security.pwned = body[ATTR_PWNED]
-        if ATTR_CONTENT_TRUST in body:
-            self.sys_security.content_trust = body[ATTR_CONTENT_TRUST]
         if ATTR_FORCE_SECURITY in body:
             self.sys_security.force = body[ATTR_FORCE_SECURITY]
@@ -54,6 +47,9 @@ class APISecurity(CoreSysAttributes):

     @api_process
     async def integrity_check(self, request: web.Request) -> dict[str, Any]:
-        """Run backend integrity check."""
-        result = await asyncio.shield(self.sys_security.integrity_check())
-        return attr.asdict(result)
+        """Run backend integrity check.
+
+        CodeNotary integrity checking has been removed. This endpoint now returns
+        an error indicating the feature is gone.
+        """
+        raise APIGone("Integrity check feature has been removed.")
```
supervisor/api/supervisor.py

```diff
@@ -16,14 +16,12 @@ from ..const import (
     ATTR_BLK_READ,
     ATTR_BLK_WRITE,
     ATTR_CHANNEL,
-    ATTR_CONTENT_TRUST,
     ATTR_COUNTRY,
     ATTR_CPU_PERCENT,
     ATTR_DEBUG,
     ATTR_DEBUG_BLOCK,
     ATTR_DETECT_BLOCKING_IO,
     ATTR_DIAGNOSTICS,
-    ATTR_FORCE_SECURITY,
     ATTR_HEALTHY,
     ATTR_ICON,
     ATTR_IP_ADDRESS,
@@ -69,8 +67,6 @@ SCHEMA_OPTIONS = vol.Schema(
         vol.Optional(ATTR_DEBUG): vol.Boolean(),
         vol.Optional(ATTR_DEBUG_BLOCK): vol.Boolean(),
         vol.Optional(ATTR_DIAGNOSTICS): vol.Boolean(),
-        vol.Optional(ATTR_CONTENT_TRUST): vol.Boolean(),
-        vol.Optional(ATTR_FORCE_SECURITY): vol.Boolean(),
         vol.Optional(ATTR_AUTO_UPDATE): vol.Boolean(),
         vol.Optional(ATTR_DETECT_BLOCKING_IO): vol.Coerce(DetectBlockingIO),
         vol.Optional(ATTR_COUNTRY): str,
```
supervisor/api/utils.py

```diff
@@ -151,7 +151,7 @@ def api_return_error(
     if check_exception_chain(error, DockerAPIError):
         message = format_message(message)
     if not message:
-        message = "Unknown error, see supervisor"
+        message = "Unknown error, see Supervisor logs (check with 'ha supervisor logs')"

     match error_type:
         case const.CONTENT_TYPE_TEXT:
```
supervisor/auth.py

```diff
@@ -132,8 +132,8 @@ class Auth(FileConfiguration, CoreSysAttributes):
                 _LOGGER.warning("Unauthorized login for '%s'", username)
                 await self._dismatch_cache(username, password)
                 return False
-        except HomeAssistantAPIError:
-            _LOGGER.error("Can't request auth on Home Assistant!")
+        except HomeAssistantAPIError as err:
+            _LOGGER.error("Can't request auth on Home Assistant: %s", err)
         finally:
             self._running.pop(username, None)

@@ -152,8 +152,8 @@ class Auth(FileConfiguration, CoreSysAttributes):
                 return

             _LOGGER.warning("The user '%s' is not registered", username)
-        except HomeAssistantAPIError:
-            _LOGGER.error("Can't request password reset on Home Assistant!")
+        except HomeAssistantAPIError as err:
+            _LOGGER.error("Can't request password reset on Home Assistant: %s", err)

         raise AuthPasswordResetError()
```
supervisor/bootstrap.py

```diff
@@ -105,7 +105,6 @@ async def initialize_coresys() -> CoreSys:

     if coresys.dev:
         coresys.updater.channel = UpdateChannel.DEV
-        coresys.security.content_trust = False

     # Convert datetime
     logging.Formatter.converter = lambda *args: coresys.now().timetuple()
```
supervisor/const.py

```diff
@@ -348,6 +348,7 @@ ATTR_TRANSLATIONS = "translations"
 ATTR_TYPE = "type"
 ATTR_UART = "uart"
 ATTR_UDEV = "udev"
+ATTR_ULIMITS = "ulimits"
 ATTR_UNHEALTHY = "unhealthy"
 ATTR_UNSAVED = "unsaved"
 ATTR_UNSUPPORTED = "unsupported"
```
supervisor/discovery.py

```diff
@@ -2,7 +2,6 @@

 from __future__ import annotations

-from contextlib import suppress
 import logging
 from typing import TYPE_CHECKING, Any
 from uuid import uuid4
@@ -119,7 +118,7 @@ class Discovery(CoreSysAttributes, FileConfiguration):
         data = attr.asdict(message)
         data.pop(ATTR_CONFIG)

-        with suppress(HomeAssistantAPIError):
+        try:
             async with self.sys_homeassistant.api.make_request(
                 command,
                 f"api/hassio_push/discovery/{message.uuid}",
@@ -128,5 +127,5 @@ class Discovery(CoreSysAttributes, FileConfiguration):
             ):
                 _LOGGER.info("Discovery %s message send", message.uuid)
                 return
-
-        _LOGGER.warning("Discovery %s message fail", message.uuid)
+        except HomeAssistantAPIError as err:
+            _LOGGER.error("Discovery %s message failed: %s", message.uuid, err)
```
supervisor/docker/addon.py

```diff
@@ -318,7 +318,18 @@ class DockerAddon(DockerInterface):
             mem = 128 * 1024 * 1024
             limits.append(docker.types.Ulimit(name="memlock", soft=mem, hard=mem))

-        # Return None if no capabilities is present
+        # Add configurable ulimits from add-on config
+        for name, config in self.addon.ulimits.items():
+            if isinstance(config, int):
+                # Simple format: both soft and hard limits are the same
+                limits.append(docker.types.Ulimit(name=name, soft=config, hard=config))
+            elif isinstance(config, dict):
+                # Detailed format: both soft and hard limits are mandatory
+                soft = config["soft"]
+                hard = config["hard"]
+                limits.append(docker.types.Ulimit(name=name, soft=soft, hard=hard))
+
+        # Return None if no ulimits are present
         if limits:
             return limits
         return None
@@ -835,16 +846,6 @@ class DockerAddon(DockerInterface):
         ):
             self.sys_resolution.dismiss_issue(self.addon.device_access_missing_issue)

-    async def _validate_trust(self, image_id: str) -> None:
-        """Validate trust of content."""
-        if not self.addon.signed:
-            return
-
-        checksum = image_id.partition(":")[2]
-        return await self.sys_security.verify_content(
-            cast(str, self.addon.codenotary), checksum
-        )
-
     @Job(
         name="docker_addon_hardware_events",
         conditions=[JobCondition.OS_AGENT],
```
supervisor/docker/homeassistant.py

```diff
@@ -5,7 +5,7 @@ from ipaddress import IPv4Address
 import logging
 import re

-from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
+from awesomeversion import AwesomeVersion
 from docker.types import Mount

 from ..const import LABEL_MACHINE
@@ -244,13 +244,3 @@ class DockerHomeAssistant(DockerInterface):
             self.image,
             self.sys_homeassistant.version,
         )
-
-    async def _validate_trust(self, image_id: str) -> None:
-        """Validate trust of content."""
-        try:
-            if self.version in {None, LANDINGPAGE} or self.version < _VERIFY_TRUST:
-                return
-        except AwesomeVersionCompareException:
-            return
-
-        await super()._validate_trust(image_id)
```
supervisor/docker/interface.py

```diff
@@ -31,15 +31,12 @@ from ..const import (
 )
 from ..coresys import CoreSys
 from ..exceptions import (
-    CodeNotaryError,
-    CodeNotaryUntrusted,
     DockerAPIError,
     DockerError,
     DockerJobError,
     DockerLogOutOfOrder,
     DockerNotFound,
     DockerRequestError,
-    DockerTrustError,
 )
 from ..jobs import SupervisorJob
 from ..jobs.const import JOB_GROUP_DOCKER_INTERFACE, JobConcurrency
@@ -220,10 +217,12 @@ class DockerInterface(JobGroup, ABC):

         await self.sys_run_in_executor(self.sys_docker.docker.login, **credentials)

-    def _process_pull_image_log(self, job_id: str, reference: PullLogEntry) -> None:
+    def _process_pull_image_log(  # noqa: C901
+        self, install_job_id: str, reference: PullLogEntry
+    ) -> None:
         """Process events fired from a docker while pulling an image, filtered to a given job id."""
         if (
-            reference.job_id != job_id
+            reference.job_id != install_job_id
             or not reference.id
             or not reference.status
             or not (stage := PullImageLayerStage.from_status(reference.status))
@@ -237,21 +236,22 @@ class DockerInterface(JobGroup, ABC):
                 name="Pulling container image layer",
                 initial_stage=stage.status,
                 reference=reference.id,
-                parent_id=job_id,
+                parent_id=install_job_id,
                 internal=True,
             )
             job.done = False
             return

         # Find our sub job to update details of
         for j in self.sys_jobs.jobs:
-            if j.parent_id == job_id and j.reference == reference.id:
+            if j.parent_id == install_job_id and j.reference == reference.id:
                 job = j
                 break

         # This likely only occurs if the logs came in out of sync and we got progress before the Pulling FS Layer one
         if not job:
             raise DockerLogOutOfOrder(
-                f"Received pull image log with status {reference.status} for image id {reference.id} and parent job {job_id} but could not find a matching job, skipping",
+                f"Received pull image log with status {reference.status} for image id {reference.id} and parent job {install_job_id} but could not find a matching job, skipping",
                 _LOGGER.debug,
             )
@@ -303,6 +303,8 @@ class DockerInterface(JobGroup, ABC):
         # Our filters have all passed. Time to update the job
         # Only downloading and extracting have progress details. Use that to set extra
         # We'll leave it around on later stages as the total bytes may be useful after that stage
+        # Enforce range to prevent float drift error
+        progress = max(0, min(progress, 100))
         if (
             stage in {PullImageLayerStage.DOWNLOADING, PullImageLayerStage.EXTRACTING}
             and reference.progress_detail
@@ -316,19 +318,86 @@ class DockerInterface(JobGroup, ABC):
                 },
             )
         else:
+            # If we reach DOWNLOAD_COMPLETE without ever having set extra (small layers that skip
+            # the downloading phase), set a minimal extra so aggregate progress calculation can proceed
+            extra = job.extra
+            if stage == PullImageLayerStage.DOWNLOAD_COMPLETE and not job.extra:
+                extra = {"current": 1, "total": 1}
+
             job.update(
                 progress=progress,
                 stage=stage.status,
                 done=stage == PullImageLayerStage.PULL_COMPLETE,
-                extra=None
-                if stage == PullImageLayerStage.RETRYING_DOWNLOAD
-                else job.extra,
+                extra=None if stage == PullImageLayerStage.RETRYING_DOWNLOAD else extra,
             )

         # Once we have received a progress update for every child job, start to set status of the main one
-        install_job = self.sys_jobs.get_job(job_id)
+        install_job = self.sys_jobs.get_job(install_job_id)
         layer_jobs = [
             job
             for job in self.sys_jobs.jobs
             if job.parent_id == install_job.uuid
             and job.name == "Pulling container image layer"
         ]

         # First set the total bytes to be downloaded/extracted on the main job
+        # Note: With containerd snapshotter, total may be None (time-based progress instead of byte-based)
         if not install_job.extra:
             total = 0
+            has_byte_progress = True
             for job in layer_jobs:
                 if not job.extra:
                     return
+                # If any layer has None for total, we can't do byte-weighted aggregation
+                if job.extra["total"] is None:
+                    has_byte_progress = False
+                    break
                 total += job.extra["total"]
+
+            # Store whether we have byte-based progress for later use
+            install_job.extra = {"total": total if has_byte_progress else None}
         else:
             total = install_job.extra["total"]
+            has_byte_progress = total is not None

         # Then determine total progress based on progress of each sub-job
+        # If we have byte counts, weight by size. Otherwise, simple average.
         progress = 0.0
         stage = PullImageLayerStage.PULL_COMPLETE
         for job in layer_jobs:
             if not job.extra:
                 return
-            progress += job.progress * (job.extra["total"] / total)
+
+            if has_byte_progress:
+                # Byte-weighted progress (classic Docker behavior)
+                progress += job.progress * (job.extra["total"] / total)
+            else:
+                # Simple average progress (containerd snapshotter with time-based progress)
+                progress += job.progress / len(layer_jobs)
+
             job_stage = PullImageLayerStage.from_status(cast(str, job.stage))

             if job_stage < PullImageLayerStage.EXTRACTING:
                 stage = PullImageLayerStage.DOWNLOADING
             elif (
                 stage == PullImageLayerStage.PULL_COMPLETE
                 and job_stage < PullImageLayerStage.PULL_COMPLETE
             ):
                 stage = PullImageLayerStage.EXTRACTING

+        # Ensure progress is 100 at this point to prevent float drift
+        if stage == PullImageLayerStage.PULL_COMPLETE:
+            progress = 100
+
         # To reduce noise, limit updates to when result has changed by an entire percent or when stage changed
         if stage != install_job.stage or progress >= install_job.progress + 1:
             install_job.update(stage=stage.status, progress=max(0, min(progress, 100)))
@@ -351,11 +420,11 @@ class DockerInterface(JobGroup, ABC):
         # Try login if we have defined credentials
         await self._docker_login(image)

-        job_id = self.sys_jobs.current.uuid
+        curr_job_id = self.sys_jobs.current.uuid

         async def process_pull_image_log(reference: PullLogEntry) -> None:
             try:
-                self._process_pull_image_log(job_id, reference)
+                self._process_pull_image_log(curr_job_id, reference)
             except DockerLogOutOfOrder as err:
                 # Send all these to sentry. Missing a few progress updates
                 # shouldn't matter to users but matters to us
@@ -374,18 +443,6 @@ class DockerInterface(JobGroup, ABC):
                 platform=MAP_ARCH[image_arch],
             )

-            # Validate content
-            try:
-                await self._validate_trust(cast(str, docker_image.id))
-            except CodeNotaryError:
-                with suppress(docker.errors.DockerException):
-                    await self.sys_run_in_executor(
-                        self.sys_docker.images.remove,
-                        image=f"{image}:{version!s}",
-                        force=True,
-                    )
-                raise
-
             # Tag latest
             if latest:
                 _LOGGER.info(
@@ -411,16 +468,6 @@ class DockerInterface(JobGroup, ABC):
             raise DockerError(
                 f"Unknown error with {image}:{version!s} -> {err!s}", _LOGGER.error
             ) from err
-        except CodeNotaryUntrusted as err:
-            raise DockerTrustError(
-                f"Pulled image {image}:{version!s} failed on content-trust verification!",
-                _LOGGER.critical,
-            ) from err
-        except CodeNotaryError as err:
-            raise DockerTrustError(
-                f"Error happened on Content-Trust check for {image}:{version!s}: {err!s}",
-                _LOGGER.error,
-            ) from err
         finally:
             if listener:
                 self.sys_bus.remove_listener(listener)
@@ -629,7 +676,10 @@ class DockerInterface(JobGroup, ABC):
         concurrency=JobConcurrency.GROUP_REJECT,
     )
     async def update(
-        self, version: AwesomeVersion, image: str | None = None, latest: bool = False
+        self,
+        version: AwesomeVersion,
+        image: str | None = None,
+        latest: bool = False,
     ) -> None:
         """Update a Docker image."""
         image = image or self.image
@@ -755,24 +805,3 @@ class DockerInterface(JobGroup, ABC):
         return self.sys_run_in_executor(
             self.sys_docker.container_run_inside, self.name, command
         )
-
-    async def _validate_trust(self, image_id: str) -> None:
-        """Validate trust of content."""
-        checksum = image_id.partition(":")[2]
-        return await self.sys_security.verify_own_content(checksum)
-
-    @Job(
-        name="docker_interface_check_trust",
-        on_condition=DockerJobError,
-        concurrency=JobConcurrency.GROUP_REJECT,
-    )
-    async def check_trust(self) -> None:
-        """Check trust of exists Docker image."""
-        try:
-            image = await self.sys_run_in_executor(
-                self.sys_docker.images.get, f"{self.image}:{self.version!s}"
-            )
-        except (docker.errors.DockerException, requests.RequestException):
-            return
-
-        await self._validate_trust(cast(str, image.id))
```
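The aggregation introduced above weights each layer by its byte total when sizes are known and falls back to a simple average otherwise. A worked sketch with made-up layer numbers:

```python
# Sketch: byte-weighted vs simple-average aggregation of layer pull progress.
# Layer sizes and progress values are made up for illustration.
layers = [
    {"progress": 100.0, "total": 50_000_000},   # small layer, finished
    {"progress": 40.0, "total": 450_000_000},   # big layer, still downloading
]

total = sum(layer["total"] for layer in layers)
byte_weighted = sum(layer["progress"] * (layer["total"] / total) for layer in layers)
simple_average = sum(layer["progress"] for layer in layers) / len(layers)

print(round(byte_weighted, 1))   # 46.0 -> dominated by the big layer
print(round(simple_average, 1))  # 70.0 -> fallback when totals are None (containerd)
```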
supervisor/docker/manager.py

```diff
@@ -326,11 +326,19 @@ class DockerAPI(CoreSysAttributes):
         if name:
             cidfile_path = self.coresys.config.path_cid_files / f"{name}.cid"

-            # Remove the file if it exists e.g. as a leftover from unclean shutdown
-            if cidfile_path.is_file():
-                with suppress(OSError):
+            # Remove the file/directory if it exists e.g. as a leftover from unclean shutdown
+            # Note: Can be a directory if Docker auto-started container with restart policy
+            # before Supervisor could write the CID file
+            with suppress(OSError):
+                if cidfile_path.is_dir():
+                    cidfile_path.rmdir()
+                elif cidfile_path.is_file():
                     cidfile_path.unlink(missing_ok=True)

+            # Create empty CID file before adding it to volumes to prevent Docker
+            # from creating it as a directory if container auto-starts
+            cidfile_path.touch()
+
             extern_cidfile_path = (
                 self.coresys.config.path_extern_cid_files / f"{name}.cid"
             )
```
supervisor/exceptions.py

```diff
@@ -423,6 +423,12 @@ class APINotFound(APIError):
     status = 404


+class APIGone(APIError):
+    """API is no longer available."""
+
+    status = 410
+
+
 class APIAddonNotInstalled(APIError):
     """Not installed addon requested at addons API."""

@@ -577,21 +583,6 @@ class PwnedConnectivityError(PwnedError):
     """Connectivity errors while checking pwned passwords."""


-# util/codenotary
-
-
-class CodeNotaryError(HassioError):
-    """Error general with CodeNotary."""
-
-
-class CodeNotaryUntrusted(CodeNotaryError):
-    """Error on untrusted content."""
-
-
-class CodeNotaryBackendError(CodeNotaryError):
-    """CodeNotary backend error happening."""
-
-
 # util/whoami
```
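The new `APIGone` follows the existing `APIError` convention of carrying an HTTP status on the class. A reduced sketch of how such an exception maps to a 410 response; the handler below is a simplified stand-in for supervisor's `api_process` decorator, not the real implementation:

```python
# Sketch: how an APIError subclass carrying a status maps to an HTTP response.
# The handler is a simplified stand-in for supervisor's api_process wrapper in
# supervisor/api/utils.py, shown only to illustrate the 410 flow.
from aiohttp import web


class APIError(Exception):
    """Base API error (mirrors the shape used in supervisor/exceptions.py)."""

    status = 400


class APIGone(APIError):
    """API is no longer available."""

    status = 410


async def integrity_check(request: web.Request) -> web.Response:
    try:
        # The real handler raises; nothing is left to compute for this endpoint
        raise APIGone("Integrity check feature has been removed.")
    except APIError as err:
        return web.json_response(
            {"result": "error", "message": str(err)}, status=err.status
        )
```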
supervisor/hardware/disk.py

```diff
@@ -9,7 +9,12 @@ from typing import Any
 from supervisor.resolution.const import UnhealthyReason

 from ..coresys import CoreSys, CoreSysAttributes
-from ..exceptions import DBusError, DBusObjectError, HardwareNotFound
+from ..exceptions import (
+    DBusError,
+    DBusNotConnectedError,
+    DBusObjectError,
+    HardwareNotFound,
+)
 from .const import UdevSubsystem
 from .data import Device
@@ -207,6 +212,8 @@ class HwDisk(CoreSysAttributes):
         try:
             block_device = self.sys_dbus.udisks2.get_block_device_by_path(device_path)
             drive = self.sys_dbus.udisks2.get_drive(block_device.drive)
+        except DBusNotConnectedError:
+            return None
         except DBusObjectError:
             _LOGGER.warning(
                 "Unable to find UDisks2 drive for device at %s", device_path.as_posix()
```
supervisor/homeassistant/api.py

```diff
@@ -2,7 +2,7 @@

 import asyncio
 from collections.abc import AsyncIterator
-from contextlib import asynccontextmanager, suppress
+from contextlib import asynccontextmanager
 from dataclasses import dataclass
 from datetime import UTC, datetime, timedelta
 import logging
@@ -15,9 +15,7 @@ from multidict import MultiMapping

 from ..coresys import CoreSys, CoreSysAttributes
 from ..exceptions import HomeAssistantAPIError, HomeAssistantAuthError
-from ..jobs.const import JobConcurrency
-from ..jobs.decorator import Job
-from ..utils import check_port, version_is_new_enough
+from ..utils import version_is_new_enough
 from .const import LANDINGPAGE

 _LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -43,14 +41,19 @@ class HomeAssistantAPI(CoreSysAttributes):
         # We don't persist access tokens. Instead we fetch new ones when needed
         self.access_token: str | None = None
         self._access_token_expires: datetime | None = None
+        self._token_lock: asyncio.Lock = asyncio.Lock()

-    @Job(
-        name="home_assistant_api_ensure_access_token",
-        internal=True,
-        concurrency=JobConcurrency.QUEUE,
-    )
     async def ensure_access_token(self) -> None:
-        """Ensure there is an access token."""
+        """Ensure there is a valid access token.
+
+        Raises:
+            HomeAssistantAuthError: When we cannot get a valid token
+            aiohttp.ClientError: On network or connection errors
+            TimeoutError: On request timeouts
+
+        """
+        # Fast path check without lock (avoid unnecessary locking
+        # for the majority of calls).
         if (
             self.access_token
             and self._access_token_expires
@@ -58,7 +61,15 @@ class HomeAssistantAPI(CoreSysAttributes):
         ):
             return

-        with suppress(asyncio.TimeoutError, aiohttp.ClientError):
+        async with self._token_lock:
+            # Double-check after acquiring lock (avoid race condition)
+            if (
+                self.access_token
+                and self._access_token_expires
+                and self._access_token_expires > datetime.now(tz=UTC)
+            ):
+                return
+
             async with self.sys_websession.post(
                 f"{self.sys_homeassistant.api_url}/auth/token",
                 timeout=aiohttp.ClientTimeout(total=30),
@@ -92,7 +103,36 @@ class HomeAssistantAPI(CoreSysAttributes):
         params: MultiMapping[str] | None = None,
         headers: dict[str, str] | None = None,
     ) -> AsyncIterator[aiohttp.ClientResponse]:
-        """Async context manager to make a request with right auth."""
+        """Async context manager to make authenticated requests to Home Assistant API.
+
+        This context manager handles authentication token management automatically,
+        including token refresh on 401 responses. It yields the HTTP response
+        for the caller to handle.
+
+        Error Handling:
+            - HTTP error status codes (4xx, 5xx) are preserved in the response
+            - Authentication is handled transparently with one retry on 401
+            - Network/connection failures raise HomeAssistantAPIError
+            - No logging is performed - callers should handle logging as needed
+
+        Args:
+            method: HTTP method (get, post, etc.)
+            path: API path relative to Home Assistant base URL
+            json: JSON data to send in request body
+            content_type: Override content-type header
+            data: Raw data to send in request body
+            timeout: Request timeout in seconds
+            params: URL query parameters
+            headers: Additional HTTP headers
+
+        Yields:
+            aiohttp.ClientResponse: The HTTP response object
+
+        Raises:
+            HomeAssistantAPIError: When request cannot be completed due to
+                network errors, timeouts, or connection failures
+
+        """
         url = f"{self.sys_homeassistant.api_url}/{path}"
         headers = headers or {}
@@ -101,10 +141,9 @@ class HomeAssistantAPI(CoreSysAttributes):
             headers[hdrs.CONTENT_TYPE] = content_type

         for _ in (1, 2):
-            await self.ensure_access_token()
-            headers[hdrs.AUTHORIZATION] = f"Bearer {self.access_token}"
-
             try:
+                await self.ensure_access_token()
+                headers[hdrs.AUTHORIZATION] = f"Bearer {self.access_token}"
                 async with getattr(self.sys_websession, method)(
                     url,
                     data=data,
@@ -120,23 +159,19 @@ class HomeAssistantAPI(CoreSysAttributes):
                     continue
                 yield resp
                 return
-            except TimeoutError:
-                _LOGGER.error("Timeout on call %s.", url)
-                break
+            except TimeoutError as err:
+                _LOGGER.debug("Timeout on call %s.", url)
+                raise HomeAssistantAPIError(str(err)) from err
             except aiohttp.ClientError as err:
-                _LOGGER.error("Error on call %s: %s", url, err)
-                break
-
-        raise HomeAssistantAPIError()
+                _LOGGER.debug("Error on call %s: %s", url, err)
+                raise HomeAssistantAPIError(str(err)) from err

     async def _get_json(self, path: str) -> dict[str, Any]:
         """Return Home Assistant get API."""
         async with self.make_request("get", path) as resp:
             if resp.status in (200, 201):
                 return await resp.json()
-            else:
-                _LOGGER.debug("Home Assistant API return: %d", resp.status)
-                raise HomeAssistantAPIError()
+            raise HomeAssistantAPIError(f"Home Assistant Core API return {resp.status}")

     async def get_config(self) -> dict[str, Any]:
         """Return Home Assistant config."""
@@ -155,15 +190,8 @@ class HomeAssistantAPI(CoreSysAttributes):
         ):
             return None

-        # Check if port is up
-        if not await check_port(
-            self.sys_homeassistant.ip_address,
-            self.sys_homeassistant.api_port,
-        ):
-            return None
-
         # Check if API is up
-        with suppress(HomeAssistantAPIError):
+        try:
             # get_core_state is available since 2023.8.0 and preferred
             # since it is significantly faster than get_config because
             # it does not require serializing the entire config
@@ -181,6 +209,8 @@ class HomeAssistantAPI(CoreSysAttributes):
                 migrating = recorder_state.get("migration_in_progress", False)
                 live_migration = recorder_state.get("migration_is_live", False)
                 return APIState(state, migrating and not live_migration)
+        except HomeAssistantAPIError as err:
+            _LOGGER.debug("Can't connect to Home Assistant API: %s", err)

         return None
```
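The `ensure_access_token` rewrite above is double-checked locking on an `asyncio.Lock`: a lock-free fast path, then a re-check after acquiring the lock so concurrent tasks refresh the token only once. A self-contained sketch of that control flow; `_fetch_token` and the 30-minute lifetime are hypothetical stand-ins:

```python
# Sketch: double-checked locking around a cached token, as in
# ensure_access_token() above. _fetch_token() is a hypothetical stand-in
# for the real POST to /auth/token.
import asyncio
from datetime import UTC, datetime, timedelta


class TokenCache:
    def __init__(self) -> None:
        self._token: str | None = None
        self._expires: datetime | None = None
        self._lock = asyncio.Lock()

    async def ensure(self) -> str:
        # Fast path: no lock needed while the cached token is still valid
        if self._token and self._expires and self._expires > datetime.now(tz=UTC):
            return self._token
        async with self._lock:
            # Double-check: another task may have refreshed while we waited
            if self._token and self._expires and self._expires > datetime.now(tz=UTC):
                return self._token
            self._token = await self._fetch_token()
            self._expires = datetime.now(tz=UTC) + timedelta(minutes=30)
            return self._token

    async def _fetch_token(self) -> str:
        return "new-token"  # placeholder for the real auth request


print(asyncio.run(TokenCache().ensure()))
```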
supervisor/homeassistant/core.py

```diff
@@ -28,6 +28,7 @@ from ..exceptions import (
     HomeAssistantUpdateError,
     JobException,
 )
+from ..jobs import ChildJobSyncFilter
 from ..jobs.const import JOB_GROUP_HOME_ASSISTANT_CORE, JobConcurrency, JobThrottle
 from ..jobs.decorator import Job, JobCondition
 from ..jobs.job_group import JobGroup
@@ -224,6 +225,13 @@ class HomeAssistantCore(JobGroup):
         ],
         on_condition=HomeAssistantJobError,
         concurrency=JobConcurrency.GROUP_REJECT,
+        # We assume for now the docker image pull is 100% of this task. But from
+        # a user perspective that isn't true. Other steps that take time which
+        # is not accounted for in progress include: partial backup, image
+        # cleanup, and Home Assistant restart
+        child_job_syncs=[
+            ChildJobSyncFilter("docker_interface_install", progress_allocation=1.0)
+        ],
     )
     async def update(
         self,
@@ -420,13 +428,6 @@ class HomeAssistantCore(JobGroup):
         """
         return self.instance.logs()

-    def check_trust(self) -> Awaitable[None]:
-        """Calculate HomeAssistant docker content trust.
-
-        Return Coroutine.
-        """
-        return self.instance.check_trust()
-
     async def stats(self) -> DockerStats:
         """Return stats of Home Assistant."""
         try:
```
supervisor/homeassistant/websocket.py

```diff
@@ -3,6 +3,7 @@
 from __future__ import annotations

 import asyncio
+from contextlib import suppress
 import logging
 from typing import Any, TypeVar, cast
@@ -202,7 +203,8 @@ class HomeAssistantWebSocket(CoreSysAttributes):
         if self._client is not None and self._client.connected:
             return self._client

-        await self.sys_homeassistant.api.ensure_access_token()
+        with suppress(asyncio.TimeoutError, aiohttp.ClientError):
+            await self.sys_homeassistant.api.ensure_access_token()
         client = await WSClient.connect_with_auth(
             self.sys_websession,
             self.sys_loop,
```
supervisor/ingress.py

```diff
@@ -15,6 +15,7 @@ from .const import (
     IngressSessionDataDict,
 )
 from .coresys import CoreSys, CoreSysAttributes
+from .exceptions import HomeAssistantAPIError
 from .utils import check_port
 from .utils.common import FileConfiguration
 from .utils.dt import utc_from_timestamp, utcnow
@@ -191,12 +192,17 @@ class Ingress(FileConfiguration, CoreSysAttributes):

         # Update UI
         method = "post" if addon.ingress_panel else "delete"
-        async with self.sys_homeassistant.api.make_request(
-            method, f"api/hassio_push/panel/{addon.slug}"
-        ) as resp:
-            if resp.status in (200, 201):
-                _LOGGER.info("Update Ingress as panel for %s", addon.slug)
-            else:
-                _LOGGER.warning(
-                    "Fails Ingress panel for %s with %i", addon.slug, resp.status
-                )
+        try:
+            async with self.sys_homeassistant.api.make_request(
+                method, f"api/hassio_push/panel/{addon.slug}"
+            ) as resp:
+                if resp.status in (200, 201):
+                    _LOGGER.info("Update Ingress as panel for %s", addon.slug)
+                else:
+                    _LOGGER.warning(
+                        "Failed to update the Ingress panel for %s with %i",
+                        addon.slug,
+                        resp.status,
+                    )
+        except HomeAssistantAPIError as err:
+            _LOGGER.error("Panel update request failed for %s: %s", addon.slug, err)
```
supervisor/jobs/__init__.py

```diff
@@ -1,5 +1,7 @@
 """Supervisor job manager."""

+from __future__ import annotations
+
 import asyncio
 from collections.abc import Callable, Coroutine, Generator
 from contextlib import contextmanager, suppress
@@ -10,6 +12,7 @@
 from typing import Any, Self
 from uuid import uuid4

+from attr.validators import gt, lt
 from attrs import Attribute, define, field
 from attrs.setters import convert as attr_convert, frozen, validate as attr_validate
 from attrs.validators import ge, le
@@ -47,13 +50,13 @@ def _remove_current_job(context: Context) -> Context:
     return context


-def _invalid_if_done(instance: "SupervisorJob", *_) -> None:
+def _invalid_if_done(instance: SupervisorJob, *_) -> None:
     """Validate that job is not done."""
     if instance.done:
         raise ValueError("Cannot update a job that is done")


-def _on_change(instance: "SupervisorJob", attribute: Attribute, value: Any) -> Any:
+def _on_change(instance: SupervisorJob, attribute: Attribute, value: Any) -> Any:
     """Forward a change to a field on to the listener if defined."""
     value = attr_convert(instance, attribute, value)
     value = attr_validate(instance, attribute, value)
@@ -62,18 +65,42 @@
     return value


-def _invalid_if_started(instance: "SupervisorJob", *_) -> None:
+def _invalid_if_started(instance: SupervisorJob, *_) -> None:
     """Validate that job has not been started."""
     if instance.done is not None:
         raise ValueError("Field cannot be updated once job has started")


+@define(frozen=True)
+class ChildJobSyncFilter:
+    """Filter to identify a child job to sync progress from."""
+
+    name: str
+    reference: str | None | type[DEFAULT] = DEFAULT
+    progress_allocation: float = field(default=1.0, validator=[gt(0.0), le(1.0)])
+
+    def matches(self, job: SupervisorJob) -> bool:
+        """Return true if job matches filter."""
+        return job.name == self.name and self.reference in (DEFAULT, job.reference)
+
+
+@define(frozen=True)
+class ParentJobSync:
+    """Parent job sync details."""
+
+    uuid: str
+    starting_progress: float = field(validator=[ge(0.0), lt(100.0)])
+    progress_allocation: float = field(validator=[gt(0.0), le(1.0)])
+
+
 @define
 class SupervisorJobError:
     """Representation of an error occurring during a supervisor job."""

     type_: type[HassioError] = HassioError
-    message: str = "Unknown error, see supervisor logs"
+    message: str = (
+        "Unknown error, see Supervisor logs (check with 'ha supervisor logs')"
+    )
     stage: str | None = None

     def as_dict(self) -> dict[str, str | None]:
@@ -103,13 +130,15 @@ class SupervisorJob:
     )
     parent_id: str | None = field(factory=_CURRENT_JOB.get, on_setattr=frozen)
     done: bool | None = field(init=False, default=None, on_setattr=_on_change)
-    on_change: Callable[["SupervisorJob", Attribute, Any], None] | None = None
+    on_change: Callable[[SupervisorJob, Attribute, Any], None] | None = None
     internal: bool = field(default=False)
     errors: list[SupervisorJobError] = field(
         init=False, factory=list, on_setattr=_on_change
     )
     release_event: asyncio.Event | None = None
     extra: dict[str, Any] | None = None
+    child_job_syncs: list[ChildJobSyncFilter] | None = None
+    parent_job_syncs: list[ParentJobSync] = field(init=False, factory=list)

     def as_dict(self) -> dict[str, Any]:
         """Return dictionary representation."""
@@ -152,8 +181,14 @@ class SupervisorJob:
         try:
             token = _CURRENT_JOB.set(self.uuid)
             yield self
+        # Cannot have an else without an except so we do nothing and re-raise
+        except:  # noqa: TRY203
+            raise
+        else:
+            self.update(progress=100, done=True)
         finally:
-            self.done = True
+            if not self.done:
+                self.done = True
             if token:
                 _CURRENT_JOB.reset(token)
@@ -174,12 +209,14 @@ class SupervisorJob:
             self.stage = stage
         if extra != DEFAULT:
             self.extra = extra

         # Done has special event. use that to trigger on change if included
         # If not then just use any other field to trigger
+        self.on_change = on_change
         if done is not None:
             self.done = done
-
-        self.on_change = on_change
-        # Just triggers the normal on change
-        self.reference = self.reference
+            # Just triggers the normal on change
+            self.reference = self.reference
+        else:
+            self.reference = self.reference


 class JobManager(FileConfiguration, CoreSysAttributes):
@@ -225,16 +262,37 @@ class JobManager(FileConfiguration, CoreSysAttributes):
         """Return true if there is an active job for the current asyncio task."""
         return _CURRENT_JOB.get() is not None

-    def _notify_on_job_change(
+    def _on_job_change(
         self, job: SupervisorJob, attribute: Attribute, value: Any
     ) -> None:
-        """Notify Home Assistant of a change to a job and bus on job start/end."""
+        """Take on change actions such as notify home assistant and sync progress."""
+        # Job object will be before the change. Combine the change with current data
         if attribute.name == "errors":
             value = [err.as_dict() for err in value]
+        job_data = job.as_dict() | {attribute.name: value}

-        self.sys_homeassistant.websocket.supervisor_event(
-            WSEvent.JOB, job.as_dict() | {attribute.name: value}
-        )
+        # Notify Home Assistant of change if its not internal
+        if not job.internal:
+            self.sys_homeassistant.websocket.supervisor_event(WSEvent.JOB, job_data)
+
+        # If we have any parent job syncs, sync progress to them
+        for sync in job.parent_job_syncs:
+            try:
+                parent_job = self.get_job(sync.uuid)
+            except JobNotFound:
+                # Shouldn't happen but failure to find a parent for progress
+                # reporting shouldn't raise and break the active job
+                continue
+
+            progress = min(
+                100,
+                sync.starting_progress
+                + (sync.progress_allocation * job_data["progress"]),
+            )
+            # Using max would always trigger on change even if progress was unchanged
+            # pylint: disable-next=R1731
+            if parent_job.progress < progress:  # noqa: PLR1730
+                parent_job.progress = progress

         if attribute.name == "done":
             if value is False:
@@ -249,16 +307,52 @@ class JobManager(FileConfiguration, CoreSysAttributes):
         initial_stage: str | None = None,
         internal: bool = False,
         parent_id: str | None = DEFAULT,  # type: ignore
+        child_job_syncs: list[ChildJobSyncFilter] | None = None,
     ) -> SupervisorJob:
         """Create a new job."""
         job = SupervisorJob(
             name,
             reference=reference,
             stage=initial_stage,
-            on_change=None if internal else self._notify_on_job_change,
+            on_change=self._on_job_change,
             internal=internal,
+            child_job_syncs=child_job_syncs,
             **({} if parent_id == DEFAULT else {"parent_id": parent_id}),  # type: ignore
         )

+        # Shouldn't happen but inability to find a parent for progress reporting
+        # shouldn't raise and break the active job
+        with suppress(JobNotFound):
+            curr_parent = job
+            while curr_parent.parent_id:
+                curr_parent = self.get_job(curr_parent.parent_id)
+                if not curr_parent.child_job_syncs:
+                    continue
+
+                # HACK: If parent trigger the same child job, we just skip this second
+                # sync. Maybe it would be better to have this reflected in the job stage
+                # and reset progress to 0 instead? There is no support for such stage
+                # information on Core update entities today though.
+                if curr_parent.done is True or curr_parent.progress >= 100:
+                    _LOGGER.debug(
+                        "Skipping parent job sync for done parent job %s",
+                        curr_parent.name,
+                    )
+                    continue
+
+                # Break after first match at each parent as it doesn't make sense
+                # to match twice. But it could match multiple parents
+                for sync in curr_parent.child_job_syncs:
+                    if sync.matches(job):
+                        job.parent_job_syncs.append(
+                            ParentJobSync(
+                                curr_parent.uuid,
+                                starting_progress=curr_parent.progress,
+                                progress_allocation=sync.progress_allocation,
+                            )
+                        )
+                        break
+
         self._jobs[job.uuid] = job
         return job
```
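The parent sync in `_on_job_change` reduces to `min(100, starting_progress + progress_allocation * child_progress)`. A short sketch with assumed numbers:

```python
# Sketch: the progress math behind ParentJobSync. Values are illustrative,
# not taken from the diff (the syncs added above all use allocation 1.0).
def synced_parent_progress(
    starting_progress: float, progress_allocation: float, child_progress: float
) -> float:
    """Mirror of the min(100, start + allocation * child) rule above."""
    return min(100.0, starting_progress + progress_allocation * child_progress)

# A parent at 10% that allocates 90% of its bar to an image pull:
print(synced_parent_progress(10.0, 0.9, 0.0))    # 10.0
print(synced_parent_progress(10.0, 0.9, 50.0))   # 55.0
print(synced_parent_progress(10.0, 0.9, 100.0))  # 100.0
```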
supervisor/jobs/decorator.py

```diff
@@ -24,7 +24,7 @@ from ..resolution.const import (
     UnsupportedReason,
 )
 from ..utils.sentry import async_capture_exception
-from . import SupervisorJob
+from . import ChildJobSyncFilter, SupervisorJob
 from .const import JobConcurrency, JobCondition, JobThrottle
 from .job_group import JobGroup

@@ -48,6 +48,7 @@ class Job(CoreSysAttributes):
         | None = None,
         throttle_max_calls: int | None = None,
         internal: bool = False,
+        child_job_syncs: list[ChildJobSyncFilter] | None = None,
     ):  # pylint: disable=too-many-positional-arguments
         """Initialize the Job decorator.

@@ -61,6 +62,7 @@ class Job(CoreSysAttributes):
             throttle_period (timedelta | Callable | None): Throttle period as a timedelta or a callable returning a timedelta (for throttled jobs).
             throttle_max_calls (int | None): Maximum number of calls allowed within the throttle period (for rate-limited jobs).
             internal (bool): Whether the job is internal (not exposed through the Supervisor API). Defaults to False.
+            child_job_syncs (list[ChildJobSyncFilter] | None): Use if the job's progress should be kept in sync with progress of one or more of its child jobs.

         Raises:
             RuntimeError: If job name is not unique, or required throttle parameters are missing for the selected throttle policy.
@@ -80,6 +82,7 @@ class Job(CoreSysAttributes):
         self._last_call: dict[str | None, datetime] = {}
         self._rate_limited_calls: dict[str | None, list[datetime]] | None = None
         self._internal = internal
+        self._child_job_syncs = child_job_syncs

         self.concurrency = concurrency
         self.throttle = throttle
@@ -258,6 +261,7 @@ class Job(CoreSysAttributes):
                 job = _job__use_existing
                 job.name = self.name
                 job.internal = self._internal
+                job.child_job_syncs = self._child_job_syncs
                 if job_group:
                     job.reference = job_group.job_reference
                 else:
@@ -265,6 +269,7 @@ class Job(CoreSysAttributes):
                     self.name,
                     job_group.job_reference if job_group else None,
                     internal=self._internal,
+                    child_job_syncs=self._child_job_syncs,
                 )

             try:
```
@@ -76,13 +76,6 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
        """Return True if a task is in progress."""
        return self.instance.in_progress

    def check_trust(self) -> Awaitable[None]:
        """Calculate plugin docker content trust.

        Return Coroutine.
        """
        return self.instance.check_trust()

    def logs(self) -> Awaitable[bytes]:
        """Get docker plugin logs.

@@ -1,59 +0,0 @@
"""Helpers to check supervisor trust."""

import logging

from ...const import CoreState
from ...coresys import CoreSys
from ...exceptions import CodeNotaryError, CodeNotaryUntrusted
from ..const import ContextType, IssueType, UnhealthyReason
from .base import CheckBase

_LOGGER: logging.Logger = logging.getLogger(__name__)


def setup(coresys: CoreSys) -> CheckBase:
    """Check setup function."""
    return CheckSupervisorTrust(coresys)


class CheckSupervisorTrust(CheckBase):
    """CheckSystemTrust class for check."""

    async def run_check(self) -> None:
        """Run check if not affected by issue."""
        if not self.sys_security.content_trust:
            _LOGGER.warning(
                "Skipping %s, content_trust is globally disabled", self.slug
            )
            return

        try:
            await self.sys_supervisor.check_trust()
        except CodeNotaryUntrusted:
            self.sys_resolution.add_unhealthy_reason(UnhealthyReason.UNTRUSTED)
            self.sys_resolution.create_issue(IssueType.TRUST, ContextType.SUPERVISOR)
        except CodeNotaryError:
            pass

    async def approve_check(self, reference: str | None = None) -> bool:
        """Approve check if it is affected by issue."""
        try:
            await self.sys_supervisor.check_trust()
        except CodeNotaryError:
            return True
        return False

    @property
    def issue(self) -> IssueType:
        """Return a IssueType enum."""
        return IssueType.TRUST

    @property
    def context(self) -> ContextType:
        """Return a ContextType enum."""
        return ContextType.SUPERVISOR

    @property
    def states(self) -> list[CoreState]:
        """Return a list of valid states when this check can run."""
        return [CoreState.RUNNING, CoreState.STARTUP]

@@ -39,7 +39,6 @@ class UnsupportedReason(StrEnum):
    APPARMOR = "apparmor"
    CGROUP_VERSION = "cgroup_version"
    CONNECTIVITY_CHECK = "connectivity_check"
    CONTENT_TRUST = "content_trust"
    DBUS = "dbus"
    DNS_SERVER = "dns_server"
    DOCKER_CONFIGURATION = "docker_configuration"
@@ -54,7 +53,6 @@ class UnsupportedReason(StrEnum):
    PRIVILEGED = "privileged"
    RESTART_POLICY = "restart_policy"
    SOFTWARE = "software"
    SOURCE_MODS = "source_mods"
    SUPERVISOR_VERSION = "supervisor_version"
    SYSTEMD = "systemd"
    SYSTEMD_JOURNAL = "systemd_journal"
@@ -103,7 +101,6 @@ class IssueType(StrEnum):
    PWNED = "pwned"
    REBOOT_REQUIRED = "reboot_required"
    SECURITY = "security"
    TRUST = "trust"
    UPDATE_FAILED = "update_failed"
    UPDATE_ROLLBACK = "update_rollback"

@@ -115,7 +112,6 @@ class SuggestionType(StrEnum):
    CLEAR_FULL_BACKUP = "clear_full_backup"
    CREATE_FULL_BACKUP = "create_full_backup"
    DISABLE_BOOT = "disable_boot"
    EXECUTE_INTEGRITY = "execute_integrity"
    EXECUTE_REBOOT = "execute_reboot"
    EXECUTE_REBUILD = "execute_rebuild"
    EXECUTE_RELOAD = "execute_reload"

@@ -1,34 +0,0 @@
"""Evaluation class for Content Trust."""

from ...const import CoreState
from ...coresys import CoreSys
from ..const import UnsupportedReason
from .base import EvaluateBase


def setup(coresys: CoreSys) -> EvaluateBase:
    """Initialize evaluation-setup function."""
    return EvaluateContentTrust(coresys)


class EvaluateContentTrust(EvaluateBase):
    """Evaluate system content trust level."""

    @property
    def reason(self) -> UnsupportedReason:
        """Return a UnsupportedReason enum."""
        return UnsupportedReason.CONTENT_TRUST

    @property
    def on_failure(self) -> str:
        """Return a string that is printed when self.evaluate is True."""
        return "System run with disabled trusted content security."

    @property
    def states(self) -> list[CoreState]:
        """Return a list of valid states when this evaluation can run."""
        return [CoreState.INITIALIZE, CoreState.SETUP, CoreState.RUNNING]

    async def evaluate(self) -> bool:
        """Run evaluation."""
        return not self.sys_security.content_trust

@@ -8,7 +8,7 @@ from ..const import UnsupportedReason
from .base import EvaluateBase

EXPECTED_LOGGING = "journald"
EXPECTED_STORAGE = "overlay2"
EXPECTED_STORAGE = ("overlay2", "overlayfs")

_LOGGER: logging.Logger = logging.getLogger(__name__)

@@ -41,14 +41,18 @@ class EvaluateDockerConfiguration(EvaluateBase):
        storage_driver = self.sys_docker.info.storage
        logging_driver = self.sys_docker.info.logging

        if storage_driver != EXPECTED_STORAGE:
        is_unsupported = False

        if storage_driver not in EXPECTED_STORAGE:
            is_unsupported = True
            _LOGGER.warning(
                "Docker storage driver %s is not supported!", storage_driver
            )

        if logging_driver != EXPECTED_LOGGING:
            is_unsupported = True
            _LOGGER.warning(
                "Docker logging driver %s is not supported!", logging_driver
            )

        return storage_driver != EXPECTED_STORAGE or logging_driver != EXPECTED_LOGGING
        return is_unsupported

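Widening EXPECTED_STORAGE from a single string to a tuple is why the comparison flips from `!=` to `not in`, with the result accumulated in `is_unsupported`. A minimal sketch of the widened check in isolation:

```python
# Sketch: accept any of several Docker storage drivers instead of exactly one.
EXPECTED_STORAGE = ("overlay2", "overlayfs")

def storage_supported(storage_driver: str) -> bool:
    # `in` on a tuple is a membership test, so both drivers now pass.
    return storage_driver in EXPECTED_STORAGE

assert storage_supported("overlayfs")
assert not storage_supported("aufs")
```
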
@@ -1,72 +0,0 @@
"""Evaluation class for Content Trust."""

import errno
import logging
from pathlib import Path

from ...const import CoreState
from ...coresys import CoreSys
from ...exceptions import CodeNotaryError, CodeNotaryUntrusted
from ...utils.codenotary import calc_checksum_path_sourcecode
from ..const import ContextType, IssueType, UnhealthyReason, UnsupportedReason
from .base import EvaluateBase

_SUPERVISOR_SOURCE = Path("/usr/src/supervisor/supervisor")
_LOGGER: logging.Logger = logging.getLogger(__name__)


def setup(coresys: CoreSys) -> EvaluateBase:
    """Initialize evaluation-setup function."""
    return EvaluateSourceMods(coresys)


class EvaluateSourceMods(EvaluateBase):
    """Evaluate supervisor source modifications."""

    @property
    def reason(self) -> UnsupportedReason:
        """Return a UnsupportedReason enum."""
        return UnsupportedReason.SOURCE_MODS

    @property
    def on_failure(self) -> str:
        """Return a string that is printed when self.evaluate is True."""
        return "System detect unauthorized source code modifications."

    @property
    def states(self) -> list[CoreState]:
        """Return a list of valid states when this evaluation can run."""
        return [CoreState.RUNNING]

    async def evaluate(self) -> bool:
        """Run evaluation."""
        if not self.sys_security.content_trust:
            _LOGGER.warning("Disabled content-trust, skipping evaluation")
            return False

        # Calculate sum of the source code
        try:
            checksum = await self.sys_run_in_executor(
                calc_checksum_path_sourcecode, _SUPERVISOR_SOURCE
            )
        except OSError as err:
            if err.errno == errno.EBADMSG:
                self.sys_resolution.add_unhealthy_reason(
                    UnhealthyReason.OSERROR_BAD_MESSAGE
                )

            self.sys_resolution.create_issue(
                IssueType.CORRUPT_FILESYSTEM, ContextType.SYSTEM
            )
            _LOGGER.error("Can't calculate checksum of source code: %s", err)
            return False

        # Validate checksum
        try:
            await self.sys_security.verify_own_content(checksum)
        except CodeNotaryUntrusted:
            return True
        except CodeNotaryError:
            pass

        return False

@@ -1,67 +0,0 @@
"""Helpers to check and fix issues with free space."""

from datetime import timedelta
import logging

from ...coresys import CoreSys
from ...exceptions import ResolutionFixupError, ResolutionFixupJobError
from ...jobs.const import JobCondition, JobThrottle
from ...jobs.decorator import Job
from ...security.const import ContentTrustResult
from ..const import ContextType, IssueType, SuggestionType
from .base import FixupBase

_LOGGER: logging.Logger = logging.getLogger(__name__)


def setup(coresys: CoreSys) -> FixupBase:
    """Check setup function."""
    return FixupSystemExecuteIntegrity(coresys)


class FixupSystemExecuteIntegrity(FixupBase):
    """Storage class for fixup."""

    @Job(
        name="fixup_system_execute_integrity_process",
        conditions=[JobCondition.INTERNET_SYSTEM],
        on_condition=ResolutionFixupJobError,
        throttle_period=timedelta(hours=8),
        throttle=JobThrottle.THROTTLE,
    )
    async def process_fixup(self, reference: str | None = None) -> None:
        """Initialize the fixup class."""
        result = await self.sys_security.integrity_check()

        if ContentTrustResult.FAILED in (result.core, result.supervisor):
            raise ResolutionFixupError()

        for plugin in result.plugins:
            if plugin != ContentTrustResult.FAILED:
                continue
            raise ResolutionFixupError()

        for addon in result.addons:
            if addon != ContentTrustResult.FAILED:
                continue
            raise ResolutionFixupError()

    @property
    def suggestion(self) -> SuggestionType:
        """Return a SuggestionType enum."""
        return SuggestionType.EXECUTE_INTEGRITY

    @property
    def context(self) -> ContextType:
        """Return a ContextType enum."""
        return ContextType.SYSTEM

    @property
    def issues(self) -> list[IssueType]:
        """Return a IssueType enum list."""
        return [IssueType.TRUST]

    @property
    def auto(self) -> bool:
        """Return if a fixup can be apply as auto fix."""
        return True

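The @Job options on the deleted fixup rate-limited how often it could run (at most once per eight hours). A minimal sketch of that kind of throttle guard, simplified from the decorator and not its actual implementation:

```python
from datetime import datetime, timedelta

class Throttle:
    """Skip calls that arrive before the throttle period has elapsed."""

    def __init__(self, period: timedelta) -> None:
        self.period = period
        self.last_call: datetime | None = None

    def allow(self) -> bool:
        now = datetime.now()
        if self.last_call and now - self.last_call < self.period:
            return False  # throttled: drop this invocation
        self.last_call = now
        return True

gate = Throttle(timedelta(hours=8))
assert gate.allow()      # first call runs
assert not gate.allow()  # an immediate retry is throttled
```
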
@@ -52,5 +52,5 @@ class ResolutionNotify(CoreSysAttributes):
                _LOGGER.debug("Successfully created persistent_notification")
            else:
                _LOGGER.error("Can't create persistant notification")
        except HomeAssistantAPIError:
            _LOGGER.error("Can't create persistant notification")
        except HomeAssistantAPIError as err:
            _LOGGER.error("Can't create persistant notification: %s", err)

@@ -1,24 +0,0 @@
"""Security constants."""

from enum import StrEnum

import attr


class ContentTrustResult(StrEnum):
    """Content trust result enum."""

    PASS = "pass"
    ERROR = "error"
    FAILED = "failed"
    UNTESTED = "untested"


@attr.s
class IntegrityResult:
    """Result of a full integrity check."""

    supervisor: ContentTrustResult = attr.ib(default=ContentTrustResult.UNTESTED)
    core: ContentTrustResult = attr.ib(default=ContentTrustResult.UNTESTED)
    plugins: dict[str, ContentTrustResult] = attr.ib(default={})
    addons: dict[str, ContentTrustResult] = attr.ib(default={})

@@ -4,27 +4,12 @@ from __future__ import annotations

import logging

from ..const import (
    ATTR_CONTENT_TRUST,
    ATTR_FORCE_SECURITY,
    ATTR_PWNED,
    FILE_HASSIO_SECURITY,
)
from ..const import ATTR_FORCE_SECURITY, ATTR_PWNED, FILE_HASSIO_SECURITY
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import (
    CodeNotaryError,
    CodeNotaryUntrusted,
    PwnedError,
    SecurityJobError,
)
from ..jobs.const import JobConcurrency
from ..jobs.decorator import Job, JobCondition
from ..resolution.const import ContextType, IssueType, SuggestionType
from ..utils.codenotary import cas_validate
from ..exceptions import PwnedError
from ..utils.common import FileConfiguration
from ..utils.pwned import check_pwned_password
from ..validate import SCHEMA_SECURITY_CONFIG
from .const import ContentTrustResult, IntegrityResult

_LOGGER: logging.Logger = logging.getLogger(__name__)

@@ -37,16 +22,6 @@ class Security(FileConfiguration, CoreSysAttributes):
        super().__init__(FILE_HASSIO_SECURITY, SCHEMA_SECURITY_CONFIG)
        self.coresys = coresys

    @property
    def content_trust(self) -> bool:
        """Return if content trust is enabled/disabled."""
        return self._data[ATTR_CONTENT_TRUST]

    @content_trust.setter
    def content_trust(self, value: bool) -> None:
        """Set content trust is enabled/disabled."""
        self._data[ATTR_CONTENT_TRUST] = value

    @property
    def force(self) -> bool:
        """Return if force security is enabled/disabled."""
@@ -67,30 +42,6 @@ class Security(FileConfiguration, CoreSysAttributes):
        """Set pwned is enabled/disabled."""
        self._data[ATTR_PWNED] = value

    async def verify_content(self, signer: str, checksum: str) -> None:
        """Verify content on CAS."""
        if not self.content_trust:
            _LOGGER.warning("Disabled content-trust, skip validation")
            return

        try:
            await cas_validate(signer, checksum)
        except CodeNotaryUntrusted:
            raise
        except CodeNotaryError:
            if self.force:
                raise
            self.sys_resolution.create_issue(
                IssueType.TRUST,
                ContextType.SYSTEM,
                suggestions=[SuggestionType.EXECUTE_INTEGRITY],
            )
            return

    async def verify_own_content(self, checksum: str) -> None:
        """Verify content from HA org."""
        return await self.verify_content("notary@home-assistant.io", checksum)

    async def verify_secret(self, pwned_hash: str) -> None:
        """Verify pwned state of a secret."""
        if not self.pwned:
@@ -103,73 +54,3 @@ class Security(FileConfiguration, CoreSysAttributes):
        if self.force:
            raise
        return

    @Job(
        name="security_manager_integrity_check",
        conditions=[JobCondition.INTERNET_SYSTEM],
        on_condition=SecurityJobError,
        concurrency=JobConcurrency.REJECT,
    )
    async def integrity_check(self) -> IntegrityResult:
        """Run a full system integrity check of the platform.

        We only allow to install trusted content.
        This is a out of the band manual check.
        """
        result: IntegrityResult = IntegrityResult()
        if not self.content_trust:
            _LOGGER.warning(
                "Skipping integrity check, content_trust is globally disabled"
            )
            return result

        # Supervisor
        try:
            await self.sys_supervisor.check_trust()
            result.supervisor = ContentTrustResult.PASS
        except CodeNotaryUntrusted:
            result.supervisor = ContentTrustResult.ERROR
            self.sys_resolution.create_issue(IssueType.TRUST, ContextType.SUPERVISOR)
        except CodeNotaryError:
            result.supervisor = ContentTrustResult.FAILED

        # Core
        try:
            await self.sys_homeassistant.core.check_trust()
            result.core = ContentTrustResult.PASS
        except CodeNotaryUntrusted:
            result.core = ContentTrustResult.ERROR
            self.sys_resolution.create_issue(IssueType.TRUST, ContextType.CORE)
        except CodeNotaryError:
            result.core = ContentTrustResult.FAILED

        # Plugins
        for plugin in self.sys_plugins.all_plugins:
            try:
                await plugin.check_trust()
                result.plugins[plugin.slug] = ContentTrustResult.PASS
            except CodeNotaryUntrusted:
                result.plugins[plugin.slug] = ContentTrustResult.ERROR
                self.sys_resolution.create_issue(
                    IssueType.TRUST, ContextType.PLUGIN, reference=plugin.slug
                )
            except CodeNotaryError:
                result.plugins[plugin.slug] = ContentTrustResult.FAILED

        # Add-ons
        for addon in self.sys_addons.installed:
            if not addon.signed:
                result.addons[addon.slug] = ContentTrustResult.UNTESTED
                continue
            try:
                await addon.check_trust()
                result.addons[addon.slug] = ContentTrustResult.PASS
            except CodeNotaryUntrusted:
                result.addons[addon.slug] = ContentTrustResult.ERROR
                self.sys_resolution.create_issue(
                    IssueType.TRUST, ContextType.ADDON, reference=addon.slug
                )
            except CodeNotaryError:
                result.addons[addon.slug] = ContentTrustResult.FAILED

        return result

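The deleted integrity_check used one pattern throughout: map a trust probe's outcome onto a three-state result. A minimal sketch of that mapping with a generic check callable standing in for check_trust (the exception and enum names here are stand-ins, not the Supervisor types):

```python
from collections.abc import Awaitable, Callable
from enum import StrEnum

class TrustResult(StrEnum):
    PASS = "pass"      # probe succeeded
    ERROR = "error"    # content is explicitly untrusted
    FAILED = "failed"  # the probe itself failed (backend error)

class Untrusted(Exception): ...
class BackendError(Exception): ...

async def probe(check: Callable[[], Awaitable[None]]) -> TrustResult:
    try:
        await check()
    except Untrusted:
        return TrustResult.ERROR
    except BackendError:
        return TrustResult.FAILED
    return TrustResult.PASS
```
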
@@ -13,6 +13,8 @@ import aiohttp
from aiohttp.client_exceptions import ClientError
from awesomeversion import AwesomeVersion, AwesomeVersionException

from supervisor.jobs import ChildJobSyncFilter

from .const import (
    ATTR_SUPERVISOR_INTERNET,
    SUPERVISOR_VERSION,
@@ -23,8 +25,6 @@ from .coresys import CoreSys, CoreSysAttributes
from .docker.stats import DockerStats
from .docker.supervisor import DockerSupervisor
from .exceptions import (
    CodeNotaryError,
    CodeNotaryUntrusted,
    DockerError,
    HostAppArmorError,
    SupervisorAppArmorError,
@@ -35,7 +35,6 @@ from .exceptions import (
from .jobs.const import JobCondition, JobThrottle
from .jobs.decorator import Job
from .resolution.const import ContextType, IssueType, UnhealthyReason
from .utils.codenotary import calc_checksum
from .utils.sentry import async_capture_exception

_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -148,20 +147,6 @@ class Supervisor(CoreSysAttributes):
                _LOGGER.error,
            ) from err

        # Validate
        try:
            await self.sys_security.verify_own_content(calc_checksum(data))
        except CodeNotaryUntrusted as err:
            raise SupervisorAppArmorError(
                "Content-Trust is broken for the AppArmor profile fetch!",
                _LOGGER.critical,
            ) from err
        except CodeNotaryError as err:
            raise SupervisorAppArmorError(
                f"CodeNotary error while processing AppArmor fetch: {err!s}",
                _LOGGER.error,
            ) from err

        # Load
        temp_dir: TemporaryDirectory | None = None

@@ -195,6 +180,15 @@ class Supervisor(CoreSysAttributes):
        if temp_dir:
            await self.sys_run_in_executor(temp_dir.cleanup)

    @Job(
        name="supervisor_update",
        # We assume for now the docker image pull is 100% of this task. But from
        # a user perspective that isn't true. Other steps that take time which
        # is not accounted for in progress include: app armor update and restart
        child_job_syncs=[
            ChildJobSyncFilter("docker_interface_install", progress_allocation=1.0)
        ],
    )
    async def update(self, version: AwesomeVersion | None = None) -> None:
        """Update Supervisor version."""
        version = version or self.latest_version or self.version
@@ -221,6 +215,7 @@ class Supervisor(CoreSysAttributes):

        # Update container
        _LOGGER.info("Update Supervisor to version %s", version)

        try:
            await self.instance.install(version, image=image)
            await self.instance.update_start_tag(image, version)
@@ -261,13 +256,6 @@ class Supervisor(CoreSysAttributes):
        """
        return self.instance.logs()

    def check_trust(self) -> Awaitable[None]:
        """Calculate Supervisor docker content trust.

        Return Coroutine.
        """
        return self.instance.check_trust()

    async def stats(self) -> DockerStats:
        """Return stats of Supervisor."""
        try:

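The comment in the new @Job block concedes that giving the image pull the whole progress bar is an approximation. If the other steps were ever tracked as child jobs too, the allocation could be split; a hypothetical sketch (the second filter name is invented for illustration, and allocations should sum to at most 1.0):

```python
from supervisor.jobs import ChildJobSyncFilter

# Hypothetical: weight child jobs so the bar also covers post-pull work.
child_job_syncs = [
    ChildJobSyncFilter("docker_interface_install", progress_allocation=0.9),
    ChildJobSyncFilter("supervisor_apparmor_update", progress_allocation=0.1),
]
```
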
@@ -31,14 +31,8 @@ from .const import (
    UpdateChannel,
)
from .coresys import CoreSys, CoreSysAttributes
from .exceptions import (
    CodeNotaryError,
    CodeNotaryUntrusted,
    UpdaterError,
    UpdaterJobError,
)
from .exceptions import UpdaterError, UpdaterJobError
from .jobs.decorator import Job, JobCondition
from .utils.codenotary import calc_checksum
from .utils.common import FileConfiguration
from .validate import SCHEMA_UPDATER_CONFIG

@@ -289,19 +283,6 @@ class Updater(FileConfiguration, CoreSysAttributes):
        self.sys_bus.remove_listener(self._connectivity_listener)
        self._connectivity_listener = None

        # Validate
        try:
            await self.sys_security.verify_own_content(calc_checksum(data))
        except CodeNotaryUntrusted as err:
            raise UpdaterError(
                "Content-Trust is broken for the version file fetch!", _LOGGER.critical
            ) from err
        except CodeNotaryError as err:
            raise UpdaterError(
                f"CodeNotary error while processing version fetch: {err!s}",
                _LOGGER.error,
            ) from err

        # Parse data
        try:
            data = json.loads(data)

@@ -1,109 +0,0 @@
"""Small wrapper for CodeNotary."""

from __future__ import annotations

import asyncio
import hashlib
import json
import logging
from pathlib import Path
import shlex
from typing import Final

from dirhash import dirhash

from ..exceptions import CodeNotaryBackendError, CodeNotaryError, CodeNotaryUntrusted
from . import clean_env

_LOGGER: logging.Logger = logging.getLogger(__name__)

_CAS_CMD: str = (
    "cas authenticate --signerID {signer} --silent --output json --hash {sum}"
)
_CACHE: set[tuple[str, str]] = set()


_ATTR_ERROR: Final = "error"
_ATTR_STATUS: Final = "status"
_FALLBACK_ERROR: Final = "Unknown CodeNotary backend issue"


def calc_checksum(data: str | bytes) -> str:
    """Generate checksum for CodeNotary."""
    if isinstance(data, str):
        return hashlib.sha256(data.encode()).hexdigest()
    return hashlib.sha256(data).hexdigest()


def calc_checksum_path_sourcecode(folder: Path) -> str:
    """Calculate checksum for a path source code.

    Need catch OSError.
    """
    return dirhash(folder.as_posix(), "sha256", match=["*.py"])


# pylint: disable=unreachable
async def cas_validate(
    signer: str,
    checksum: str,
) -> None:
    """Validate data against CodeNotary."""
    return
    if (checksum, signer) in _CACHE:
        return

    # Generate command for request
    command = shlex.split(_CAS_CMD.format(signer=signer, sum=checksum))

    # Request notary authorization
    _LOGGER.debug("Send cas command: %s", command)
    try:
        proc = await asyncio.create_subprocess_exec(
            *command,
            stdin=asyncio.subprocess.DEVNULL,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            env=clean_env(),
        )

        async with asyncio.timeout(15):
            data, error = await proc.communicate()
    except TimeoutError:
        raise CodeNotaryBackendError(
            "Timeout while processing CodeNotary", _LOGGER.warning
        ) from None
    except OSError as err:
        raise CodeNotaryError(
            f"CodeNotary fatal error: {err!s}", _LOGGER.critical
        ) from err

    # Check if Notarized
    if proc.returncode != 0 and not data:
        if error:
            try:
                error = error.decode("utf-8")
            except UnicodeDecodeError as err:
                raise CodeNotaryBackendError(_FALLBACK_ERROR, _LOGGER.warning) from err
            if "not notarized" in error:
                raise CodeNotaryUntrusted()
        else:
            error = _FALLBACK_ERROR
        raise CodeNotaryBackendError(error, _LOGGER.warning)

    # Parse data
    try:
        data_json = json.loads(data)
        _LOGGER.debug("CodeNotary response with: %s", data_json)
    except (json.JSONDecodeError, UnicodeDecodeError) as err:
        raise CodeNotaryError(
            f"Can't parse CodeNotary output: {data!s} - {err!s}", _LOGGER.error
        ) from err

    if _ATTR_ERROR in data_json:
        raise CodeNotaryBackendError(data_json[_ATTR_ERROR], _LOGGER.warning)

    if data_json[_ATTR_STATUS] == 0:
        _CACHE.add((checksum, signer))
    else:
        raise CodeNotaryUntrusted()

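Note the bare `return` at the top of cas_validate together with `# pylint: disable=unreachable`: the wrapper had already been neutered by an early exit before this branch deletes the module outright. A minimal sketch of that kill-switch pattern, which keeps a function's body around for reference while making it dead code:

```python
# Sketch: disable a function body without deleting it yet.
# pylint: disable=unreachable
async def validate(signer: str, checksum: str) -> None:
    """Validation disabled; the body is kept only for reference."""
    return  # early exit makes everything below unreachable
    raise RuntimeError("never reached")  # the old implementation lived here
```
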
@@ -117,7 +117,7 @@ async def journal_logs_reader(
            continue

        # strip \n for simple fields before decoding
        entries[field_name] = data[:-1].decode("utf-8")
        entries[field_name] = data[:-1].decode("utf-8", errors="replace")


def _parse_boot_json(boot_json_bytes: bytes) -> tuple[int, str]:

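Switching to errors="replace" keeps a single bad byte in a journal field from aborting the whole log read. A minimal example of the difference:

```python
data = b"host\xffname\n"  # journald field containing an invalid UTF-8 byte

# Strict decoding (the old behavior) raises on the bad byte:
try:
    data[:-1].decode("utf-8")
except UnicodeDecodeError:
    pass

# errors="replace" substitutes U+FFFD and keeps the reader alive:
assert data[:-1].decode("utf-8", errors="replace") == "host\ufffdname"
```
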
@@ -12,7 +12,6 @@ from .const import (
    ATTR_AUTO_UPDATE,
    ATTR_CHANNEL,
    ATTR_CLI,
    ATTR_CONTENT_TRUST,
    ATTR_COUNTRY,
    ATTR_DEBUG,
    ATTR_DEBUG_BLOCK,
@@ -229,7 +228,6 @@ SCHEMA_INGRESS_CONFIG = vol.Schema(
# pylint: disable=no-value-for-parameter
SCHEMA_SECURITY_CONFIG = vol.Schema(
    {
        vol.Optional(ATTR_CONTENT_TRUST, default=True): vol.Boolean(),
        vol.Optional(ATTR_PWNED, default=True): vol.Boolean(),
        vol.Optional(ATTR_FORCE_SECURITY, default=False): vol.Boolean(),
    },

@@ -419,3 +419,71 @@ def test_valid_schema():
    config["schema"] = {"field": "invalid"}
    with pytest.raises(vol.Invalid):
        assert vd.SCHEMA_ADDON_CONFIG(config)


def test_ulimits_simple_format():
    """Test ulimits simple format validation."""
    config = load_json_fixture("basic-addon-config.json")

    config["ulimits"] = {"nofile": 65535, "nproc": 32768, "memlock": 134217728}

    valid_config = vd.SCHEMA_ADDON_CONFIG(config)
    assert valid_config["ulimits"]["nofile"] == 65535
    assert valid_config["ulimits"]["nproc"] == 32768
    assert valid_config["ulimits"]["memlock"] == 134217728


def test_ulimits_detailed_format():
    """Test ulimits detailed format validation."""
    config = load_json_fixture("basic-addon-config.json")

    config["ulimits"] = {
        "nofile": {"soft": 20000, "hard": 40000},
        "nproc": 32768,  # Mixed format should work
        "memlock": {"soft": 67108864, "hard": 134217728},
    }

    valid_config = vd.SCHEMA_ADDON_CONFIG(config)
    assert valid_config["ulimits"]["nofile"]["soft"] == 20000
    assert valid_config["ulimits"]["nofile"]["hard"] == 40000
    assert valid_config["ulimits"]["nproc"] == 32768
    assert valid_config["ulimits"]["memlock"]["soft"] == 67108864
    assert valid_config["ulimits"]["memlock"]["hard"] == 134217728


def test_ulimits_empty_dict():
    """Test ulimits with empty dict (default)."""
    config = load_json_fixture("basic-addon-config.json")

    valid_config = vd.SCHEMA_ADDON_CONFIG(config)
    assert valid_config["ulimits"] == {}


def test_ulimits_invalid_values():
    """Test ulimits with invalid values."""
    config = load_json_fixture("basic-addon-config.json")

    # Invalid string values
    config["ulimits"] = {"nofile": "invalid"}
    with pytest.raises(vol.Invalid):
        vd.SCHEMA_ADDON_CONFIG(config)

    # Invalid detailed format
    config["ulimits"] = {"nofile": {"invalid_key": 1000}}
    with pytest.raises(vol.Invalid):
        vd.SCHEMA_ADDON_CONFIG(config)

    # Missing hard value in detailed format
    config["ulimits"] = {"nofile": {"soft": 1000}}
    with pytest.raises(vol.Invalid):
        vd.SCHEMA_ADDON_CONFIG(config)

    # Missing soft value in detailed format
    config["ulimits"] = {"nofile": {"hard": 1000}}
    with pytest.raises(vol.Invalid):
        vd.SCHEMA_ADDON_CONFIG(config)

    # Empty dict in detailed format
    config["ulimits"] = {"nofile": {}}
    with pytest.raises(vol.Invalid):
        vd.SCHEMA_ADDON_CONFIG(config)

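These tests accept both a bare integer and a {soft, hard} mapping per limit. A minimal sketch of normalizing the simple form into the detailed one (the helper name is illustrative, not the Supervisor schema):

```python
def normalize_ulimit(value: int | dict[str, int]) -> dict[str, int]:
    """Expand the simple integer form to an explicit soft/hard pair."""
    if isinstance(value, int):
        return {"soft": value, "hard": value}
    if set(value) != {"soft", "hard"}:
        raise ValueError("detailed ulimit needs both 'soft' and 'hard'")
    return value

assert normalize_ulimit(65535) == {"soft": 65535, "hard": 65535}
assert normalize_ulimit({"soft": 20000, "hard": 40000})["hard"] == 40000
```
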
@@ -2,16 +2,19 @@

import asyncio
from pathlib import Path
from unittest.mock import MagicMock, PropertyMock, patch
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch

from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
import pytest

from supervisor.backups.manager import BackupManager
from supervisor.const import CoreState
from supervisor.coresys import CoreSys
from supervisor.docker.homeassistant import DockerHomeAssistant
from supervisor.docker.interface import DockerInterface
from supervisor.homeassistant.api import APIState
from supervisor.homeassistant.api import APIState, HomeAssistantAPI
from supervisor.homeassistant.const import WSEvent
from supervisor.homeassistant.core import HomeAssistantCore
from supervisor.homeassistant.module import HomeAssistant

@@ -271,3 +274,96 @@ async def test_background_home_assistant_update_fails_fast(
    assert resp.status == 400
    body = await resp.json()
    assert body["message"] == "Version 2025.8.3 is already installed"


@pytest.mark.usefixtures("tmp_supervisor_data")
async def test_api_progress_updates_home_assistant_update(
    api_client: TestClient, coresys: CoreSys, ha_ws_client: AsyncMock
):
    """Test progress updates sent to Home Assistant for updates."""
    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
    coresys.core.set_state(CoreState.RUNNING)
    coresys.docker.docker.api.pull.return_value = load_json_fixture(
        "docker_pull_image_log.json"
    )
    coresys.homeassistant.version = AwesomeVersion("2025.8.0")

    with (
        patch.object(
            DockerHomeAssistant,
            "version",
            new=PropertyMock(return_value=AwesomeVersion("2025.8.0")),
        ),
        patch.object(
            HomeAssistantAPI, "get_config", return_value={"components": ["frontend"]}
        ),
    ):
        resp = await api_client.post("/core/update", json={"version": "2025.8.3"})

    assert resp.status == 200

    events = [
        {
            "stage": evt.args[0]["data"]["data"]["stage"],
            "progress": evt.args[0]["data"]["data"]["progress"],
            "done": evt.args[0]["data"]["data"]["done"],
        }
        for evt in ha_ws_client.async_send_command.call_args_list
        if "data" in evt.args[0]
        and evt.args[0]["data"]["event"] == WSEvent.JOB
        and evt.args[0]["data"]["data"]["name"] == "home_assistant_core_update"
    ]
    assert events[:5] == [
        {
            "stage": None,
            "progress": 0,
            "done": None,
        },
        {
            "stage": None,
            "progress": 0,
            "done": False,
        },
        {
            "stage": None,
            "progress": 0.1,
            "done": False,
        },
        {
            "stage": None,
            "progress": 1.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 2.8,
            "done": False,
        },
    ]
    assert events[-5:] == [
        {
            "stage": None,
            "progress": 97.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 98.4,
            "done": False,
        },
        {
            "stage": None,
            "progress": 99.4,
            "done": False,
        },
        {
            "stage": None,
            "progress": 100,
            "done": False,
        },
        {
            "stage": None,
            "progress": 100,
            "done": True,
        },
    ]

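The same comprehension recurs across several of these progress tests. Factored out, the idea is captured by a small helper sketch (it assumes the test module's WSEvent import and the existing ha_ws_client fixture):

```python
def job_events(ha_ws_client, job_name: str) -> list[dict]:
    """Collect (stage, progress, done) snapshots for one named job."""
    return [
        {
            "stage": evt.args[0]["data"]["data"]["stage"],
            "progress": evt.args[0]["data"]["data"]["progress"],
            "done": evt.args[0]["data"]["data"]["done"],
        }
        for evt in ha_ws_client.async_send_command.call_args_list
        if "data" in evt.args[0]
        and evt.args[0]["data"]["event"] == WSEvent.JOB
        and evt.args[0]["data"]["data"]["name"] == job_name
    ]
```
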
@@ -152,7 +152,7 @@ async def test_jobs_tree_representation(api_client: TestClient, coresys: CoreSys)
            "name": "test_jobs_tree_alt",
            "reference": None,
            "uuid": ANY,
            "progress": 0,
            "progress": 100,
            "stage": "end",
            "done": True,
            "child_jobs": [],
@@ -282,7 +282,7 @@ async def test_jobs_sorted(api_client: TestClient, coresys: CoreSys):
            "name": "test_jobs_sorted_2",
            "reference": None,
            "uuid": ANY,
            "progress": 0,
            "progress": 100,
            "stage": None,
            "done": True,
            "errors": [],
@@ -294,7 +294,7 @@ async def test_jobs_sorted(api_client: TestClient, coresys: CoreSys):
            "name": "test_jobs_sorted_1",
            "reference": None,
            "uuid": ANY,
            "progress": 0,
            "progress": 100,
            "stage": None,
            "done": True,
            "errors": [],
@@ -305,7 +305,7 @@ async def test_jobs_sorted(api_client: TestClient, coresys: CoreSys):
            "name": "test_jobs_sorted_inner_1",
            "reference": None,
            "uuid": ANY,
            "progress": 0,
            "progress": 100,
            "stage": None,
            "done": True,
            "errors": [],
@@ -317,7 +317,7 @@ async def test_jobs_sorted(api_client: TestClient, coresys: CoreSys):
            "name": "test_jobs_sorted_inner_2",
            "reference": None,
            "uuid": ANY,
            "progress": 0,
            "progress": 100,
            "stage": None,
            "done": True,
            "errors": [],

@@ -17,16 +17,6 @@ async def test_api_security_options_force_security(api_client, coresys: CoreSys)
    assert coresys.security.force


@pytest.mark.asyncio
async def test_api_security_options_content_trust(api_client, coresys: CoreSys):
    """Test security options content trust."""
    assert coresys.security.content_trust

    await api_client.post("/security/options", json={"content_trust": False})

    assert not coresys.security.content_trust


@pytest.mark.asyncio
async def test_api_security_options_pwned(api_client, coresys: CoreSys):
    """Test security options pwned."""
@@ -41,11 +31,8 @@ async def test_api_security_options_pwned(api_client, coresys: CoreSys):
async def test_api_integrity_check(
    api_client, coresys: CoreSys, supervisor_internet: AsyncMock
):
    """Test security integrity check."""
    coresys.security.content_trust = False

    """Test security integrity check - now deprecated."""
    resp = await api_client.post("/security/integrity")
    result = await resp.json()

    assert result["data"]["core"] == "untested"
    assert result["data"]["supervisor"] == "untested"
    # CodeNotary integrity check has been removed, should return 410 Gone
    assert resp.status == 410

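The rewritten test only asserts the status code; the handler itself is not shown in this compare view. A minimal aiohttp sketch of answering a removed endpoint with 410 Gone (handler name and message are assumptions, not the actual Supervisor code):

```python
from aiohttp import web

async def integrity_check_removed(request: web.Request) -> web.Response:
    # CodeNotary-based integrity checking was removed; signal permanent removal.
    return web.json_response(
        {"result": "error", "message": "Integrity check has been removed"},
        status=410,
    )
```
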
@@ -13,12 +13,13 @@ from supervisor.addons.addon import Addon
from supervisor.arch import CpuArch
from supervisor.backups.manager import BackupManager
from supervisor.config import CoreConfig
from supervisor.const import AddonState
from supervisor.const import AddonState, CoreState
from supervisor.coresys import CoreSys
from supervisor.docker.addon import DockerAddon
from supervisor.docker.const import ContainerState
from supervisor.docker.interface import DockerInterface
from supervisor.docker.monitor import DockerContainerStateEvent
from supervisor.homeassistant.const import WSEvent
from supervisor.homeassistant.module import HomeAssistant
from supervisor.store.addon import AddonStore
from supervisor.store.repository import Repository
@@ -709,3 +710,101 @@ async def test_api_store_addons_addon_availability_installed_addon(
    assert (
        "requires Home Assistant version 2023.1.1 or greater" in result["message"]
    )


@pytest.mark.parametrize(
    ("action", "job_name", "addon_slug"),
    [
        ("install", "addon_manager_install", "local_ssh"),
        ("update", "addon_manager_update", "local_example"),
    ],
)
@pytest.mark.usefixtures("tmp_supervisor_data")
async def test_api_progress_updates_addon_install_update(
    api_client: TestClient,
    coresys: CoreSys,
    ha_ws_client: AsyncMock,
    install_addon_example: Addon,
    action: str,
    job_name: str,
    addon_slug: str,
):
    """Test progress updates sent to Home Assistant for installs/updates."""
    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
    coresys.core.set_state(CoreState.RUNNING)
    coresys.docker.docker.api.pull.return_value = load_json_fixture(
        "docker_pull_image_log.json"
    )
    coresys.arch._supported_arch = ["amd64"]  # pylint: disable=protected-access
    install_addon_example.data_store["version"] = AwesomeVersion("2.0.0")

    with (
        patch.object(Addon, "load"),
        patch.object(Addon, "need_build", new=PropertyMock(return_value=False)),
        patch.object(Addon, "latest_need_build", new=PropertyMock(return_value=False)),
    ):
        resp = await api_client.post(f"/store/addons/{addon_slug}/{action}")

    assert resp.status == 200

    events = [
        {
            "stage": evt.args[0]["data"]["data"]["stage"],
            "progress": evt.args[0]["data"]["data"]["progress"],
            "done": evt.args[0]["data"]["data"]["done"],
        }
        for evt in ha_ws_client.async_send_command.call_args_list
        if "data" in evt.args[0]
        and evt.args[0]["data"]["event"] == WSEvent.JOB
        and evt.args[0]["data"]["data"]["name"] == job_name
        and evt.args[0]["data"]["data"]["reference"] == addon_slug
    ]
    assert events[:4] == [
        {
            "stage": None,
            "progress": 0,
            "done": False,
        },
        {
            "stage": None,
            "progress": 0.1,
            "done": False,
        },
        {
            "stage": None,
            "progress": 1.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 2.8,
            "done": False,
        },
    ]
    assert events[-5:] == [
        {
            "stage": None,
            "progress": 97.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 98.4,
            "done": False,
        },
        {
            "stage": None,
            "progress": 99.4,
            "done": False,
        },
        {
            "stage": None,
            "progress": 100,
            "done": False,
        },
        {
            "stage": None,
            "progress": 100,
            "done": True,
        },
    ]

@@ -2,17 +2,24 @@

# pylint: disable=protected-access
import time
from unittest.mock import AsyncMock, MagicMock, patch
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch

from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
from blockbuster import BlockingError
import pytest

from supervisor.const import CoreState
from supervisor.core import Core
from supervisor.coresys import CoreSys
from supervisor.exceptions import HassioError, HostNotSupportedError, StoreGitError
from supervisor.homeassistant.const import WSEvent
from supervisor.store.repository import Repository
from supervisor.supervisor import Supervisor
from supervisor.updater import Updater

from tests.api import common_test_api_advanced_logs
from tests.common import load_json_fixture
from tests.dbus_service_mocks.base import DBusServiceMock
from tests.dbus_service_mocks.os_agent import OSAgent as OSAgentService

@@ -316,3 +323,97 @@ async def test_api_supervisor_options_blocking_io(

    # This should not raise blocking error anymore
    time.sleep(0)


@pytest.mark.usefixtures("tmp_supervisor_data")
async def test_api_progress_updates_supervisor_update(
    api_client: TestClient, coresys: CoreSys, ha_ws_client: AsyncMock
):
    """Test progress updates sent to Home Assistant for updates."""
    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
    coresys.core.set_state(CoreState.RUNNING)
    coresys.docker.docker.api.pull.return_value = load_json_fixture(
        "docker_pull_image_log.json"
    )

    with (
        patch.object(
            Supervisor,
            "version",
            new=PropertyMock(return_value=AwesomeVersion("2025.08.0")),
        ),
        patch.object(
            Updater,
            "version_supervisor",
            new=PropertyMock(return_value=AwesomeVersion("2025.08.3")),
        ),
        patch.object(
            Updater, "image_supervisor", new=PropertyMock(return_value="supervisor")
        ),
        patch.object(Supervisor, "update_apparmor"),
        patch.object(Core, "stop"),
    ):
        resp = await api_client.post("/supervisor/update")

    assert resp.status == 200

    events = [
        {
            "stage": evt.args[0]["data"]["data"]["stage"],
            "progress": evt.args[0]["data"]["data"]["progress"],
            "done": evt.args[0]["data"]["data"]["done"],
        }
        for evt in ha_ws_client.async_send_command.call_args_list
        if "data" in evt.args[0]
        and evt.args[0]["data"]["event"] == WSEvent.JOB
        and evt.args[0]["data"]["data"]["name"] == "supervisor_update"
    ]
    assert events[:4] == [
        {
            "stage": None,
            "progress": 0,
            "done": False,
        },
        {
            "stage": None,
            "progress": 0.1,
            "done": False,
        },
        {
            "stage": None,
            "progress": 1.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 2.8,
            "done": False,
        },
    ]
    assert events[-5:] == [
        {
            "stage": None,
            "progress": 97.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 98.4,
            "done": False,
        },
        {
            "stage": None,
            "progress": 99.4,
            "done": False,
        },
        {
            "stage": None,
            "progress": 100,
            "done": False,
        },
        {
            "stage": None,
            "progress": 100,
            "done": True,
        },
    ]

@@ -1110,6 +1110,7 @@ def _make_backup_message_for_assert(
    reference: str,
    stage: str | None,
    done: bool = False,
    progress: float = 0.0,
):
    """Make a backup message to use for assert test."""
    return {
@@ -1120,7 +1121,7 @@ def _make_backup_message_for_assert(
            "name": f"backup_manager_{action}",
            "reference": reference,
            "uuid": ANY,
            "progress": 0,
            "progress": progress,
            "stage": stage,
            "done": done,
            "parent_id": None,
@@ -1132,13 +1133,12 @@ def _make_backup_message_for_assert(
    }


@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_backup_progress(
    coresys: CoreSys,
    install_addon_ssh: Addon,
    container: MagicMock,
    ha_ws_client: AsyncMock,
    tmp_supervisor_data,
    path_extern,
):
    """Test progress is tracked during backups."""
    container.status = "running"
@@ -1182,7 +1182,10 @@ async def test_backup_progress(
            reference=full_backup.slug, stage="await_addon_restarts"
        ),
        _make_backup_message_for_assert(
            reference=full_backup.slug, stage="await_addon_restarts", done=True
            reference=full_backup.slug,
            stage="await_addon_restarts",
            done=True,
            progress=100,
        ),
    ]

@@ -1227,18 +1230,17 @@ async def test_backup_progress(
            reference=partial_backup.slug,
            stage="finishing_file",
            done=True,
            progress=100,
        ),
    ]


@pytest.mark.usefixtures("supervisor_internet", "tmp_supervisor_data", "path_extern")
async def test_restore_progress(
    coresys: CoreSys,
    supervisor_internet,
    install_addon_ssh: Addon,
    container: MagicMock,
    ha_ws_client: AsyncMock,
    tmp_supervisor_data,
    path_extern,
):
    """Test progress is tracked during backups."""
    container.status = "running"
@@ -1320,6 +1322,7 @@ async def test_restore_progress(
            reference=full_backup.slug,
            stage="await_home_assistant_restart",
            done=True,
            progress=100,
        ),
    ]

@@ -1358,6 +1361,7 @@ async def test_restore_progress(
            reference=folders_backup.slug,
            stage="folders",
            done=True,
            progress=100,
        ),
    ]

@@ -1404,17 +1408,17 @@ async def test_restore_progress(
            reference=addon_backup.slug,
            stage="addons",
            done=True,
            progress=100,
        ),
    ]


@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_freeze_thaw(
    coresys: CoreSys,
    install_addon_ssh: Addon,
    container: MagicMock,
    ha_ws_client: AsyncMock,
    tmp_supervisor_data,
    path_extern,
):
    """Test manual freeze and thaw for external snapshots."""
    container.status = "running"
@@ -1460,7 +1464,11 @@ async def test_freeze_thaw(
            action="thaw_all", reference=None, stage=None
        ),
        _make_backup_message_for_assert(
            action="freeze_all", reference=None, stage="addons", done=True
            action="freeze_all",
            reference=None,
            stage="addons",
            done=True,
            progress=100,
        ),
    ]

@@ -1488,7 +1496,11 @@ async def test_freeze_thaw(
            action="thaw_all", reference=None, stage="addons"
        ),
        _make_backup_message_for_assert(
            action="thaw_all", reference=None, stage="addons", done=True
            action="thaw_all",
            reference=None,
            stage="addons",
            done=True,
            progress=100,
        ),
    ]

@@ -318,7 +318,10 @@ def test_not_journald_addon(


async def test_addon_run_docker_error(
    coresys: CoreSys, addonsdata_system: dict[str, Data], path_extern
    coresys: CoreSys,
    addonsdata_system: dict[str, Data],
    path_extern,
    tmp_supervisor_data: Path,
):
    """Test docker error when addon is run."""
    await coresys.dbus.timedate.connect(coresys.dbus.bus)
@@ -500,3 +503,93 @@ async def test_addon_new_device_no_haos(
    await install_addon_ssh.stop()
    assert coresys.resolution.issues == []
    assert coresys.resolution.suggestions == []


async def test_ulimits_integration(
    coresys: CoreSys,
    install_addon_ssh: Addon,
):
    """Test ulimits integration with Docker addon."""
    docker_addon = DockerAddon(coresys, install_addon_ssh)

    # Test default case (no ulimits, no realtime)
    assert docker_addon.ulimits is None

    # Test with realtime enabled (should have built-in ulimits)
    install_addon_ssh.data["realtime"] = True
    ulimits = docker_addon.ulimits
    assert ulimits is not None
    assert len(ulimits) == 2
    # Check for rtprio limit
    rtprio_limit = next((u for u in ulimits if u.name == "rtprio"), None)
    assert rtprio_limit is not None
    assert rtprio_limit.soft == 90
    assert rtprio_limit.hard == 99
    # Check for memlock limit
    memlock_limit = next((u for u in ulimits if u.name == "memlock"), None)
    assert memlock_limit is not None
    assert memlock_limit.soft == 128 * 1024 * 1024
    assert memlock_limit.hard == 128 * 1024 * 1024

    # Test with configurable ulimits (simple format)
    install_addon_ssh.data["realtime"] = False
    install_addon_ssh.data["ulimits"] = {"nofile": 65535, "nproc": 32768}
    ulimits = docker_addon.ulimits
    assert ulimits is not None
    assert len(ulimits) == 2

    nofile_limit = next((u for u in ulimits if u.name == "nofile"), None)
    assert nofile_limit is not None
    assert nofile_limit.soft == 65535
    assert nofile_limit.hard == 65535

    nproc_limit = next((u for u in ulimits if u.name == "nproc"), None)
    assert nproc_limit is not None
    assert nproc_limit.soft == 32768
    assert nproc_limit.hard == 32768

    # Test with configurable ulimits (detailed format)
    install_addon_ssh.data["ulimits"] = {
        "nofile": {"soft": 20000, "hard": 40000},
        "memlock": {"soft": 67108864, "hard": 134217728},
    }
    ulimits = docker_addon.ulimits
    assert ulimits is not None
    assert len(ulimits) == 2

    nofile_limit = next((u for u in ulimits if u.name == "nofile"), None)
    assert nofile_limit is not None
    assert nofile_limit.soft == 20000
    assert nofile_limit.hard == 40000

    memlock_limit = next((u for u in ulimits if u.name == "memlock"), None)
    assert memlock_limit is not None
    assert memlock_limit.soft == 67108864
    assert memlock_limit.hard == 134217728

    # Test mixed format and realtime (realtime + custom ulimits)
    install_addon_ssh.data["realtime"] = True
    install_addon_ssh.data["ulimits"] = {
        "nofile": 65535,
        "core": {"soft": 0, "hard": 0},  # Disable core dumps
    }
    ulimits = docker_addon.ulimits
    assert ulimits is not None
    assert (
        len(ulimits) == 4
    )  # rtprio, memlock (from realtime) + nofile, core (from config)

    # Check realtime limits still present
    rtprio_limit = next((u for u in ulimits if u.name == "rtprio"), None)
    assert rtprio_limit is not None

    # Check custom limits added
    nofile_limit = next((u for u in ulimits if u.name == "nofile"), None)
    assert nofile_limit is not None
    assert nofile_limit.soft == 65535
    assert nofile_limit.hard == 65535

    core_limit = next((u for u in ulimits if u.name == "core"), None)
    assert core_limit is not None
    assert core_limit.soft == 0
    assert core_limit.hard == 0

@@ -1,6 +1,7 @@
|
||||
"""Test Docker interface."""
|
||||
|
||||
import asyncio
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from unittest.mock import ANY, AsyncMock, MagicMock, Mock, PropertyMock, call, patch
|
||||
|
||||
@@ -25,21 +26,11 @@ from supervisor.exceptions import (
|
||||
DockerNotFound,
|
||||
DockerRequestError,
|
||||
)
|
||||
from supervisor.homeassistant.const import WSEvent
|
||||
from supervisor.jobs import JobSchedulerOptions, SupervisorJob
|
||||
|
||||
from tests.common import load_json_fixture
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def mock_verify_content(coresys: CoreSys):
|
||||
"""Mock verify_content utility during tests."""
|
||||
with patch.object(
|
||||
coresys.security, "verify_content", return_value=None
|
||||
) as verify_content:
|
||||
yield verify_content
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"cpu_arch, platform",
|
||||
[
|
||||
@@ -281,6 +272,7 @@ async def test_run_missing_image(
|
||||
container: MagicMock,
|
||||
capture_exception: Mock,
|
||||
path_extern,
|
||||
tmp_supervisor_data: Path,
|
||||
):
|
||||
"""Test run captures the exception when image is missing."""
|
||||
    coresys.docker.containers.create.side_effect = [NotFound("missing"), MagicMock()]
@@ -415,196 +407,17 @@ async def test_install_fires_progress_events(
    ]


async def test_install_sends_progress_to_home_assistant(
    coresys: CoreSys, test_docker_interface: DockerInterface, ha_ws_client: AsyncMock
):
    """Test progress events are sent as job updates to Home Assistant."""
    coresys.core.set_state(CoreState.RUNNING)
    coresys.docker.docker.api.pull.return_value = load_json_fixture(
        "docker_pull_image_log.json"
    )

    with (
        patch.object(
            type(coresys.supervisor), "arch", PropertyMock(return_value="i386")
        ),
    ):
        # Schedule job so we can listen for the end. Then we can assert against the WS mock
        event = asyncio.Event()
        job, install_task = coresys.jobs.schedule_job(
            test_docker_interface.install,
            JobSchedulerOptions(),
            AwesomeVersion("1.2.3"),
            "test",
        )

        async def listen_for_job_end(reference: SupervisorJob):
            if reference.uuid != job.uuid:
                return
            event.set()

        coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, listen_for_job_end)
        await install_task
        await event.wait()

        events = [
            evt.args[0]["data"]["data"]
            for evt in ha_ws_client.async_send_command.call_args_list
            if "data" in evt.args[0] and evt.args[0]["data"]["event"] == WSEvent.JOB
        ]
        assert events[0]["name"] == "docker_interface_install"
        assert events[0]["uuid"] == job.uuid
        assert events[0]["done"] is None
        assert events[1]["name"] == "docker_interface_install"
        assert events[1]["uuid"] == job.uuid
        assert events[1]["done"] is False
        assert events[-1]["name"] == "docker_interface_install"
        assert events[-1]["uuid"] == job.uuid
        assert events[-1]["done"] is True

        def make_sub_log(layer_id: str):
            return [
                {
                    "stage": evt["stage"],
                    "progress": evt["progress"],
                    "done": evt["done"],
                    "extra": evt["extra"],
                }
                for evt in events
                if evt["name"] == "Pulling container image layer"
                and evt["reference"] == layer_id
                and evt["parent_id"] == job.uuid
            ]

        layer_1_log = make_sub_log("1e214cd6d7d0")
        layer_2_log = make_sub_log("1a38e1d5e18d")
        assert len(layer_1_log) == 20
        assert len(layer_2_log) == 19
        assert len(events) == 42
        assert layer_1_log == [
            {"stage": "Pulling fs layer", "progress": 0, "done": False, "extra": None},
            {
                "stage": "Downloading",
                "progress": 0.1,
                "done": False,
                "extra": {"current": 539462, "total": 436480882},
            },
            {
                "stage": "Downloading",
                "progress": 0.6,
                "done": False,
                "extra": {"current": 4864838, "total": 436480882},
            },
            {
                "stage": "Downloading",
                "progress": 0.9,
                "done": False,
                "extra": {"current": 7552896, "total": 436480882},
            },
            {
                "stage": "Downloading",
                "progress": 1.2,
                "done": False,
                "extra": {"current": 10252544, "total": 436480882},
            },
            {
                "stage": "Downloading",
                "progress": 2.9,
                "done": False,
                "extra": {"current": 25369792, "total": 436480882},
            },
            {
                "stage": "Downloading",
                "progress": 11.9,
                "done": False,
                "extra": {"current": 103619904, "total": 436480882},
            },
            {
                "stage": "Downloading",
                "progress": 26.1,
                "done": False,
                "extra": {"current": 227726144, "total": 436480882},
            },
            {
                "stage": "Downloading",
                "progress": 49.6,
                "done": False,
                "extra": {"current": 433170048, "total": 436480882},
            },
            {
                "stage": "Verifying Checksum",
                "progress": 50,
                "done": False,
                "extra": {"current": 433170048, "total": 436480882},
            },
            {
                "stage": "Download complete",
                "progress": 50,
                "done": False,
                "extra": {"current": 433170048, "total": 436480882},
            },
            {
                "stage": "Extracting",
                "progress": 50.1,
                "done": False,
                "extra": {"current": 557056, "total": 436480882},
            },
            {
                "stage": "Extracting",
                "progress": 60.3,
                "done": False,
                "extra": {"current": 89686016, "total": 436480882},
            },
            {
                "stage": "Extracting",
                "progress": 70.0,
                "done": False,
                "extra": {"current": 174358528, "total": 436480882},
            },
            {
                "stage": "Extracting",
                "progress": 80.0,
                "done": False,
                "extra": {"current": 261816320, "total": 436480882},
            },
            {
                "stage": "Extracting",
                "progress": 88.4,
                "done": False,
                "extra": {"current": 334790656, "total": 436480882},
            },
            {
                "stage": "Extracting",
                "progress": 94.0,
                "done": False,
                "extra": {"current": 383811584, "total": 436480882},
            },
            {
                "stage": "Extracting",
                "progress": 99.9,
                "done": False,
                "extra": {"current": 435617792, "total": 436480882},
            },
            {
                "stage": "Extracting",
                "progress": 100.0,
                "done": False,
                "extra": {"current": 436480882, "total": 436480882},
            },
            {
                "stage": "Pull complete",
                "progress": 100.0,
                "done": True,
                "extra": {"current": 436480882, "total": 436480882},
            },
        ]
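
# A hedged sketch (hypothetical helper, not the Supervisor source) of how the
# expected layer numbers above can be derived: downloading maps onto the 0-50%
# band and extraction onto the 50-100% band of a layer's progress.
def layer_progress(stage: str, current: int, total: int) -> float:
    """Map a docker pull progressDetail onto a 0-100 scale."""
    ratio = current / total if total else 0
    if stage == "Downloading":
        return round(50 * ratio, 1)  # 433170048/436480882 -> 49.6
    if stage in ("Verifying Checksum", "Download complete"):
        return 50
    if stage in ("Extracting", "Pull complete"):
        return round(50 + 50 * ratio, 1)  # 89686016/436480882 -> 60.3
    return 0

assert layer_progress("Downloading", 433170048, 436480882) == 49.6
assert layer_progress("Extracting", 89686016, 436480882) == 60.3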


async def test_install_progress_rounding_does_not_cause_misses(
    coresys: CoreSys, test_docker_interface: DockerInterface, ha_ws_client: AsyncMock
    coresys: CoreSys,
    test_docker_interface: DockerInterface,
    ha_ws_client: AsyncMock,
    capture_exception: Mock,
):
    """Test extremely close progress events do not create rounding issues."""
    coresys.core.set_state(CoreState.RUNNING)
    # Current numbers chosen to create a rounding issue with the original code,
    # where a progress update came in with a value between the actual previous
    # value and what it was rounded to. It should not raise an out-of-order exception
    coresys.docker.docker.api.pull.return_value = [
        {
            "status": "Pulling from home-assistant/odroid-n2-homeassistant",
@@ -669,65 +482,7 @@ async def test_install_progress_rounding_does_not_cause_misses(
    await install_task
    await event.wait()

    events = [
        evt.args[0]["data"]["data"]
        for evt in ha_ws_client.async_send_command.call_args_list
        if "data" in evt.args[0]
        and evt.args[0]["data"]["event"] == WSEvent.JOB
        and evt.args[0]["data"]["data"]["reference"] == "1e214cd6d7d0"
        and evt.args[0]["data"]["data"]["stage"] in {"Downloading", "Extracting"}
    ]

    assert events == [
        {
            "name": "Pulling container image layer",
            "stage": "Downloading",
            "progress": 49.6,
            "done": False,
            "extra": {"current": 432700000, "total": 436480882},
            "reference": "1e214cd6d7d0",
            "parent_id": job.uuid,
            "errors": [],
            "uuid": ANY,
            "created": ANY,
        },
        {
            "name": "Pulling container image layer",
            "stage": "Downloading",
            "progress": 49.6,
            "done": False,
            "extra": {"current": 432800000, "total": 436480882},
            "reference": "1e214cd6d7d0",
            "parent_id": job.uuid,
            "errors": [],
            "uuid": ANY,
            "created": ANY,
        },
        {
            "name": "Pulling container image layer",
            "stage": "Extracting",
            "progress": 99.6,
            "done": False,
            "extra": {"current": 432700000, "total": 436480882},
            "reference": "1e214cd6d7d0",
            "parent_id": job.uuid,
            "errors": [],
            "uuid": ANY,
            "created": ANY,
        },
        {
            "name": "Pulling container image layer",
            "stage": "Extracting",
            "progress": 99.6,
            "done": False,
            "extra": {"current": 432800000, "total": 436480882},
            "reference": "1e214cd6d7d0",
            "parent_id": job.uuid,
            "errors": [],
            "uuid": ANY,
            "created": ANY,
        },
    ]
    capture_exception.assert_not_called()
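
# A hedged sketch of the rounding guard this test exercises (assumed shape, not
# the Supervisor implementation): reported progress is rounded, so a raw update
# may land between the previous raw value and its rounded representation.
# Clamping instead of raising keeps the reported stream monotonic.
class MonotonicProgress:
    def __init__(self) -> None:
        self._last = 0.0

    def update(self, raw: float) -> float:
        # Never report a value lower than one already sent
        self._last = max(self._last, round(raw, 1))
        return self._last

p = MonotonicProgress()
assert p.update(49.58) == 49.6  # rounds up to 49.6
assert p.update(49.59) == 49.6  # falls between raw 49.58 and rounded 49.6: no error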


@pytest.mark.parametrize(
@@ -777,10 +532,15 @@ async def test_install_raises_on_pull_error(


async def test_install_progress_handles_download_restart(
    coresys: CoreSys, test_docker_interface: DockerInterface, ha_ws_client: AsyncMock
    coresys: CoreSys,
    test_docker_interface: DockerInterface,
    ha_ws_client: AsyncMock,
    capture_exception: Mock,
):
    """Test install handles docker progress events that include a download restart."""
    coresys.core.set_state(CoreState.RUNNING)
    # Fixture emulates a download restart as docker logs it;
    # a log out-of-order exception should not be raised
    coresys.docker.docker.api.pull.return_value = load_json_fixture(
        "docker_pull_image_log_restart.json"
    )
@@ -808,106 +568,173 @@ async def test_install_progress_handles_download_restart(
    await install_task
    await event.wait()

    events = [
        evt.args[0]["data"]["data"]
        for evt in ha_ws_client.async_send_command.call_args_list
        if "data" in evt.args[0] and evt.args[0]["data"]["event"] == WSEvent.JOB
    capture_exception.assert_not_called()
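
# Minimal sketch of tolerating a download restart (an assumption about the fix,
# not the actual implementation): the layer's byte counter may reset to zero,
# so the tracker accepts the reset instead of raising an out-of-order error.
def apply_layer_update(state: dict, current: int) -> None:
    if current < state.get("current", 0):
        state.clear()  # download restarted: reset the baseline, don't raise
    state["current"] = current

state: dict = {}
apply_layer_update(state, 1000)
apply_layer_update(state, 10)  # restart arrives without an exception
assert state["current"] == 10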


async def test_install_progress_handles_layers_skipping_download(
    coresys: CoreSys,
    test_docker_interface: DockerInterface,
    capture_exception: Mock,
):
    """Test install handles small layers that skip downloading phase and go directly to download complete."""
    coresys.core.set_state(CoreState.RUNNING)
    # Simulate multiple layers where one small layer (96 bytes) skips the downloading phase
    # This layer should not block progress reporting for the parent job
    coresys.docker.docker.api.pull.return_value = [
        {"status": "Pulling from test/image", "id": "latest"},
        # Layer 1: Normal layer with downloading phase
        {"status": "Pulling fs layer", "progressDetail": {}, "id": "layer1"},
        {
            "status": "Downloading",
            "progressDetail": {"current": 100, "total": 1000},
            "progress": "[=====> ] 100B/1000B",
            "id": "layer1",
        },
        {
            "status": "Downloading",
            "progressDetail": {"current": 1000, "total": 1000},
            "progress": "[==================================================>] 1000B/1000B",
            "id": "layer1",
        },
        {"status": "Download complete", "progressDetail": {}, "id": "layer1"},
        {
            "status": "Extracting",
            "progressDetail": {"current": 1000, "total": 1000},
            "progress": "[==================================================>] 1000B/1000B",
            "id": "layer1",
        },
        {"status": "Pull complete", "progressDetail": {}, "id": "layer1"},
        # Layer 2: Small layer that skips downloading (like 02a6e69d8d00 from the logs)
        {"status": "Pulling fs layer", "progressDetail": {}, "id": "layer2"},
        {"status": "Waiting", "progressDetail": {}, "id": "layer2"},
        # Goes straight to Download complete without Downloading phase
        {"status": "Download complete", "progressDetail": {}, "id": "layer2"},
        {
            "status": "Extracting",
            "progressDetail": {"current": 96, "total": 96},
            "progress": "[==================================================>] 96B/96B",
            "id": "layer2",
        },
        {"status": "Pull complete", "progressDetail": {}, "id": "layer2"},
        {"status": "Digest: sha256:test"},
        {"status": "Status: Downloaded newer image for test/image:latest"},
    ]

    def make_sub_log(layer_id: str):
        return [
            {
                "stage": evt["stage"],
                "progress": evt["progress"],
                "done": evt["done"],
                "extra": evt["extra"],
            }
            for evt in events
            if evt["name"] == "Pulling container image layer"
            and evt["reference"] == layer_id
            and evt["parent_id"] == job.uuid
        ]
    with patch.object(
        type(coresys.supervisor), "arch", PropertyMock(return_value="amd64")
    ):
        # Schedule job so we can observe that it completes successfully
        event = asyncio.Event()
        job, install_task = coresys.jobs.schedule_job(
            test_docker_interface.install,
            JobSchedulerOptions(),
            AwesomeVersion("1.2.3"),
            "test",
        )

        layer_1_log = make_sub_log("1e214cd6d7d0")
        assert len(layer_1_log) == 14
        assert layer_1_log == [
            {"stage": "Pulling fs layer", "progress": 0, "done": False, "extra": None},
        async def listen_for_job_end(reference: SupervisorJob):
            if reference.uuid != job.uuid:
                return
            event.set()

        coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, listen_for_job_end)
        await install_task
        await event.wait()

        # The key assertion: Job should complete successfully without errors
        # Without the fix, layer2 would block all progress reporting until it reached Extracting,
        # preventing the aggregate progress calculation from running
        assert job.done is True
        assert job.progress == 100
    capture_exception.assert_not_called()
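
# Sketch under assumptions (hypothetical helper, mirroring what the test pins
# down): a tiny layer that jumps straight to "Download complete" counts as 50%
# done, so the aggregate (mean of per-layer progress) keeps moving instead of
# being pinned at 0 by the missing "Downloading" phase.
def layer_fraction(stage: str, current: int = 0, total: int = 0) -> float:
    if stage == "Pull complete":
        return 1.0
    if stage == "Download complete":
        return 0.5  # download counts as half even with no Downloading event seen
    if stage == "Downloading" and total:
        return 0.5 * current / total
    if stage == "Extracting" and total:
        return 0.5 + 0.5 * current / total
    return 0.0

layers = [layer_fraction("Pull complete"), layer_fraction("Download complete")]
assert 100 * sum(layers) / len(layers) == 75.0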


async def test_install_progress_handles_containerd_snapshotter(
    coresys: CoreSys,
    test_docker_interface: DockerInterface,
    capture_exception: Mock,
):
    """Test install handles containerd snapshotter time-based progress (total=None)."""
    coresys.core.set_state(CoreState.RUNNING)
    # Containerd snapshotter reports extraction progress as time elapsed (e.g., "7 s")
    # with current=7, total=None instead of byte-based progress
    coresys.docker.docker.api.pull.return_value = [
        {"status": "Pulling from test/image", "id": "latest"},
        {"status": "Pulling fs layer", "progressDetail": {}, "id": "layer1"},
        {
            "stage": "Downloading",
            "progress": 11.9,
            "done": False,
            "extra": {"current": 103619904, "total": 436480882},
            "status": "Downloading",
            "progressDetail": {"current": 100, "total": 1000},
            "progress": "[=====> ] 100B/1000B",
            "id": "layer1",
        },
        {
            "stage": "Downloading",
            "progress": 26.1,
            "done": False,
            "extra": {"current": 227726144, "total": 436480882},
            "status": "Downloading",
            "progressDetail": {"current": 1000, "total": 1000},
            "progress": "[==================================================>] 1000B/1000B",
            "id": "layer1",
        },
        {"status": "Download complete", "progressDetail": {}, "id": "layer1"},
        {
            "status": "Extracting",
            "progressDetail": {"current": 1000, "total": 1000},
            "progress": "[==================================================>] 1000B/1000B",
            "id": "layer1",
        },
        {"status": "Pull complete", "progressDetail": {}, "id": "layer1"},
        # Layer 2: Containerd snapshotter with time-based extraction
        {"status": "Pulling fs layer", "progressDetail": {}, "id": "layer2"},
        {
            "status": "Downloading",
            "progressDetail": {"current": 50, "total": 500},
            "progress": "[=====> ] 50B/500B",
            "id": "layer2",
        },
        {
            "stage": "Downloading",
            "progress": 49.6,
            "done": False,
            "extra": {"current": 433170048, "total": 436480882},
            "status": "Downloading",
            "progressDetail": {"current": 500, "total": 500},
            "progress": "[==================================================>] 500B/500B",
            "id": "layer2",
        },
        {"status": "Download complete", "progressDetail": {}, "id": "layer2"},
        # Time-based extraction progress (containerd snapshotter)
        {
            "status": "Extracting",
            "progressDetail": {"current": 3, "total": None},
            "progress": "3 s",
            "id": "layer2",
        },
        {
            "stage": "Retrying download",
            "progress": 0,
            "done": False,
            "extra": None,
        },
        {
            "stage": "Retrying download",
            "progress": 0,
            "done": False,
            "extra": None,
        },
        {
            "stage": "Downloading",
            "progress": 11.9,
            "done": False,
            "extra": {"current": 103619904, "total": 436480882},
        },
        {
            "stage": "Downloading",
            "progress": 26.1,
            "done": False,
            "extra": {"current": 227726144, "total": 436480882},
        },
        {
            "stage": "Downloading",
            "progress": 49.6,
            "done": False,
            "extra": {"current": 433170048, "total": 436480882},
        },
        {
            "stage": "Verifying Checksum",
            "progress": 50,
            "done": False,
            "extra": {"current": 433170048, "total": 436480882},
        },
        {
            "stage": "Download complete",
            "progress": 50,
            "done": False,
            "extra": {"current": 433170048, "total": 436480882},
        },
        {
            "stage": "Extracting",
            "progress": 80.0,
            "done": False,
            "extra": {"current": 261816320, "total": 436480882},
        },
        {
            "stage": "Extracting",
            "progress": 100.0,
            "done": False,
            "extra": {"current": 436480882, "total": 436480882},
        },
        {
            "stage": "Pull complete",
            "progress": 100.0,
            "done": True,
            "extra": {"current": 436480882, "total": 436480882},
            "status": "Extracting",
            "progressDetail": {"current": 7, "total": None},
            "progress": "7 s",
            "id": "layer2",
        },
        {"status": "Pull complete", "progressDetail": {}, "id": "layer2"},
        {"status": "Digest: sha256:test"},
        {"status": "Status: Downloaded newer image for test/image:latest"},
    ]

    with patch.object(
        type(coresys.supervisor), "arch", PropertyMock(return_value="amd64")
    ):
        event = asyncio.Event()
        job, install_task = coresys.jobs.schedule_job(
            test_docker_interface.install,
            JobSchedulerOptions(),
            AwesomeVersion("1.2.3"),
            "test",
        )

        async def listen_for_job_end(reference: SupervisorJob):
            if reference.uuid != job.uuid:
                return
            event.set()

        coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, listen_for_job_end)
        await install_task
        await event.wait()

        # The key assertion: Job should complete without crashing on None total
        assert job.done is True
        assert job.progress == 100
    capture_exception.assert_not_called()
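
# Hedged sketch of the guard this test targets: the containerd snapshotter
# reports extraction as elapsed time ("7 s") with total=None, so ratio math
# must tolerate a missing total instead of raising on None.
def safe_ratio(detail: dict) -> float | None:
    total = detail.get("total")
    if not total:  # None (time-based progress) or 0: no byte ratio available
        return None
    return detail.get("current", 0) / total

assert safe_ratio({"current": 7, "total": None}) is None
assert safe_ratio({"current": 500, "total": 500}) == 1.0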

@@ -293,6 +293,8 @@ async def test_cidfile_cleanup_handles_oserror(
    # Mock the containers.get method and cidfile cleanup to raise OSError
    with (
        patch.object(docker.containers, "get", return_value=mock_container),
        patch("pathlib.Path.is_dir", return_value=False),
        patch("pathlib.Path.is_file", return_value=True),
        patch(
            "pathlib.Path.unlink", side_effect=OSError("File not found")
        ) as mock_unlink,
@@ -306,3 +308,46 @@ async def test_cidfile_cleanup_handles_oserror(

    # Verify cidfile cleanup was attempted
    mock_unlink.assert_called_once_with(missing_ok=True)


async def test_run_container_with_leftover_cidfile_directory(
    coresys: CoreSys, docker: DockerAPI, path_extern, tmp_supervisor_data
):
    """Test container creation removes leftover cidfile directory before creating new one.

    This can happen when Docker auto-starts a container with restart policy
    before Supervisor could write the CID file, causing Docker to create
    the bind mount source as a directory.
    """
    # Mock container
    mock_container = MagicMock()
    mock_container.id = "test_container_id_new"

    container_name = "test_container"
    cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"

    # Create a leftover directory (simulating Docker's behavior)
    cidfile_path.mkdir()
    assert cidfile_path.is_dir()

    # Mock container creation
    with patch.object(
        docker.containers, "create", return_value=mock_container
    ) as create_mock:
        # Execute run with a container name
        loop = asyncio.get_event_loop()
        result = await loop.run_in_executor(
            None,
            lambda kwargs: docker.run(**kwargs),
            {"image": "test_image", "tag": "latest", "name": container_name},
        )

        # Verify container was created
        create_mock.assert_called_once()

        # Verify new cidfile was written as a file (not directory)
        assert cidfile_path.exists()
        assert cidfile_path.is_file()
        assert cidfile_path.read_text() == mock_container.id

        assert result == mock_container
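
# Illustrative sketch (assumed shape of the cleanup this test verifies, not the
# DockerAPI source): before writing the CID file, remove any leftover path,
# including the directory Docker creates when it bind-mounts a missing source.
import shutil
from pathlib import Path

def write_cidfile(path: Path, container_id: str) -> None:
    if path.is_dir():
        shutil.rmtree(path)  # leftover directory from Docker auto-start
    else:
        path.unlink(missing_ok=True)  # stale file from a previous run
    path.write_text(container_id)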

@@ -376,3 +376,14 @@ async def test_try_get_nvme_life_time_missing_percent_used(
        coresys.config.path_supervisor
    )
    assert lifetime is None


async def test_try_get_nvme_life_time_dbus_not_connected(coresys: CoreSys):
    """Test getting lifetime info from an NVMe when DBUS is not connected."""
    # Set the udisks2 dbus to None to force it to be disconnected.
    coresys.dbus.udisks2.dbus = None

    lifetime = await coresys.hardware.disk.get_disk_life_time(
        coresys.config.path_supervisor
    )
    assert lifetime is None

@@ -7,8 +7,8 @@ import pytest

from supervisor.coresys import CoreSys
from supervisor.dbus.const import DeviceType
from supervisor.host.configuration import Interface, VlanConfig
from supervisor.host.const import InterfaceType
from supervisor.host.configuration import Interface, VlanConfig, WifiConfig
from supervisor.host.const import AuthMethod, InterfaceType, WifiMode

from tests.dbus_service_mocks.base import DBusServiceMock
from tests.dbus_service_mocks.network_connection_settings import (
@@ -291,3 +291,237 @@ async def test_equals_dbus_interface_eth0_10_real(

    # Test should pass with matching VLAN config
    assert test_vlan_interface.equals_dbus_interface(network_interface) is True


def test_map_nm_wifi_non_wireless_interface():
    """Test _map_nm_wifi returns None for non-wireless interface."""
    # Mock non-wireless interface
    mock_interface = Mock()
    mock_interface.type = DeviceType.ETHERNET
    mock_interface.settings = Mock()

    result = Interface._map_nm_wifi(mock_interface)
    assert result is None


def test_map_nm_wifi_no_settings():
    """Test _map_nm_wifi returns None when interface has no settings."""
    # Mock wireless interface without settings
    mock_interface = Mock()
    mock_interface.type = DeviceType.WIRELESS
    mock_interface.settings = None

    result = Interface._map_nm_wifi(mock_interface)
    assert result is None


def test_map_nm_wifi_open_authentication():
    """Test _map_nm_wifi with open authentication (no security)."""
    # Mock wireless interface with open authentication
    mock_interface = Mock()
    mock_interface.type = DeviceType.WIRELESS
    mock_interface.settings = Mock()
    mock_interface.settings.wireless_security = None
    mock_interface.settings.wireless = Mock()
    mock_interface.settings.wireless.ssid = "TestSSID"
    mock_interface.settings.wireless.mode = "infrastructure"
    mock_interface.wireless = None
    mock_interface.interface_name = "wlan0"

    result = Interface._map_nm_wifi(mock_interface)

    assert result is not None
    assert isinstance(result, WifiConfig)
    assert result.mode == WifiMode.INFRASTRUCTURE
    assert result.ssid == "TestSSID"
    assert result.auth == AuthMethod.OPEN
    assert result.psk is None
    assert result.signal is None


def test_map_nm_wifi_wep_authentication():
    """Test _map_nm_wifi with WEP authentication."""
    # Mock wireless interface with WEP authentication
    mock_interface = Mock()
    mock_interface.type = DeviceType.WIRELESS
    mock_interface.settings = Mock()
    mock_interface.settings.wireless_security = Mock()
    mock_interface.settings.wireless_security.key_mgmt = "none"
    mock_interface.settings.wireless_security.psk = None
    mock_interface.settings.wireless = Mock()
    mock_interface.settings.wireless.ssid = "WEPNetwork"
    mock_interface.settings.wireless.mode = "infrastructure"
    mock_interface.wireless = None
    mock_interface.interface_name = "wlan0"

    result = Interface._map_nm_wifi(mock_interface)

    assert result is not None
    assert isinstance(result, WifiConfig)
    assert result.auth == AuthMethod.WEP
    assert result.ssid == "WEPNetwork"
    assert result.psk is None


def test_map_nm_wifi_wpa_psk_authentication():
    """Test _map_nm_wifi with WPA-PSK authentication."""
    # Mock wireless interface with WPA-PSK authentication
    mock_interface = Mock()
    mock_interface.type = DeviceType.WIRELESS
    mock_interface.settings = Mock()
    mock_interface.settings.wireless_security = Mock()
    mock_interface.settings.wireless_security.key_mgmt = "wpa-psk"
    mock_interface.settings.wireless_security.psk = "SecretPassword123"
    mock_interface.settings.wireless = Mock()
    mock_interface.settings.wireless.ssid = "SecureNetwork"
    mock_interface.settings.wireless.mode = "infrastructure"
    mock_interface.wireless = None
    mock_interface.interface_name = "wlan0"

    result = Interface._map_nm_wifi(mock_interface)

    assert result is not None
    assert isinstance(result, WifiConfig)
    assert result.auth == AuthMethod.WPA_PSK
    assert result.ssid == "SecureNetwork"
    assert result.psk == "SecretPassword123"


def test_map_nm_wifi_unsupported_authentication():
    """Test _map_nm_wifi returns None for unsupported authentication method."""
    # Mock wireless interface with unsupported authentication
    mock_interface = Mock()
    mock_interface.type = DeviceType.WIRELESS
    mock_interface.settings = Mock()
    mock_interface.settings.wireless_security = Mock()
    mock_interface.settings.wireless_security.key_mgmt = "wpa-eap"  # Unsupported
    mock_interface.settings.wireless = Mock()
    mock_interface.settings.wireless.ssid = "EnterpriseNetwork"
    mock_interface.interface_name = "wlan0"

    result = Interface._map_nm_wifi(mock_interface)

    assert result is None
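
# Sketch of the key_mgmt -> AuthMethod mapping these tests pin down (a plain
# dict stand-in, not the actual _map_nm_wifi source):
AUTH_BY_KEY_MGMT = {
    None: "open",      # no wireless_security settings at all
    "none": "wep",     # NetworkManager uses key-mgmt "none" for WEP
    "wpa-psk": "wpa-psk",
}

def map_auth(key_mgmt: str | None) -> str | None:
    # Unsupported schemes such as "wpa-eap" fall through to None,
    # which is why _map_nm_wifi returns no WifiConfig for them.
    return AUTH_BY_KEY_MGMT.get(key_mgmt)

assert map_auth("wpa-eap") is None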


def test_map_nm_wifi_different_modes():
    """Test _map_nm_wifi with different wifi modes."""
    modes_to_test = [
        ("infrastructure", WifiMode.INFRASTRUCTURE),
        ("mesh", WifiMode.MESH),
        ("adhoc", WifiMode.ADHOC),
        ("ap", WifiMode.AP),
    ]

    for mode_value, expected_mode in modes_to_test:
        mock_interface = Mock()
        mock_interface.type = DeviceType.WIRELESS
        mock_interface.settings = Mock()
        mock_interface.settings.wireless_security = None
        mock_interface.settings.wireless = Mock()
        mock_interface.settings.wireless.ssid = "TestSSID"
        mock_interface.settings.wireless.mode = mode_value
        mock_interface.wireless = None
        mock_interface.interface_name = "wlan0"

        result = Interface._map_nm_wifi(mock_interface)

        assert result is not None
        assert result.mode == expected_mode


def test_map_nm_wifi_with_signal():
    """Test _map_nm_wifi with wireless signal strength."""
    # Mock wireless interface with active connection and signal
    mock_interface = Mock()
    mock_interface.type = DeviceType.WIRELESS
    mock_interface.settings = Mock()
    mock_interface.settings.wireless_security = None
    mock_interface.settings.wireless = Mock()
    mock_interface.settings.wireless.ssid = "TestSSID"
    mock_interface.settings.wireless.mode = "infrastructure"
    mock_interface.wireless = Mock()
    mock_interface.wireless.active = Mock()
    mock_interface.wireless.active.strength = 75
    mock_interface.interface_name = "wlan0"

    result = Interface._map_nm_wifi(mock_interface)

    assert result is not None
    assert result.signal == 75


def test_map_nm_wifi_without_signal():
    """Test _map_nm_wifi without wireless signal (no active connection)."""
    # Mock wireless interface without active connection
    mock_interface = Mock()
    mock_interface.type = DeviceType.WIRELESS
    mock_interface.settings = Mock()
    mock_interface.settings.wireless_security = None
    mock_interface.settings.wireless = Mock()
    mock_interface.settings.wireless.ssid = "TestSSID"
    mock_interface.settings.wireless.mode = "infrastructure"
    mock_interface.wireless = None
    mock_interface.interface_name = "wlan0"

    result = Interface._map_nm_wifi(mock_interface)

    assert result is not None
    assert result.signal is None


def test_map_nm_wifi_wireless_no_active_ap():
    """Test _map_nm_wifi with wireless object but no active access point."""
    # Mock wireless interface with wireless object but no active AP
    mock_interface = Mock()
    mock_interface.type = DeviceType.WIRELESS
    mock_interface.settings = Mock()
    mock_interface.settings.wireless_security = None
    mock_interface.settings.wireless = Mock()
    mock_interface.settings.wireless.ssid = "TestSSID"
    mock_interface.settings.wireless.mode = "infrastructure"
    mock_interface.wireless = Mock()
    mock_interface.wireless.active = None
    mock_interface.interface_name = "wlan0"

    result = Interface._map_nm_wifi(mock_interface)

    assert result is not None
    assert result.signal is None


def test_map_nm_wifi_no_wireless_settings():
    """Test _map_nm_wifi when wireless settings are missing."""
    # Mock wireless interface without wireless settings
    mock_interface = Mock()
    mock_interface.type = DeviceType.WIRELESS
    mock_interface.settings = Mock()
    mock_interface.settings.wireless_security = None
    mock_interface.settings.wireless = None
    mock_interface.wireless = None
    mock_interface.interface_name = "wlan0"

    result = Interface._map_nm_wifi(mock_interface)

    assert result is not None
    assert result.ssid == ""
    assert result.mode == WifiMode.INFRASTRUCTURE  # Default mode


def test_map_nm_wifi_no_wireless_mode():
    """Test _map_nm_wifi when wireless mode is not specified."""
    # Mock wireless interface without mode specified
    mock_interface = Mock()
    mock_interface.type = DeviceType.WIRELESS
    mock_interface.settings = Mock()
    mock_interface.settings.wireless_security = None
    mock_interface.settings.wireless = Mock()
    mock_interface.settings.wireless.ssid = "TestSSID"
    mock_interface.settings.wireless.mode = None
    mock_interface.wireless = None
    mock_interface.interface_name = "wlan0"

    result = Interface._map_nm_wifi(mock_interface)

    assert result is not None
    assert result.mode == WifiMode.INFRASTRUCTURE  # Default mode

@@ -19,7 +19,7 @@ from supervisor.exceptions import (
)
from supervisor.host.const import HostFeature
from supervisor.host.manager import HostManager
from supervisor.jobs import JobSchedulerOptions, SupervisorJob
from supervisor.jobs import ChildJobSyncFilter, JobSchedulerOptions, SupervisorJob
from supervisor.jobs.const import JobConcurrency, JobThrottle
from supervisor.jobs.decorator import Job, JobCondition
from supervisor.jobs.job_group import JobGroup
@@ -1003,7 +1003,7 @@ async def test_internal_jobs_no_notify(coresys: CoreSys, ha_ws_client: AsyncMock
    "name": "test_internal_jobs_no_notify_default",
    "reference": None,
    "uuid": ANY,
    "progress": 0,
    "progress": 100,
    "stage": None,
    "done": True,
    "parent_id": None,
@@ -1415,3 +1415,87 @@ async def test_core_supported(coresys: CoreSys, caplog: pytest.LogCaptureFixture

    coresys.jobs.ignore_conditions = [JobCondition.HOME_ASSISTANT_CORE_SUPPORTED]
    assert await test.execute()


async def test_progress_syncing(coresys: CoreSys):
    """Test progress syncing from child jobs to parent."""
    group_child_event = asyncio.Event()
    child_event = asyncio.Event()
    execute_event = asyncio.Event()
    main_event = asyncio.Event()

    class TestClassGroup(JobGroup):
        """Test class group."""

        def __init__(self, coresys: CoreSys) -> None:
            super().__init__(coresys, "test_class_group", "test")

        @Job(name="test_progress_syncing_group_child", internal=True)
        async def test_progress_syncing_group_child(self):
            """Test progress syncing group child."""
            coresys.jobs.current.progress = 50
            main_event.set()
            await group_child_event.wait()
            coresys.jobs.current.progress = 100

    class TestClass:
        """Test class."""

        def __init__(self, coresys: CoreSys):
            """Initialize the test class."""
            self.coresys = coresys
            self.test_group = TestClassGroup(coresys)

        @Job(
            name="test_progress_syncing_execute",
            child_job_syncs=[
                ChildJobSyncFilter(
                    "test_progress_syncing_child_execute", progress_allocation=0.5
                ),
                ChildJobSyncFilter(
                    "test_progress_syncing_group_child",
                    reference="test",
                    progress_allocation=0.5,
                ),
            ],
        )
        async def test_progress_syncing_execute(self):
            """Test progress syncing execute."""
            await self.test_progress_syncing_child_execute()
            await self.test_group.test_progress_syncing_group_child()
            main_event.set()
            await execute_event.wait()

        @Job(name="test_progress_syncing_child_execute", internal=True)
        async def test_progress_syncing_child_execute(self):
            """Test progress syncing child execute."""
            coresys.jobs.current.progress = 50
            main_event.set()
            await child_event.wait()
            coresys.jobs.current.progress = 100

    test = TestClass(coresys)
    job, task = coresys.jobs.schedule_job(
        test.test_progress_syncing_execute, JobSchedulerOptions()
    )

    # First child should've set parent job to 25% progress
    await main_event.wait()
    assert job.progress == 25

    # Now we run to the middle of the second job, which should put us at 75%
    main_event.clear()
    child_event.set()
    await main_event.wait()
    assert job.progress == 75

    # Finally let it run to the end and see progress is 100%
    main_event.clear()
    group_child_event.set()
    await main_event.wait()
    assert job.progress == 100

    # Release and check it is done
    execute_event.set()
    await task
    assert job.done
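
# The 25/75/100 checkpoints above follow from the progress_allocation
# arithmetic; a hedged recomputation (not the jobs-module code itself):
def parent_progress(base: float, allocation: float, child_progress: float) -> float:
    # Each synced child contributes allocation * its own 0-100 progress
    return base + allocation * child_progress

assert parent_progress(0, 0.5, 50) == 25     # first child halfway
assert parent_progress(50, 0.5, 50) == 75    # first child done, second halfway
assert parent_progress(50, 0.5, 100) == 100  # second child done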

@@ -198,7 +198,7 @@ async def test_notify_on_change(coresys: CoreSys, ha_ws_client: AsyncMock):
    "errors": [
        {
            "type": "HassioError",
            "message": "Unknown error, see supervisor logs",
            "message": "Unknown error, see Supervisor logs (check with 'ha supervisor logs')",
            "stage": "test",
        }
    ],
@@ -219,14 +219,14 @@ async def test_notify_on_change(coresys: CoreSys, ha_ws_client: AsyncMock):
    "name": TEST_JOB,
    "reference": "test",
    "uuid": ANY,
    "progress": 50,
    "progress": 100,
    "stage": "test",
    "done": True,
    "parent_id": None,
    "errors": [
        {
            "type": "HassioError",
            "message": "Unknown error, see supervisor logs",
            "message": "Unknown error, see Supervisor logs (check with 'ha supervisor logs')",
            "stage": "test",
        }
    ],

@@ -181,7 +181,6 @@ async def test_reload_updater_triggers_supervisor_update(
    """Test an updater reload triggers a supervisor update if there is one."""
    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
    await coresys.core.set_state(CoreState.RUNNING)
    coresys.security.content_trust = False

    with (
        patch.object(

@@ -17,7 +17,6 @@ from supervisor.exceptions import (
    AudioJobError,
    CliError,
    CliJobError,
    CodeNotaryUntrusted,
    CoreDNSError,
    CoreDNSJobError,
    DockerError,
@@ -337,14 +336,12 @@ async def test_repair_failed(
        patch.object(
            DockerInterface, "arch", new=PropertyMock(return_value=CpuArch.AMD64)
        ),
        patch(
            "supervisor.security.module.cas_validate", side_effect=CodeNotaryUntrusted
        ),
        patch.object(DockerInterface, "install", side_effect=DockerError),
    ):
        await plugin.repair()

    capture_exception.assert_called_once()
    assert check_exception_chain(capture_exception.call_args[0][0], CodeNotaryUntrusted)
    assert check_exception_chain(capture_exception.call_args[0][0], DockerError)


@pytest.mark.parametrize(

@@ -51,7 +51,6 @@ async def test_if_check_make_issue(coresys: CoreSys):
    """Test check for setup."""
    free_space = Issue(IssueType.FREE_SPACE, ContextType.SYSTEM)
    await coresys.core.set_state(CoreState.RUNNING)
    coresys.security.content_trust = False

    with patch("shutil.disk_usage", return_value=(1, 1, 1)):
        await coresys.resolution.check.check_system()
@@ -63,7 +62,6 @@ async def test_if_check_cleanup_issue(coresys: CoreSys):
    """Test check for setup."""
    free_space = Issue(IssueType.FREE_SPACE, ContextType.SYSTEM)
    await coresys.core.set_state(CoreState.RUNNING)
    coresys.security.content_trust = False

    with patch("shutil.disk_usage", return_value=(1, 1, 1)):
        await coresys.resolution.check.check_system()

@@ -1,96 +0,0 @@
"""Test Check Supervisor trust."""

# pylint: disable=import-error,protected-access
from unittest.mock import AsyncMock, patch

from supervisor.const import CoreState
from supervisor.coresys import CoreSys
from supervisor.exceptions import CodeNotaryError, CodeNotaryUntrusted
from supervisor.resolution.checks.supervisor_trust import CheckSupervisorTrust
from supervisor.resolution.const import IssueType, UnhealthyReason


async def test_base(coresys: CoreSys):
    """Test check basics."""
    supervisor_trust = CheckSupervisorTrust(coresys)
    assert supervisor_trust.slug == "supervisor_trust"
    assert supervisor_trust.enabled


async def test_check(coresys: CoreSys):
    """Test check."""
    supervisor_trust = CheckSupervisorTrust(coresys)
    await coresys.core.set_state(CoreState.RUNNING)

    assert len(coresys.resolution.issues) == 0

    coresys.supervisor.check_trust = AsyncMock(side_effect=CodeNotaryError)
    await supervisor_trust.run_check()
    assert coresys.supervisor.check_trust.called

    coresys.supervisor.check_trust = AsyncMock(return_value=None)
    await supervisor_trust.run_check()
    assert coresys.supervisor.check_trust.called

    assert len(coresys.resolution.issues) == 0

    coresys.supervisor.check_trust = AsyncMock(side_effect=CodeNotaryUntrusted)
    await supervisor_trust.run_check()
    assert coresys.supervisor.check_trust.called

    assert len(coresys.resolution.issues) == 1
    assert coresys.resolution.issues[-1].type == IssueType.TRUST

    assert UnhealthyReason.UNTRUSTED in coresys.resolution.unhealthy


async def test_approve(coresys: CoreSys):
    """Test check."""
    supervisor_trust = CheckSupervisorTrust(coresys)
    await coresys.core.set_state(CoreState.RUNNING)

    coresys.supervisor.check_trust = AsyncMock(side_effect=CodeNotaryUntrusted)
    assert await supervisor_trust.approve_check()

    coresys.supervisor.check_trust = AsyncMock(return_value=None)
    assert not await supervisor_trust.approve_check()


async def test_with_global_disable(coresys: CoreSys, caplog):
    """Test when content trust is globally disabled."""
    coresys.security.content_trust = False
    supervisor_trust = CheckSupervisorTrust(coresys)
    await coresys.core.set_state(CoreState.RUNNING)

    assert len(coresys.resolution.issues) == 0
    coresys.security.verify_own_content = AsyncMock(side_effect=CodeNotaryUntrusted)
    await supervisor_trust.run_check()
    assert not coresys.security.verify_own_content.called
    assert (
        "Skipping supervisor_trust, content_trust is globally disabled" in caplog.text
    )


async def test_did_run(coresys: CoreSys):
    """Test that the check ran as expected."""
    supervisor_trust = CheckSupervisorTrust(coresys)
    should_run = supervisor_trust.states
    should_not_run = [state for state in CoreState if state not in should_run]
    assert len(should_run) != 0
    assert len(should_not_run) != 0

    with patch(
        "supervisor.resolution.checks.supervisor_trust.CheckSupervisorTrust.run_check",
        return_value=None,
    ) as check:
        for state in should_run:
            await coresys.core.set_state(state)
            await supervisor_trust()
            check.assert_called_once()
            check.reset_mock()

        for state in should_not_run:
            await coresys.core.set_state(state)
            await supervisor_trust()
            check.assert_not_called()
            check.reset_mock()
@@ -1,46 +0,0 @@
"""Test evaluation base."""

# pylint: disable=import-error,protected-access
from unittest.mock import patch

from supervisor.const import CoreState
from supervisor.coresys import CoreSys
from supervisor.resolution.evaluations.content_trust import EvaluateContentTrust


async def test_evaluation(coresys: CoreSys):
    """Test evaluation."""
    job_conditions = EvaluateContentTrust(coresys)
    await coresys.core.set_state(CoreState.SETUP)

    await job_conditions()
    assert job_conditions.reason not in coresys.resolution.unsupported

    coresys.security.content_trust = False
    await job_conditions()
    assert job_conditions.reason in coresys.resolution.unsupported


async def test_did_run(coresys: CoreSys):
    """Test that the evaluation ran as expected."""
    job_conditions = EvaluateContentTrust(coresys)
    should_run = job_conditions.states
    should_not_run = [state for state in CoreState if state not in should_run]
    assert len(should_run) != 0
    assert len(should_not_run) != 0

    with patch(
        "supervisor.resolution.evaluations.content_trust.EvaluateContentTrust.evaluate",
        return_value=None,
    ) as evaluate:
        for state in should_run:
            await coresys.core.set_state(state)
            await job_conditions()
            evaluate.assert_called_once()
            evaluate.reset_mock()

        for state in should_not_run:
            await coresys.core.set_state(state)
            await job_conditions()
            evaluate.assert_not_called()
            evaluate.reset_mock()
@@ -25,13 +25,18 @@ async def test_evaluation(coresys: CoreSys):
    assert docker_configuration.reason in coresys.resolution.unsupported
    coresys.resolution.unsupported.clear()

    coresys.docker.info.storage = EXPECTED_STORAGE
    coresys.docker.info.storage = EXPECTED_STORAGE[0]
    coresys.docker.info.logging = "unsupported"
    await docker_configuration()
    assert docker_configuration.reason in coresys.resolution.unsupported
    coresys.resolution.unsupported.clear()

    coresys.docker.info.storage = EXPECTED_STORAGE
    coresys.docker.info.storage = "overlay2"
    coresys.docker.info.logging = EXPECTED_LOGGING
    await docker_configuration()
    assert docker_configuration.reason not in coresys.resolution.unsupported

    coresys.docker.info.storage = "overlayfs"
    coresys.docker.info.logging = EXPECTED_LOGGING
    await docker_configuration()
    assert docker_configuration.reason not in coresys.resolution.unsupported

@@ -1,89 +0,0 @@
"""Test evaluation base."""

# pylint: disable=import-error,protected-access
import errno
import os
from pathlib import Path
from unittest.mock import AsyncMock, patch

from supervisor.const import CoreState
from supervisor.coresys import CoreSys
from supervisor.exceptions import CodeNotaryError, CodeNotaryUntrusted
from supervisor.resolution.const import ContextType, IssueType
from supervisor.resolution.data import Issue
from supervisor.resolution.evaluations.source_mods import EvaluateSourceMods


async def test_evaluation(coresys: CoreSys):
    """Test evaluation."""
    with patch(
        "supervisor.resolution.evaluations.source_mods._SUPERVISOR_SOURCE",
        Path(f"{os.getcwd()}/supervisor"),
    ):
        sourcemods = EvaluateSourceMods(coresys)
        await coresys.core.set_state(CoreState.RUNNING)

        assert sourcemods.reason not in coresys.resolution.unsupported
        coresys.security.verify_own_content = AsyncMock(side_effect=CodeNotaryUntrusted)
        await sourcemods()
        assert sourcemods.reason in coresys.resolution.unsupported

        coresys.security.verify_own_content = AsyncMock(side_effect=CodeNotaryError)
        await sourcemods()
        assert sourcemods.reason not in coresys.resolution.unsupported

        coresys.security.verify_own_content = AsyncMock()
        await sourcemods()
        assert sourcemods.reason not in coresys.resolution.unsupported


async def test_did_run(coresys: CoreSys):
    """Test that the evaluation ran as expected."""
    sourcemods = EvaluateSourceMods(coresys)
    should_run = sourcemods.states
    should_not_run = [state for state in CoreState if state not in should_run]
    assert len(should_run) != 0
    assert len(should_not_run) != 0

    with patch(
        "supervisor.resolution.evaluations.source_mods.EvaluateSourceMods.evaluate",
        return_value=None,
    ) as evaluate:
        for state in should_run:
            await coresys.core.set_state(state)
            await sourcemods()
            evaluate.assert_called_once()
            evaluate.reset_mock()

        for state in should_not_run:
            await coresys.core.set_state(state)
            await sourcemods()
            evaluate.assert_not_called()
            evaluate.reset_mock()


async def test_evaluation_error(coresys: CoreSys):
    """Test error reading file during evaluation."""
    sourcemods = EvaluateSourceMods(coresys)
    await coresys.core.set_state(CoreState.RUNNING)
    corrupt_fs = Issue(IssueType.CORRUPT_FILESYSTEM, ContextType.SYSTEM)

    assert sourcemods.reason not in coresys.resolution.unsupported
    assert corrupt_fs not in coresys.resolution.issues

    with patch(
        "supervisor.utils.codenotary.dirhash",
        side_effect=(err := OSError()),
    ):
        err.errno = errno.EBUSY
        await sourcemods()
        assert sourcemods.reason not in coresys.resolution.unsupported
        assert corrupt_fs in coresys.resolution.issues
        assert coresys.core.healthy is True

        coresys.resolution.dismiss_issue(corrupt_fs)
        err.errno = errno.EBADMSG
        await sourcemods()
        assert sourcemods.reason not in coresys.resolution.unsupported
        assert corrupt_fs in coresys.resolution.issues
        assert coresys.core.healthy is False
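
# Hedged sketch of the errno split asserted above: an OSError while hashing the
# source tree always files a corrupt-filesystem issue, and only EBADMSG (data
# corruption) additionally marks the system unhealthy.
import errno

def classify_oserror(err: OSError) -> tuple[bool, bool]:
    """Return (add_corrupt_fs_issue, mark_unhealthy)."""
    return True, err.errno == errno.EBADMSG

assert classify_oserror(OSError(errno.EBUSY, "busy")) == (True, False)
assert classify_oserror(OSError(errno.EBADMSG, "bad")) == (True, True)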
@@ -1,69 +0,0 @@
"""Test evaluation base."""

# pylint: disable=import-error,protected-access
from datetime import timedelta
from unittest.mock import AsyncMock

import time_machine

from supervisor.coresys import CoreSys
from supervisor.resolution.const import ContextType, IssueType, SuggestionType
from supervisor.resolution.data import Issue, Suggestion
from supervisor.resolution.fixups.system_execute_integrity import (
    FixupSystemExecuteIntegrity,
)
from supervisor.security.const import ContentTrustResult, IntegrityResult
from supervisor.utils.dt import utcnow


async def test_fixup(coresys: CoreSys, supervisor_internet: AsyncMock):
    """Test fixup."""
    system_execute_integrity = FixupSystemExecuteIntegrity(coresys)

    assert system_execute_integrity.auto

    coresys.resolution.add_suggestion(
        Suggestion(SuggestionType.EXECUTE_INTEGRITY, ContextType.SYSTEM)
    )
    coresys.resolution.add_issue(Issue(IssueType.TRUST, ContextType.SYSTEM))

    coresys.security.integrity_check = AsyncMock(
        return_value=IntegrityResult(
            ContentTrustResult.PASS,
            ContentTrustResult.PASS,
            {"audio": ContentTrustResult.PASS},
        )
    )

    await system_execute_integrity()

    assert coresys.security.integrity_check.called
    assert len(coresys.resolution.suggestions) == 0
    assert len(coresys.resolution.issues) == 0


async def test_fixup_error(coresys: CoreSys, supervisor_internet: AsyncMock):
    """Test fixup."""
    system_execute_integrity = FixupSystemExecuteIntegrity(coresys)

    assert system_execute_integrity.auto

    coresys.resolution.add_suggestion(
        Suggestion(SuggestionType.EXECUTE_INTEGRITY, ContextType.SYSTEM)
    )
    coresys.resolution.add_issue(Issue(IssueType.TRUST, ContextType.SYSTEM))

    coresys.security.integrity_check = AsyncMock(
        return_value=IntegrityResult(
            ContentTrustResult.FAILED,
            ContentTrustResult.PASS,
            {"audio": ContentTrustResult.PASS},
        )
    )

    with time_machine.travel(utcnow() + timedelta(hours=24)):
        await system_execute_integrity()

    assert coresys.security.integrity_check.called
    assert len(coresys.resolution.suggestions) == 1
    assert len(coresys.resolution.issues) == 1
@@ -1,21 +1,15 @@
"""Test evaluations."""

from unittest.mock import Mock, patch
from unittest.mock import Mock

from supervisor.const import CoreState
from supervisor.coresys import CoreSys
from supervisor.utils import check_exception_chain


async def test_evaluate_system_error(coresys: CoreSys, capture_exception: Mock):
    """Test error while evaluating system."""
    await coresys.core.set_state(CoreState.RUNNING)

    with patch(
        "supervisor.resolution.evaluations.source_mods.calc_checksum_path_sourcecode",
        side_effect=RuntimeError,
    ):
        await coresys.resolution.evaluate.evaluate_system()
    await coresys.resolution.evaluate.evaluate_system()

    capture_exception.assert_called_once()
    assert check_exception_chain(capture_exception.call_args[0][0], RuntimeError)
    capture_exception.assert_not_called()

@@ -1,127 +0,0 @@
"""Testing handling with Security."""

from unittest.mock import AsyncMock, patch

import pytest

from supervisor.coresys import CoreSys
from supervisor.exceptions import CodeNotaryError, CodeNotaryUntrusted
from supervisor.security.const import ContentTrustResult


async def test_content_trust(coresys: CoreSys):
    """Test Content-Trust."""

    with patch("supervisor.security.module.cas_validate", AsyncMock()) as cas_validate:
        await coresys.security.verify_content("test@mail.com", "ffffffffffffff")
        assert cas_validate.called
        cas_validate.assert_called_once_with("test@mail.com", "ffffffffffffff")

    with patch(
        "supervisor.security.module.cas_validate", AsyncMock()
    ) as cas_validate:
        await coresys.security.verify_own_content("ffffffffffffff")
        assert cas_validate.called
        cas_validate.assert_called_once_with(
            "notary@home-assistant.io", "ffffffffffffff"
        )


async def test_disabled_content_trust(coresys: CoreSys):
    """Test Content-Trust."""
    coresys.security.content_trust = False

    with patch("supervisor.security.module.cas_validate", AsyncMock()) as cas_validate:
        await coresys.security.verify_content("test@mail.com", "ffffffffffffff")
        assert not cas_validate.called

    with patch("supervisor.security.module.cas_validate", AsyncMock()) as cas_validate:
        await coresys.security.verify_own_content("ffffffffffffff")
        assert not cas_validate.called


async def test_force_content_trust(coresys: CoreSys):
    """Force Content-Trust tests."""

    with patch(
        "supervisor.security.module.cas_validate",
        AsyncMock(side_effect=CodeNotaryError),
    ) as cas_validate:
        await coresys.security.verify_content("test@mail.com", "ffffffffffffff")
        assert cas_validate.called
        cas_validate.assert_called_once_with("test@mail.com", "ffffffffffffff")

    coresys.security.force = True

    with (
        patch(
            "supervisor.security.module.cas_validate",
            AsyncMock(side_effect=CodeNotaryError),
        ) as cas_validate,
        pytest.raises(CodeNotaryError),
    ):
        await coresys.security.verify_content("test@mail.com", "ffffffffffffff")


async def test_integrity_check_disabled(coresys: CoreSys):
    """Test integrity check with disabled content trust."""
    coresys.security.content_trust = False

    result = await coresys.security.integrity_check.__wrapped__(coresys.security)

    assert result.core == ContentTrustResult.UNTESTED
    assert result.supervisor == ContentTrustResult.UNTESTED


async def test_integrity_check(coresys: CoreSys, install_addon_ssh):
    """Test integrity check with content trust."""
    coresys.homeassistant.core.check_trust = AsyncMock()
    coresys.supervisor.check_trust = AsyncMock()
    install_addon_ssh.check_trust = AsyncMock()
    install_addon_ssh.data["codenotary"] = "test@example.com"

    result = await coresys.security.integrity_check.__wrapped__(coresys.security)

    assert result.core == ContentTrustResult.PASS
    assert result.supervisor == ContentTrustResult.PASS
    assert result.addons[install_addon_ssh.slug] == ContentTrustResult.PASS


async def test_integrity_check_error(coresys: CoreSys, install_addon_ssh):
    """Test integrity check with content trust issues."""
    coresys.homeassistant.core.check_trust = AsyncMock(side_effect=CodeNotaryUntrusted)
    coresys.supervisor.check_trust = AsyncMock(side_effect=CodeNotaryUntrusted)
    install_addon_ssh.check_trust = AsyncMock(side_effect=CodeNotaryUntrusted)
    install_addon_ssh.data["codenotary"] = "test@example.com"

    result = await coresys.security.integrity_check.__wrapped__(coresys.security)

    assert result.core == ContentTrustResult.ERROR
    assert result.supervisor == ContentTrustResult.ERROR
    assert result.addons[install_addon_ssh.slug] == ContentTrustResult.ERROR


async def test_integrity_check_failed(coresys: CoreSys, install_addon_ssh):
    """Test integrity check with content trust failed."""
    coresys.homeassistant.core.check_trust = AsyncMock(side_effect=CodeNotaryError)
    coresys.supervisor.check_trust = AsyncMock(side_effect=CodeNotaryError)
    install_addon_ssh.check_trust = AsyncMock(side_effect=CodeNotaryError)
    install_addon_ssh.data["codenotary"] = "test@example.com"

    result = await coresys.security.integrity_check.__wrapped__(coresys.security)

    assert result.core == ContentTrustResult.FAILED
    assert result.supervisor == ContentTrustResult.FAILED
    assert result.addons[install_addon_ssh.slug] == ContentTrustResult.FAILED


async def test_integrity_check_addon(coresys: CoreSys, install_addon_ssh):
    """Test integrity check with content trust but no signed add-ons."""
    coresys.homeassistant.core.check_trust = AsyncMock()
    coresys.supervisor.check_trust = AsyncMock()

    result = await coresys.security.integrity_check.__wrapped__(coresys.security)

    assert result.core == ContentTrustResult.PASS
    assert result.supervisor == ContentTrustResult.PASS
    assert result.addons[install_addon_ssh.slug] == ContentTrustResult.UNTESTED
|
||||
@@ -86,10 +86,9 @@ async def test_os_update_path(
|
||||
"""Test OS upgrade path across major versions."""
|
||||
coresys.os._board = "rpi4" # pylint: disable=protected-access
|
||||
coresys.os._version = AwesomeVersion(version) # pylint: disable=protected-access
|
||||
with patch.object(type(coresys.security), "verify_own_content"):
|
||||
await coresys.updater.fetch_data()
|
||||
await coresys.updater.fetch_data()
|
||||
|
||||
assert coresys.updater.version_hassos == AwesomeVersion(expected)
|
||||
assert coresys.updater.version_hassos == AwesomeVersion(expected)
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("no_job_throttle")
|
||||
@@ -105,7 +104,6 @@ async def test_delayed_fetch_for_connectivity(
|
||||
load_binary_fixture("version_stable.json")
|
||||
)
|
||||
coresys.websession.head = AsyncMock()
|
||||
coresys.security.verify_own_content = AsyncMock()
|
||||
|
||||
# Network connectivity change causes a series of async tasks to eventually do a version fetch
|
||||
# Rather then use some kind of sleep loop, set up listener for start of fetch data job
|
||||
|
||||
@@ -1,128 +0,0 @@
"""Test CodeNotary."""

from __future__ import annotations

from dataclasses import dataclass
from unittest.mock import AsyncMock, Mock, patch

import pytest

from supervisor.exceptions import (
    CodeNotaryBackendError,
    CodeNotaryError,
    CodeNotaryUntrusted,
)
from supervisor.utils.codenotary import calc_checksum, cas_validate

pytest.skip("code notary has been disabled due to issues", allow_module_level=True)


@dataclass
class SubprocessResponse:
    """Class for specifying subprocess exec response."""

    returncode: int = 0
    data: str = ""
    error: str | None = None
    exception: Exception | None = None


@pytest.fixture(name="subprocess_exec")
def fixture_subprocess_exec(request):
    """Mock subprocess exec with specific return."""
    response = request.param
    if response.exception:
        communicate_return = AsyncMock(side_effect=response.exception)
    else:
        communicate_return = AsyncMock(return_value=(response.data, response.error))

    exec_return = Mock(returncode=response.returncode, communicate=communicate_return)

    with patch(
        "supervisor.utils.codenotary.asyncio.create_subprocess_exec",
        return_value=exec_return,
    ) as subprocess_exec:
        yield subprocess_exec
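
A detail worth noting about the fixture above: it reads request.param, which pytest only supplies when a test parametrizes the fixture with indirect=True (as the tests below do). A self-contained illustration of that mechanism, with made-up names:

import pytest


@pytest.fixture(name="reply")
def fixture_reply(request):
    """Receive the parametrize value through request.param."""
    return request.param.upper()


# indirect=True routes each value through the fixture instead of the test arg.
@pytest.mark.parametrize("reply", ["ok", "fail"], indirect=True)
def test_reply(reply):
    assert reply in ("OK", "FAIL")
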
def test_checksum_calc():
    """Calc checksum as test."""
    assert calc_checksum("test") == calc_checksum(b"test")
    assert (
        calc_checksum("test")
        == "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"
    )
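
The expected digest here is simply the SHA-256 of "test", which suggests calc_checksum is a thin hashlib wrapper that encodes str input before hashing. A plausible sketch — an assumption, not the actual implementation:

import hashlib


def calc_checksum(data: str | bytes) -> str:
    """Assumed behavior: SHA-256 hex digest, encoding str input first."""
    if isinstance(data, str):
        data = data.encode()
    return hashlib.sha256(data).hexdigest()


assert (
    calc_checksum("test")
    == "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"
)
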
async def test_valid_checksum():
    """Test a valid authorization."""
    await cas_validate(
        "notary@home-assistant.io",
        "4434a33ff9c695e870bc5bbe04230ea3361ecf4c129eb06133dd1373975a43f0",
    )


async def test_invalid_checksum():
    """Test an invalid authorization."""
    with pytest.raises(CodeNotaryUntrusted):
        await cas_validate(
            "notary@home-assistant.io",
            "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
        )


@pytest.mark.parametrize(
    "subprocess_exec",
    [SubprocessResponse(returncode=1, error=b"x is not notarized")],
    indirect=True,
)
async def test_not_notarized_error(subprocess_exec):
    """Test receiving a not-notarized error response from the command."""
    with pytest.raises(CodeNotaryUntrusted):
        await cas_validate(
            "notary@home-assistant.io",
            "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
        )


@pytest.mark.parametrize(
    "subprocess_exec",
    [
        SubprocessResponse(returncode=1, error=b"test"),
        SubprocessResponse(returncode=0, data='{"error":"asn1: structure error"}'),
        SubprocessResponse(returncode=1, error="test".encode("utf-16")),
    ],
    indirect=True,
)
async def test_cas_backend_error(subprocess_exec):
    """Test backend error executing cas command."""
    with pytest.raises(CodeNotaryBackendError):
        await cas_validate(
            "notary@home-assistant.io",
            "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
        )


@pytest.mark.parametrize(
    "subprocess_exec",
    [SubprocessResponse(returncode=0, data='{"status":1}')],
    indirect=True,
)
async def test_cas_notarized_untrusted(subprocess_exec):
    """Test cas found notarized but untrusted content."""
    with pytest.raises(CodeNotaryUntrusted):
        await cas_validate(
            "notary@home-assistant.io",
            "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
        )


@pytest.mark.parametrize(
    "subprocess_exec", [SubprocessResponse(exception=OSError())], indirect=True
)
async def test_cas_exec_os_error(subprocess_exec):
    """Test OS error attempting to execute cas command."""
    with pytest.raises(CodeNotaryError):
        await cas_validate(
            "notary@home-assistant.io",
            "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
        )
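
Read together, the parametrized cases above pin down the error mapping expected of cas_validate: a non-zero exit with "not notarized" on stderr means untrusted; any other non-zero exit, undecodable stderr, or an error payload in the JSON output is a backend error; a parsed status != 0 is notarized-but-untrusted; and an OSError while spawning the process surfaces as the generic CodeNotaryError. The following sketch reconstructs that dispatch from the tests alone and is not the real implementation:

import asyncio
import json

from supervisor.exceptions import (
    CodeNotaryBackendError,
    CodeNotaryError,
    CodeNotaryUntrusted,
)


async def cas_validate_sketch(signer: str, checksum: str) -> None:
    """Approximate the error mapping that the tests above pin down."""
    try:
        proc = await asyncio.create_subprocess_exec(
            "cas",  # real CLI arguments (signer, checksum) elided in this sketch
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        data, error = await proc.communicate()
    except OSError as err:
        # The cas binary could not be executed at all -> generic CodeNotaryError
        raise CodeNotaryError() from err

    if proc.returncode != 0:
        try:
            message = error.decode("utf-8")
        except UnicodeDecodeError as err:
            raise CodeNotaryBackendError() from err  # undecodable stderr
        if "not notarized" in message:
            raise CodeNotaryUntrusted()
        raise CodeNotaryBackendError()  # any other non-zero exit

    result = json.loads(data)
    if "error" in result:
        raise CodeNotaryBackendError()  # e.g. {"error": "asn1: structure error"}
    if result.get("status", 0) != 0:
        raise CodeNotaryUntrusted()  # notarized but flagged untrusted
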
@@ -275,3 +275,25 @@ async def test_parsing_boots_none():
        boots.append((index, boot_id))

    assert boots == []


async def test_parsing_non_utf8_message():
    """Test that non-UTF-8 bytes in message are replaced with replacement character."""
    journal_logs, stream = _journal_logs_mock()
    # Include invalid UTF-8 sequence (0xff is not valid UTF-8)
    stream.feed_data(b"MESSAGE=Hello, \xff world!\n\n")
    _, line = await anext(journal_logs_reader(journal_logs))
    assert line == "Hello, \ufffd world!"


async def test_parsing_non_utf8_in_binary_message():
    """Test that non-UTF-8 bytes in binary format message are replaced."""
    journal_logs, stream = _journal_logs_mock()
    # Binary format with invalid UTF-8 sequence
    stream.feed_data(
        b"ID=1\n"
        b"MESSAGE\n\x0f\x00\x00\x00\x00\x00\x00\x00Hello, \xff world!\n"
        b"AFTER=after\n\n"
    )
    _, line = await anext(journal_logs_reader(journal_logs))
    assert line == "Hello, \ufffd world!"
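
The binary payload fed in above follows systemd's journal export format for binary-safe fields: the field name, a newline, a 64-bit little-endian payload length, the payload bytes, and a trailing newline. The \x0f\x00...\x00 run is just the payload length 15 encoded that way. A small helper that reproduces the exact test bytes:

import struct


def export_binary_field(name: bytes, value: bytes) -> bytes:
    """Encode one binary field in systemd journal export format."""
    # field name, newline, 64-bit little-endian payload length, payload, newline
    return name + b"\n" + struct.pack("<Q", len(value)) + value + b"\n"


payload = b"Hello, \xff world!"  # 15 bytes, one of them invalid UTF-8
record = b"ID=1\n" + export_binary_field(b"MESSAGE", payload) + b"AFTER=after\n\n"
assert record == (
    b"ID=1\n"
    b"MESSAGE\n\x0f\x00\x00\x00\x00\x00\x00\x00Hello, \xff world!\n"
    b"AFTER=after\n\n"
)
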