mirror of https://github.com/home-assistant/supervisor.git (synced 2025-12-05 23:48:16 +00:00)

Compare commits (3 commits): duplicate-… ... refactor-d…

| Author | SHA1 | Date |
|---|---|---|
|  | 87e1e7a3ab |  |
|  | e7c8700db9 |  |
|  | a4f681586e |  |
@@ -1,7 +1,6 @@
# General files
.git
.github
.gitkeep
.devcontainer
.vscode
.github/workflows/builder.yml (85 lines changed)

@@ -53,10 +53,10 @@ jobs:
version: ${{ steps.version.outputs.version }}
channel: ${{ steps.version.outputs.channel }}
publish: ${{ steps.version.outputs.publish }}
build_wheels: ${{ steps.requirements.outputs.build_wheels }}
requirements: ${{ steps.requirements.outputs.changed }}
steps:
- name: Checkout the repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
fetch-depth: 0

@@ -72,25 +72,20 @@ jobs:

- name: Get changed files
id: changed_files
if: github.event_name != 'release'
if: steps.version.outputs.publish == 'false'
uses: masesgroup/retrieve-changed-files@491e80760c0e28d36ca6240a27b1ccb8e1402c13 # v3.0.0

- name: Check if requirements files changed
id: requirements
run: |
# No wheels build necessary for releases
if [[ "${{ github.event_name }}" == "release" ]]; then
echo "build_wheels=false" >> "$GITHUB_OUTPUT"
elif [[ "${{ steps.changed_files.outputs.all }}" =~ (requirements\.txt|build\.yaml|\.github/workflows/builder\.yml) ]]; then
echo "build_wheels=true" >> "$GITHUB_OUTPUT"
else
echo "build_wheels=false" >> "$GITHUB_OUTPUT"
if [[ "${{ steps.changed_files.outputs.all }}" =~ (requirements.txt|build.yaml) ]]; then
echo "changed=true" >> "$GITHUB_OUTPUT"
fi

build:
name: Build ${{ matrix.arch }} supervisor
needs: init
runs-on: ${{ matrix.runs-on }}
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
@@ -98,66 +93,34 @@ jobs:
strategy:
matrix:
arch: ${{ fromJson(needs.init.outputs.architectures) }}
include:
- runs-on: ubuntu-24.04
- runs-on: ubuntu-24.04-arm
arch: aarch64
env:
WHEELS_ABI: cp313
WHEELS_TAG: musllinux_1_2
WHEELS_APK_DEPS: "libffi-dev;openssl-dev;yaml-dev"
WHEELS_SKIP_BINARY: aiohttp
steps:
- name: Checkout the repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
fetch-depth: 0

- name: Write env-file for wheels build
if: needs.init.outputs.build_wheels == 'true'
- name: Write env-file
if: needs.init.outputs.requirements == 'true'
run: |
(
# Fix out of memory issues with rust
echo "CARGO_NET_GIT_FETCH_WITH_CLI=true"
) > .env_file

- name: Build and publish wheels
if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'true'
uses: home-assistant/wheels@e5742a69d69f0e274e2689c998900c7d19652c21 # 2025.12.0
# home-assistant/wheels doesn't support sha pinning
- name: Build wheels
if: needs.init.outputs.requirements == 'true'
uses: home-assistant/wheels@2025.11.0
with:
abi: cp313
tag: musllinux_1_2
arch: ${{ matrix.arch }}
wheels-key: ${{ secrets.WHEELS_KEY }}
abi: ${{ env.WHEELS_ABI }}
tag: ${{ env.WHEELS_TAG }}
arch: ${{ matrix.arch }}
apk: ${{ env.WHEELS_APK_DEPS }}
skip-binary: ${{ env.WHEELS_SKIP_BINARY }}
apk: "libffi-dev;openssl-dev;yaml-dev"
skip-binary: aiohttp
env-file: true
requirements: "requirements.txt"

- name: Build local wheels
if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'false'
uses: home-assistant/wheels@e5742a69d69f0e274e2689c998900c7d19652c21 # 2025.12.0
with:
wheels-host: ""
wheels-user: ""
wheels-key: ""
local-wheels-repo-path: "wheels/"
abi: ${{ env.WHEELS_ABI }}
tag: ${{ env.WHEELS_TAG }}
arch: ${{ matrix.arch }}
apk: ${{ env.WHEELS_APK_DEPS }}
skip-binary: ${{ env.WHEELS_SKIP_BINARY }}
env-file: true
requirements: "requirements.txt"

- name: Upload local wheels artifact
if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'false'
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: wheels-${{ matrix.arch }}
path: wheels
retention-days: 1

- name: Set version
if: needs.init.outputs.publish == 'true'
uses: home-assistant/actions/helpers/version@master
@@ -204,7 +167,6 @@ jobs:
- name: Build supervisor
uses: home-assistant/builder@2025.11.0
with:
image: ${{ matrix.arch }}
args: |
$BUILD_ARGS \
--${{ matrix.arch }} \
@@ -219,7 +181,7 @@ jobs:
steps:
- name: Checkout the repository
if: needs.init.outputs.publish == 'true'
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0

- name: Initialize git
if: needs.init.outputs.publish == 'true'
@@ -244,14 +206,7 @@ jobs:
timeout-minutes: 60
steps:
- name: Checkout the repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

- name: Download local wheels artifact
if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'false'
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
with:
name: wheels-amd64
path: wheels
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0

# home-assistant/builder doesn't support sha pinning
- name: Build the Supervisor
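For context, the new `requirements` step above decides whether wheels are built by writing a step output to `$GITHUB_OUTPUT`, which later jobs read back as `needs.init.outputs.build_wheels`. The sketch below mirrors that decision outside of CI and is illustrative only: the `CHANGED_FILES` value is made up, and `grep -E` stands in for the bash `[[ =~ ]]` match used in the workflow.

    #!/usr/bin/env sh
    # Stand-in for ${{ steps.changed_files.outputs.all }} in the workflow.
    CHANGED_FILES="requirements.txt docs/README.md"
    # GitHub Actions provides this file; fall back to stdout when run locally.
    GITHUB_OUTPUT="${GITHUB_OUTPUT:-/dev/stdout}"

    if printf '%s' "$CHANGED_FILES" | grep -Eq 'requirements\.txt|build\.yaml|\.github/workflows/builder\.yml'; then
        echo "build_wheels=true" >> "$GITHUB_OUTPUT"
    else
        echo "build_wheels=false" >> "$GITHUB_OUTPUT"
    fi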
.github/workflows/ci.yaml (20 lines changed)

@@ -26,7 +26,7 @@ jobs:
name: Prepare Python dependencies
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python
id: python
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
@@ -68,7 +68,7 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
id: python
@@ -111,7 +111,7 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
id: python
@@ -154,7 +154,7 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Register hadolint problem matcher
run: |
echo "::add-matcher::.github/workflows/matchers/hadolint.json"
@@ -169,7 +169,7 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
id: python
@@ -213,7 +213,7 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
id: python
@@ -257,7 +257,7 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
id: python
@@ -293,7 +293,7 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
id: python
@@ -339,7 +339,7 @@ jobs:
name: Run tests Python ${{ needs.prepare.outputs.python-version }}
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
id: python
@@ -398,7 +398,7 @@ jobs:
needs: ["pytest", "prepare"]
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
id: python
.github/workflows/release-drafter.yml (2 lines changed)

@@ -11,7 +11,7 @@ jobs:
name: Release Drafter
steps:
- name: Checkout the repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
fetch-depth: 0
.github/workflows/sentry.yaml (2 lines changed)

@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Sentry Release
uses: getsentry/action-release@128c5058bbbe93c8e02147fe0a9c713f166259a6 # v3.4.0
env:
.github/workflows/stale.yml (2 lines changed)

@@ -9,7 +9,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
- uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-stale: 30
.github/workflows/update_frontend.yml (4 lines changed)

@@ -14,7 +14,7 @@ jobs:
latest_version: ${{ steps.latest_frontend_version.outputs.latest_tag }}
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Get latest frontend release
id: latest_frontend_version
uses: abatilo/release-info-action@32cb932219f1cee3fc4f4a298fd65ead5d35b661 # v1.3.3
@@ -49,7 +49,7 @@ jobs:
if: needs.check-version.outputs.skip != 'true'
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Clear www folder
run: |
rm -rf supervisor/api/panel/*
.gitignore (5 lines changed)

@@ -24,9 +24,6 @@ var/
.installed.cfg
*.egg

# Local wheels
wheels/**/*.whl

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
@@ -105,4 +102,4 @@ ENV/
/.dmypy.json

# Mac
.DS_Store
.DS_Store
Dockerfile (12 lines changed)

@@ -32,17 +32,7 @@ RUN \
# Install requirements
RUN \
--mount=type=bind,source=./requirements.txt,target=/usr/src/requirements.txt \
--mount=type=bind,source=./wheels,target=/usr/src/wheels \
if ls /usr/src/wheels/musllinux/* >/dev/null 2>&1; then \
LOCAL_WHEELS=/usr/src/wheels/musllinux; \
echo "Using local wheels from: $LOCAL_WHEELS"; \
else \
LOCAL_WHEELS=; \
echo "No local wheels found"; \
fi && \
uv pip install --compile-bytecode --no-cache --no-build \
-r requirements.txt \
${LOCAL_WHEELS:+--find-links $LOCAL_WHEELS}
uv pip install --compile-bytecode --no-cache --no-build -r requirements.txt

# Install Home Assistant Supervisor
COPY . supervisor
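A quick aside on the shell idiom the new install line relies on: `${LOCAL_WHEELS:+--find-links $LOCAL_WHEELS}` expands to the extra flag only when `LOCAL_WHEELS` is set and non-empty, and to nothing otherwise. A minimal sketch in plain POSIX shell, run outside the Dockerfile, with the path invented for illustration:

    #!/usr/bin/env sh
    # Set and non-empty: the flag and its value are included.
    LOCAL_WHEELS=/usr/src/wheels/musllinux
    echo uv pip install -r requirements.txt ${LOCAL_WHEELS:+--find-links $LOCAL_WHEELS}
    # -> uv pip install -r requirements.txt --find-links /usr/src/wheels/musllinux

    # Empty: the whole expansion disappears.
    LOCAL_WHEELS=
    echo uv pip install -r requirements.txt ${LOCAL_WHEELS:+--find-links $LOCAL_WHEELS}
    # -> uv pip install -r requirements.txt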
@@ -25,7 +25,7 @@ pyudev==0.24.4
PyYAML==6.0.3
requests==2.32.5
securetar==2025.2.1
sentry-sdk==2.47.0
sentry-sdk==2.46.0
setuptools==80.9.0
voluptuous==0.15.2
dbus-fast==3.1.2

@@ -8,9 +8,9 @@ pytest-asyncio==1.3.0
pytest-cov==7.0.0
pytest-timeout==2.4.0
pytest==9.0.1
ruff==0.14.8
ruff==0.14.7
time-machine==3.1.0
types-docker==7.1.0.20251202
types-docker==7.1.0.20251129
types-pyyaml==6.0.12.20250915
types-requests==2.32.4.20250913
urllib3==2.5.0
@@ -66,22 +66,13 @@ from ..docker.const import ContainerState
|
||||
from ..docker.monitor import DockerContainerStateEvent
|
||||
from ..docker.stats import DockerStats
|
||||
from ..exceptions import (
|
||||
AddonBackupMetadataInvalidError,
|
||||
AddonBuildFailedUnknownError,
|
||||
AddonConfigurationInvalidError,
|
||||
AddonNotRunningError,
|
||||
AddonConfigurationError,
|
||||
AddonNotSupportedError,
|
||||
AddonNotSupportedWriteStdinError,
|
||||
AddonPrePostBackupCommandReturnedError,
|
||||
AddonsError,
|
||||
AddonsJobError,
|
||||
AddonUnknownError,
|
||||
BackupRestoreUnknownError,
|
||||
ConfigurationFileError,
|
||||
DockerBuildError,
|
||||
DockerError,
|
||||
HostAppArmorError,
|
||||
StoreAddonNotFoundError,
|
||||
)
|
||||
from ..hardware.data import Device
|
||||
from ..homeassistant.const import WSEvent
|
||||
@@ -244,7 +235,7 @@ class Addon(AddonModel):
|
||||
await self.instance.check_image(self.version, default_image, self.arch)
|
||||
except DockerError:
|
||||
_LOGGER.info("No %s addon Docker image %s found", self.slug, self.image)
|
||||
with suppress(DockerError, AddonNotSupportedError):
|
||||
with suppress(DockerError):
|
||||
await self.instance.install(self.version, default_image, arch=self.arch)
|
||||
|
||||
self.persist[ATTR_IMAGE] = default_image
|
||||
@@ -727,16 +718,18 @@ class Addon(AddonModel):
|
||||
options = self.schema.validate(self.options)
|
||||
await self.sys_run_in_executor(write_json_file, self.path_options, options)
|
||||
except vol.Invalid as ex:
|
||||
raise AddonConfigurationInvalidError(
|
||||
_LOGGER.error,
|
||||
addon=self.slug,
|
||||
validation_error=humanize_error(self.options, ex),
|
||||
) from None
|
||||
except ConfigurationFileError as err:
|
||||
_LOGGER.error(
|
||||
"Add-on %s has invalid options: %s",
|
||||
self.slug,
|
||||
humanize_error(self.options, ex),
|
||||
)
|
||||
except ConfigurationFileError:
|
||||
_LOGGER.error("Add-on %s can't write options", self.slug)
|
||||
raise AddonUnknownError(addon=self.slug) from err
|
||||
else:
|
||||
_LOGGER.debug("Add-on %s write options: %s", self.slug, options)
|
||||
return
|
||||
|
||||
_LOGGER.debug("Add-on %s write options: %s", self.slug, options)
|
||||
raise AddonConfigurationError()
|
||||
|
||||
@Job(
|
||||
name="addon_unload",
|
||||
@@ -779,7 +772,7 @@ class Addon(AddonModel):
|
||||
async def install(self) -> None:
|
||||
"""Install and setup this addon."""
|
||||
if not self.addon_store:
|
||||
raise StoreAddonNotFoundError(addon=self.slug)
|
||||
raise AddonsError("Missing from store, cannot install!")
|
||||
|
||||
await self.sys_addons.data.install(self.addon_store)
|
||||
|
||||
@@ -800,17 +793,9 @@ class Addon(AddonModel):
|
||||
await self.instance.install(
|
||||
self.latest_version, self.addon_store.image, arch=self.arch
|
||||
)
|
||||
except AddonsError:
|
||||
await self.sys_addons.data.uninstall(self)
|
||||
raise
|
||||
except DockerBuildError as err:
|
||||
_LOGGER.error("Could not build image for addon %s: %s", self.slug, err)
|
||||
await self.sys_addons.data.uninstall(self)
|
||||
raise AddonBuildFailedUnknownError(addon=self.slug) from err
|
||||
except DockerError as err:
|
||||
_LOGGER.error("Could not pull image to update addon %s: %s", self.slug, err)
|
||||
await self.sys_addons.data.uninstall(self)
|
||||
raise AddonUnknownError(addon=self.slug) from err
|
||||
raise AddonsError() from err
|
||||
|
||||
# Finish initialization and set up listeners
|
||||
await self.load()
|
||||
@@ -834,8 +819,7 @@ class Addon(AddonModel):
|
||||
try:
|
||||
await self.instance.remove(remove_image=remove_image)
|
||||
except DockerError as err:
|
||||
_LOGGER.error("Could not remove image for addon %s: %s", self.slug, err)
|
||||
raise AddonUnknownError(addon=self.slug) from err
|
||||
raise AddonsError() from err
|
||||
|
||||
self.state = AddonState.UNKNOWN
|
||||
|
||||
@@ -900,7 +884,7 @@ class Addon(AddonModel):
|
||||
if it was running. Else nothing is returned.
|
||||
"""
|
||||
if not self.addon_store:
|
||||
raise StoreAddonNotFoundError(addon=self.slug)
|
||||
raise AddonsError("Missing from store, cannot update!")
|
||||
|
||||
old_image = self.image
|
||||
# Cache data to prevent races with other updates to global
|
||||
@@ -908,12 +892,8 @@ class Addon(AddonModel):
|
||||
|
||||
try:
|
||||
await self.instance.update(store.version, store.image, arch=self.arch)
|
||||
except DockerBuildError as err:
|
||||
_LOGGER.error("Could not build image for addon %s: %s", self.slug, err)
|
||||
raise AddonBuildFailedUnknownError(addon=self.slug) from err
|
||||
except DockerError as err:
|
||||
_LOGGER.error("Could not pull image to update addon %s: %s", self.slug, err)
|
||||
raise AddonUnknownError(addon=self.slug) from err
|
||||
raise AddonsError() from err
|
||||
|
||||
# Stop the addon if running
|
||||
if (last_state := self.state) in {AddonState.STARTED, AddonState.STARTUP}:
|
||||
@@ -955,23 +935,12 @@ class Addon(AddonModel):
|
||||
"""
|
||||
last_state: AddonState = self.state
|
||||
try:
|
||||
# remove docker container and image but not addon config
|
||||
# remove docker container but not addon config
|
||||
try:
|
||||
await self.instance.remove()
|
||||
except DockerError as err:
|
||||
_LOGGER.error("Could not remove image for addon %s: %s", self.slug, err)
|
||||
raise AddonUnknownError(addon=self.slug) from err
|
||||
|
||||
try:
|
||||
await self.instance.install(self.version)
|
||||
except DockerBuildError as err:
|
||||
_LOGGER.error("Could not build image for addon %s: %s", self.slug, err)
|
||||
raise AddonBuildFailedUnknownError(addon=self.slug) from err
|
||||
except DockerError as err:
|
||||
_LOGGER.error(
|
||||
"Could not pull image to update addon %s: %s", self.slug, err
|
||||
)
|
||||
raise AddonUnknownError(addon=self.slug) from err
|
||||
raise AddonsError() from err
|
||||
|
||||
if self.addon_store:
|
||||
await self.sys_addons.data.update(self.addon_store)
|
||||
@@ -1142,9 +1111,8 @@ class Addon(AddonModel):
|
||||
try:
|
||||
await self.instance.run()
|
||||
except DockerError as err:
|
||||
_LOGGER.error("Could not start container for addon %s: %s", self.slug, err)
|
||||
self.state = AddonState.ERROR
|
||||
raise AddonUnknownError(addon=self.slug) from err
|
||||
raise AddonsError() from err
|
||||
|
||||
return self.sys_create_task(self._wait_for_startup())
|
||||
|
||||
@@ -1159,9 +1127,8 @@ class Addon(AddonModel):
|
||||
try:
|
||||
await self.instance.stop()
|
||||
except DockerError as err:
|
||||
_LOGGER.error("Could not stop container for addon %s: %s", self.slug, err)
|
||||
self.state = AddonState.ERROR
|
||||
raise AddonUnknownError(addon=self.slug) from err
|
||||
raise AddonsError() from err
|
||||
|
||||
@Job(
|
||||
name="addon_restart",
|
||||
@@ -1194,15 +1161,9 @@ class Addon(AddonModel):
|
||||
async def stats(self) -> DockerStats:
|
||||
"""Return stats of container."""
|
||||
try:
|
||||
if not await self.is_running():
|
||||
raise AddonNotRunningError(_LOGGER.warning, addon=self.slug)
|
||||
|
||||
return await self.instance.stats()
|
||||
except DockerError as err:
|
||||
_LOGGER.error(
|
||||
"Could not get stats of container for addon %s: %s", self.slug, err
|
||||
)
|
||||
raise AddonUnknownError(addon=self.slug) from err
|
||||
raise AddonsError() from err
|
||||
|
||||
@Job(
|
||||
name="addon_write_stdin",
|
||||
@@ -1212,18 +1173,14 @@ class Addon(AddonModel):
|
||||
async def write_stdin(self, data) -> None:
|
||||
"""Write data to add-on stdin."""
|
||||
if not self.with_stdin:
|
||||
raise AddonNotSupportedWriteStdinError(_LOGGER.error, addon=self.slug)
|
||||
raise AddonNotSupportedError(
|
||||
f"Add-on {self.slug} does not support writing to stdin!", _LOGGER.error
|
||||
)
|
||||
|
||||
try:
|
||||
if not await self.is_running():
|
||||
raise AddonNotRunningError(_LOGGER.warning, addon=self.slug)
|
||||
|
||||
await self.instance.write_stdin(data)
|
||||
return await self.instance.write_stdin(data)
|
||||
except DockerError as err:
|
||||
_LOGGER.error(
|
||||
"Could not write stdin to container for addon %s: %s", self.slug, err
|
||||
)
|
||||
raise AddonUnknownError(addon=self.slug) from err
|
||||
raise AddonsError() from err
|
||||
|
||||
async def _backup_command(self, command: str) -> None:
|
||||
try:
|
||||
@@ -1232,14 +1189,15 @@ class Addon(AddonModel):
|
||||
_LOGGER.debug(
|
||||
"Pre-/Post backup command failed with: %s", command_return.output
|
||||
)
|
||||
raise AddonPrePostBackupCommandReturnedError(
|
||||
_LOGGER.error, addon=self.slug, exit_code=command_return.exit_code
|
||||
raise AddonsError(
|
||||
f"Pre-/Post backup command returned error code: {command_return.exit_code}",
|
||||
_LOGGER.error,
|
||||
)
|
||||
except DockerError as err:
|
||||
_LOGGER.error(
|
||||
"Failed running pre-/post backup command %s: %s", command, err
|
||||
)
|
||||
raise AddonUnknownError(addon=self.slug) from err
|
||||
raise AddonsError(
|
||||
f"Failed running pre-/post backup command {command}: {str(err)}",
|
||||
_LOGGER.error,
|
||||
) from err
|
||||
|
||||
@Job(
|
||||
name="addon_begin_backup",
|
||||
@@ -1328,14 +1286,15 @@ class Addon(AddonModel):
|
||||
try:
|
||||
self.instance.export_image(temp_path.joinpath("image.tar"))
|
||||
except DockerError as err:
|
||||
raise BackupRestoreUnknownError() from err
|
||||
raise AddonsError() from err
|
||||
|
||||
# Store local configs/state
|
||||
try:
|
||||
write_json_file(temp_path.joinpath("addon.json"), metadata)
|
||||
except ConfigurationFileError as err:
|
||||
_LOGGER.error("Can't save meta for %s: %s", self.slug, err)
|
||||
raise BackupRestoreUnknownError() from err
|
||||
raise AddonsError(
|
||||
f"Can't save meta for {self.slug}", _LOGGER.error
|
||||
) from err
|
||||
|
||||
# Store AppArmor Profile
|
||||
if apparmor_profile:
|
||||
@@ -1345,7 +1304,9 @@ class Addon(AddonModel):
|
||||
apparmor_profile, profile_backup_file
|
||||
)
|
||||
except HostAppArmorError as err:
|
||||
raise BackupRestoreUnknownError() from err
|
||||
raise AddonsError(
|
||||
"Can't backup AppArmor profile", _LOGGER.error
|
||||
) from err
|
||||
|
||||
# Write tarfile
|
||||
with tar_file as backup:
|
||||
@@ -1399,8 +1360,7 @@ class Addon(AddonModel):
|
||||
)
|
||||
_LOGGER.info("Finish backup for addon %s", self.slug)
|
||||
except (tarfile.TarError, OSError, AddFileError) as err:
|
||||
_LOGGER.error("Can't write backup tarfile for addon %s: %s", self.slug, err)
|
||||
raise BackupRestoreUnknownError() from err
|
||||
raise AddonsError(f"Can't write tarfile: {err}", _LOGGER.error) from err
|
||||
finally:
|
||||
if was_running:
|
||||
wait_for_start = await self.end_backup()
|
||||
@@ -1442,24 +1402,28 @@ class Addon(AddonModel):
|
||||
try:
|
||||
tmp, data = await self.sys_run_in_executor(_extract_tarfile)
|
||||
except tarfile.TarError as err:
|
||||
_LOGGER.error("Can't extract backup tarfile for %s: %s", self.slug, err)
|
||||
raise BackupRestoreUnknownError() from err
|
||||
raise AddonsError(
|
||||
f"Can't read tarfile {tar_file}: {err}", _LOGGER.error
|
||||
) from err
|
||||
except ConfigurationFileError as err:
|
||||
raise AddonUnknownError(addon=self.slug) from err
|
||||
raise AddonsError() from err
|
||||
|
||||
try:
|
||||
# Validate
|
||||
try:
|
||||
data = SCHEMA_ADDON_BACKUP(data)
|
||||
except vol.Invalid as err:
|
||||
raise AddonBackupMetadataInvalidError(
|
||||
raise AddonsError(
|
||||
f"Can't validate {self.slug}, backup data: {humanize_error(data, err)}",
|
||||
_LOGGER.error,
|
||||
addon=self.slug,
|
||||
validation_error=humanize_error(data, err),
|
||||
) from err
|
||||
|
||||
# Validate availability. Raises if not
|
||||
self._validate_availability(data[ATTR_SYSTEM], logger=_LOGGER.error)
|
||||
# If available
|
||||
if not self._available(data[ATTR_SYSTEM]):
|
||||
raise AddonNotSupportedError(
|
||||
f"Add-on {self.slug} is not available for this platform",
|
||||
_LOGGER.error,
|
||||
)
|
||||
|
||||
# Restore local add-on information
|
||||
_LOGGER.info("Restore config for addon %s", self.slug)
|
||||
@@ -1518,10 +1482,9 @@ class Addon(AddonModel):
|
||||
try:
|
||||
await self.sys_run_in_executor(_restore_data)
|
||||
except shutil.Error as err:
|
||||
_LOGGER.error(
|
||||
"Can't restore origin data for %s: %s", self.slug, err
|
||||
)
|
||||
raise BackupRestoreUnknownError() from err
|
||||
raise AddonsError(
|
||||
f"Can't restore origin data: {err}", _LOGGER.error
|
||||
) from err
|
||||
|
||||
# Restore AppArmor
|
||||
profile_file = Path(tmp.name, "apparmor.txt")
|
||||
@@ -1532,11 +1495,10 @@ class Addon(AddonModel):
|
||||
)
|
||||
except HostAppArmorError as err:
|
||||
_LOGGER.error(
|
||||
"Can't restore AppArmor profile for add-on %s: %s",
|
||||
"Can't restore AppArmor profile for add-on %s",
|
||||
self.slug,
|
||||
err,
|
||||
)
|
||||
raise BackupRestoreUnknownError() from err
|
||||
raise AddonsError() from err
|
||||
|
||||
finally:
|
||||
# Is add-on loaded
|
||||
|
||||
@@ -5,7 +5,6 @@ from __future__ import annotations
|
||||
import base64
|
||||
from functools import cached_property
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
@@ -24,22 +23,15 @@ from ..const import (
|
||||
CpuArch,
|
||||
)
|
||||
from ..coresys import CoreSys, CoreSysAttributes
|
||||
from ..docker.const import DOCKER_HUB, DOCKER_HUB_LEGACY
|
||||
from ..docker.const import DOCKER_HUB
|
||||
from ..docker.interface import MAP_ARCH
|
||||
from ..exceptions import (
|
||||
AddonBuildArchitectureNotSupportedError,
|
||||
AddonBuildDockerfileMissingError,
|
||||
ConfigurationFileError,
|
||||
HassioArchNotFound,
|
||||
)
|
||||
from ..exceptions import ConfigurationFileError, HassioArchNotFound
|
||||
from ..utils.common import FileConfiguration, find_one_filetype
|
||||
from .validate import SCHEMA_BUILD_CONFIG
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .manager import AnyAddon
|
||||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AddonBuild(FileConfiguration, CoreSysAttributes):
|
||||
"""Handle build options for add-ons."""
|
||||
@@ -120,7 +112,7 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
|
||||
return self.addon.path_location.joinpath(f"Dockerfile.{self.arch}")
|
||||
return self.addon.path_location.joinpath("Dockerfile")
|
||||
|
||||
async def is_valid(self) -> None:
|
||||
async def is_valid(self) -> bool:
|
||||
"""Return true if the build env is valid."""
|
||||
|
||||
def build_is_valid() -> bool:
|
||||
@@ -132,17 +124,9 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
|
||||
)
|
||||
|
||||
try:
|
||||
if not await self.sys_run_in_executor(build_is_valid):
|
||||
raise AddonBuildDockerfileMissingError(
|
||||
_LOGGER.error, addon=self.addon.slug
|
||||
)
|
||||
return await self.sys_run_in_executor(build_is_valid)
|
||||
except HassioArchNotFound:
|
||||
raise AddonBuildArchitectureNotSupportedError(
|
||||
_LOGGER.error,
|
||||
addon=self.addon.slug,
|
||||
addon_arch_list=self.addon.supported_arch,
|
||||
system_arch_list=[arch.value for arch in self.sys_arch.supported],
|
||||
) from None
|
||||
return False
|
||||
|
||||
def get_docker_config_json(self) -> str | None:
|
||||
"""Generate Docker config.json content with registry credentials for base image.
|
||||
@@ -171,11 +155,8 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
|
||||
|
||||
# Use the actual registry URL for the key
|
||||
# Docker Hub uses "https://index.docker.io/v1/" as the key
|
||||
# Support both docker.io (official) and hub.docker.com (legacy)
|
||||
registry_key = (
|
||||
"https://index.docker.io/v1/"
|
||||
if registry in (DOCKER_HUB, DOCKER_HUB_LEGACY)
|
||||
else registry
|
||||
"https://index.docker.io/v1/" if registry == DOCKER_HUB else registry
|
||||
)
|
||||
|
||||
config = {"auths": {registry_key: {"auth": auth_string}}}
|
||||
|
||||
@@ -316,12 +316,12 @@ class AddonModel(JobGroup, ABC):
|
||||
|
||||
@property
|
||||
def panel_title(self) -> str:
|
||||
"""Return panel title for Ingress frame."""
|
||||
"""Return panel icon for Ingress frame."""
|
||||
return self.data.get(ATTR_PANEL_TITLE, self.name)
|
||||
|
||||
@property
|
||||
def panel_admin(self) -> bool:
|
||||
"""Return if panel is only available for admin users."""
|
||||
def panel_admin(self) -> str:
|
||||
"""Return panel icon for Ingress frame."""
|
||||
return self.data[ATTR_PANEL_ADMIN]
|
||||
|
||||
@property
|
||||
@@ -489,7 +489,7 @@ class AddonModel(JobGroup, ABC):
|
||||
return self.data[ATTR_DEVICETREE]
|
||||
|
||||
@property
|
||||
def with_tmpfs(self) -> bool:
|
||||
def with_tmpfs(self) -> str | None:
|
||||
"""Return if tmp is in memory of add-on."""
|
||||
return self.data[ATTR_TMPFS]
|
||||
|
||||
@@ -509,7 +509,7 @@ class AddonModel(JobGroup, ABC):
|
||||
return self.data[ATTR_VIDEO]
|
||||
|
||||
@property
|
||||
def homeassistant_version(self) -> AwesomeVersion | None:
|
||||
def homeassistant_version(self) -> str | None:
|
||||
"""Return min Home Assistant version they needed by Add-on."""
|
||||
return self.data.get(ATTR_HOMEASSISTANT)
|
||||
|
||||
|
||||
@@ -75,7 +75,7 @@ class AddonOptions(CoreSysAttributes):
|
||||
"""Create a schema for add-on options."""
|
||||
return vol.Schema(vol.All(dict, self))
|
||||
|
||||
def __call__(self, struct: dict[str, Any]) -> dict[str, Any]:
|
||||
def __call__(self, struct):
|
||||
"""Create schema validator for add-ons options."""
|
||||
options = {}
|
||||
|
||||
@@ -193,7 +193,9 @@ class AddonOptions(CoreSysAttributes):
|
||||
f"Fatal error for option '{key}' with type '{typ}' in {self._name} ({self._slug})"
|
||||
) from None
|
||||
|
||||
def _nested_validate_list(self, typ: Any, data_list: Any, key: str) -> list[Any]:
|
||||
def _nested_validate_list(
|
||||
self, typ: Any, data_list: list[Any], key: str
|
||||
) -> list[Any]:
|
||||
"""Validate nested items."""
|
||||
options = []
|
||||
|
||||
@@ -211,7 +213,7 @@ class AddonOptions(CoreSysAttributes):
|
||||
return options
|
||||
|
||||
def _nested_validate_dict(
|
||||
self, typ: dict[Any, Any], data_dict: Any, key: str
|
||||
self, typ: dict[Any, Any], data_dict: dict[Any, Any], key: str
|
||||
) -> dict[Any, Any]:
|
||||
"""Validate nested items."""
|
||||
options = {}
|
||||
@@ -262,7 +264,7 @@ class UiOptions(CoreSysAttributes):
|
||||
|
||||
def __init__(self, coresys: CoreSys) -> None:
|
||||
"""Initialize UI option render."""
|
||||
self.coresys: CoreSys = coresys
|
||||
self.coresys = coresys
|
||||
|
||||
def __call__(self, raw_schema: dict[str, Any]) -> list[dict[str, Any]]:
|
||||
"""Generate UI schema."""
|
||||
@@ -277,10 +279,10 @@ class UiOptions(CoreSysAttributes):
|
||||
def _ui_schema_element(
|
||||
self,
|
||||
ui_schema: list[dict[str, Any]],
|
||||
value: str | list[Any] | dict[str, Any],
|
||||
value: str,
|
||||
key: str,
|
||||
multiple: bool = False,
|
||||
) -> None:
|
||||
):
|
||||
if isinstance(value, list):
|
||||
# nested value list
|
||||
assert not multiple
|
||||
|
||||
@@ -100,9 +100,6 @@ from ..const import (
|
||||
from ..coresys import CoreSysAttributes
|
||||
from ..docker.stats import DockerStats
|
||||
from ..exceptions import (
|
||||
AddonBootConfigCannotChangeError,
|
||||
AddonConfigurationInvalidError,
|
||||
AddonNotSupportedWriteStdinError,
|
||||
APIAddonNotInstalled,
|
||||
APIError,
|
||||
APIForbidden,
|
||||
@@ -128,7 +125,6 @@ SCHEMA_OPTIONS = vol.Schema(
|
||||
vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(str),
|
||||
vol.Optional(ATTR_INGRESS_PANEL): vol.Boolean(),
|
||||
vol.Optional(ATTR_WATCHDOG): vol.Boolean(),
|
||||
vol.Optional(ATTR_OPTIONS): vol.Maybe(dict),
|
||||
}
|
||||
)
|
||||
|
||||
@@ -304,24 +300,19 @@ class APIAddons(CoreSysAttributes):
|
||||
# Update secrets for validation
|
||||
await self.sys_homeassistant.secrets.reload()
|
||||
|
||||
# Extend schema with add-on specific validation
|
||||
addon_schema = SCHEMA_OPTIONS.extend(
|
||||
{vol.Optional(ATTR_OPTIONS): vol.Maybe(addon.schema)}
|
||||
)
|
||||
|
||||
# Validate/Process Body
|
||||
body = await api_validate(SCHEMA_OPTIONS, request)
|
||||
body = await api_validate(addon_schema, request)
|
||||
if ATTR_OPTIONS in body:
|
||||
# None resets options to defaults, otherwise validate the options
|
||||
if body[ATTR_OPTIONS] is None:
|
||||
addon.options = None
|
||||
else:
|
||||
try:
|
||||
addon.options = addon.schema(body[ATTR_OPTIONS])
|
||||
except vol.Invalid as ex:
|
||||
raise AddonConfigurationInvalidError(
|
||||
addon=addon.slug,
|
||||
validation_error=humanize_error(body[ATTR_OPTIONS], ex),
|
||||
) from None
|
||||
addon.options = body[ATTR_OPTIONS]
|
||||
if ATTR_BOOT in body:
|
||||
if addon.boot_config == AddonBootConfig.MANUAL_ONLY:
|
||||
raise AddonBootConfigCannotChangeError(
|
||||
addon=addon.slug, boot_config=addon.boot_config.value
|
||||
raise APIError(
|
||||
f"Addon {addon.slug} boot option is set to {addon.boot_config} so it cannot be changed"
|
||||
)
|
||||
addon.boot = body[ATTR_BOOT]
|
||||
if ATTR_AUTO_UPDATE in body:
|
||||
@@ -394,7 +385,7 @@ class APIAddons(CoreSysAttributes):
|
||||
return data
|
||||
|
||||
@api_process
|
||||
async def options_config(self, request: web.Request) -> dict[str, Any]:
|
||||
async def options_config(self, request: web.Request) -> None:
|
||||
"""Validate user options for add-on."""
|
||||
slug: str = request.match_info["addon"]
|
||||
if slug != "self":
|
||||
@@ -439,11 +430,11 @@ class APIAddons(CoreSysAttributes):
|
||||
}
|
||||
|
||||
@api_process
|
||||
async def uninstall(self, request: web.Request) -> None:
|
||||
async def uninstall(self, request: web.Request) -> Awaitable[None]:
|
||||
"""Uninstall add-on."""
|
||||
addon = self.get_addon_for_request(request)
|
||||
body: dict[str, Any] = await api_validate(SCHEMA_UNINSTALL, request)
|
||||
await asyncio.shield(
|
||||
return await asyncio.shield(
|
||||
self.sys_addons.uninstall(
|
||||
addon.slug, remove_config=body[ATTR_REMOVE_CONFIG]
|
||||
)
|
||||
@@ -485,7 +476,7 @@ class APIAddons(CoreSysAttributes):
|
||||
"""Write to stdin of add-on."""
|
||||
addon = self.get_addon_for_request(request)
|
||||
if not addon.with_stdin:
|
||||
raise AddonNotSupportedWriteStdinError(_LOGGER.error, addon=addon.slug)
|
||||
raise APIError(f"STDIN not supported the {addon.slug} add-on")
|
||||
|
||||
data = await request.read()
|
||||
await asyncio.shield(addon.write_stdin(data))
|
||||
|
||||
@@ -15,7 +15,7 @@ import voluptuous as vol
|
||||
from ..addons.addon import Addon
|
||||
from ..const import ATTR_NAME, ATTR_PASSWORD, ATTR_USERNAME, REQUEST_FROM
|
||||
from ..coresys import CoreSysAttributes
|
||||
from ..exceptions import APIForbidden, AuthInvalidNonStringValueError
|
||||
from ..exceptions import APIForbidden
|
||||
from .const import (
|
||||
ATTR_GROUP_IDS,
|
||||
ATTR_IS_ACTIVE,
|
||||
@@ -69,9 +69,7 @@ class APIAuth(CoreSysAttributes):
|
||||
try:
|
||||
_ = username.encode and password.encode # type: ignore
|
||||
except AttributeError:
|
||||
raise AuthInvalidNonStringValueError(
|
||||
_LOGGER.error, headers=REALM_HEADER
|
||||
) from None
|
||||
raise HTTPUnauthorized(headers=REALM_HEADER) from None
|
||||
|
||||
return self.sys_auth.check_login(
|
||||
addon, cast(str, username), cast(str, password)
|
||||
|
||||
@@ -211,7 +211,7 @@ class APIBackups(CoreSysAttributes):
|
||||
await self.sys_backups.save_data()
|
||||
|
||||
@api_process
|
||||
async def reload(self, _: web.Request) -> bool:
|
||||
async def reload(self, _):
|
||||
"""Reload backup list."""
|
||||
await asyncio.shield(self.sys_backups.reload())
|
||||
return True
|
||||
@@ -421,7 +421,7 @@ class APIBackups(CoreSysAttributes):
|
||||
await self.sys_backups.remove(backup, locations=locations)
|
||||
|
||||
@api_process
|
||||
async def download(self, request: web.Request) -> web.StreamResponse:
|
||||
async def download(self, request: web.Request):
|
||||
"""Download a backup file."""
|
||||
backup = self._extract_slug(request)
|
||||
# Query will give us '' for /backups, convert value to None
|
||||
@@ -451,7 +451,7 @@ class APIBackups(CoreSysAttributes):
|
||||
return response
|
||||
|
||||
@api_process
|
||||
async def upload(self, request: web.Request) -> dict[str, str] | bool:
|
||||
async def upload(self, request: web.Request):
|
||||
"""Upload a backup file."""
|
||||
location: LOCATION_TYPE = None
|
||||
locations: list[LOCATION_TYPE] | None = None
|
||||
|
||||
@@ -46,7 +46,7 @@ SCHEMA_OPTIONS = vol.Schema(
|
||||
|
||||
SCHEMA_MIGRATE_DOCKER_STORAGE_DRIVER = vol.Schema(
|
||||
{
|
||||
vol.Required(ATTR_STORAGE_DRIVER): vol.In(["overlayfs"]),
|
||||
vol.Required(ATTR_STORAGE_DRIVER): vol.In(["overlayfs", "overlay2"]),
|
||||
}
|
||||
)
|
||||
|
||||
@@ -55,7 +55,7 @@ class APIDocker(CoreSysAttributes):
|
||||
"""Handle RESTful API for Docker configuration."""
|
||||
|
||||
@api_process
|
||||
async def info(self, request: web.Request) -> dict[str, Any]:
|
||||
async def info(self, request: web.Request):
|
||||
"""Get docker info."""
|
||||
data_registries = {}
|
||||
for hostname, registry in self.sys_docker.config.registries.items():
|
||||
@@ -113,7 +113,7 @@ class APIDocker(CoreSysAttributes):
|
||||
return {ATTR_REGISTRIES: data_registries}
|
||||
|
||||
@api_process
|
||||
async def create_registry(self, request: web.Request) -> None:
|
||||
async def create_registry(self, request: web.Request):
|
||||
"""Create a new docker registry."""
|
||||
body = await api_validate(SCHEMA_DOCKER_REGISTRY, request)
|
||||
|
||||
@@ -123,7 +123,7 @@ class APIDocker(CoreSysAttributes):
|
||||
await self.sys_docker.config.save_data()
|
||||
|
||||
@api_process
|
||||
async def remove_registry(self, request: web.Request) -> None:
|
||||
async def remove_registry(self, request: web.Request):
|
||||
"""Delete a docker registry."""
|
||||
hostname = request.match_info.get(ATTR_HOSTNAME)
|
||||
if hostname not in self.sys_docker.config.registries:
|
||||
|
||||
@@ -18,7 +18,6 @@ from ..const import (
|
||||
ATTR_BLK_WRITE,
|
||||
ATTR_BOOT,
|
||||
ATTR_CPU_PERCENT,
|
||||
ATTR_DUPLICATE_LOG_FILE,
|
||||
ATTR_IMAGE,
|
||||
ATTR_IP_ADDRESS,
|
||||
ATTR_JOB_ID,
|
||||
@@ -56,7 +55,6 @@ SCHEMA_OPTIONS = vol.Schema(
|
||||
vol.Optional(ATTR_AUDIO_OUTPUT): vol.Maybe(str),
|
||||
vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(str),
|
||||
vol.Optional(ATTR_BACKUPS_EXCLUDE_DATABASE): vol.Boolean(),
|
||||
vol.Optional(ATTR_DUPLICATE_LOG_FILE): vol.Boolean(),
|
||||
}
|
||||
)
|
||||
|
||||
@@ -114,7 +112,6 @@ class APIHomeAssistant(CoreSysAttributes):
|
||||
ATTR_AUDIO_INPUT: self.sys_homeassistant.audio_input,
|
||||
ATTR_AUDIO_OUTPUT: self.sys_homeassistant.audio_output,
|
||||
ATTR_BACKUPS_EXCLUDE_DATABASE: self.sys_homeassistant.backups_exclude_database,
|
||||
ATTR_DUPLICATE_LOG_FILE: self.sys_homeassistant.duplicate_log_file,
|
||||
}
|
||||
|
||||
@api_process
|
||||
@@ -154,13 +151,10 @@ class APIHomeAssistant(CoreSysAttributes):
|
||||
ATTR_BACKUPS_EXCLUDE_DATABASE
|
||||
]
|
||||
|
||||
if ATTR_DUPLICATE_LOG_FILE in body:
|
||||
self.sys_homeassistant.duplicate_log_file = body[ATTR_DUPLICATE_LOG_FILE]
|
||||
|
||||
await self.sys_homeassistant.save_data()
|
||||
|
||||
@api_process
|
||||
async def stats(self, request: web.Request) -> dict[str, Any]:
|
||||
async def stats(self, request: web.Request) -> dict[Any, str]:
|
||||
"""Return resource information."""
|
||||
stats = await self.sys_homeassistant.core.stats()
|
||||
if not stats:
|
||||
@@ -197,7 +191,7 @@ class APIHomeAssistant(CoreSysAttributes):
|
||||
return await update_task
|
||||
|
||||
@api_process
|
||||
async def stop(self, request: web.Request) -> None:
|
||||
async def stop(self, request: web.Request) -> Awaitable[None]:
|
||||
"""Stop Home Assistant."""
|
||||
body = await api_validate(SCHEMA_STOP, request)
|
||||
await self._check_offline_migration(force=body[ATTR_FORCE])
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
"""Init file for Supervisor host RESTful API."""
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Awaitable
|
||||
from contextlib import suppress
|
||||
import json
|
||||
import logging
|
||||
@@ -100,7 +99,7 @@ class APIHost(CoreSysAttributes):
|
||||
)
|
||||
|
||||
@api_process
|
||||
async def info(self, request: web.Request) -> dict[str, Any]:
|
||||
async def info(self, request):
|
||||
"""Return host information."""
|
||||
return {
|
||||
ATTR_AGENT_VERSION: self.sys_dbus.agent.version,
|
||||
@@ -129,7 +128,7 @@ class APIHost(CoreSysAttributes):
|
||||
}
|
||||
|
||||
@api_process
|
||||
async def options(self, request: web.Request) -> None:
|
||||
async def options(self, request):
|
||||
"""Edit host settings."""
|
||||
body = await api_validate(SCHEMA_OPTIONS, request)
|
||||
|
||||
@@ -140,7 +139,7 @@ class APIHost(CoreSysAttributes):
|
||||
)
|
||||
|
||||
@api_process
|
||||
async def reboot(self, request: web.Request) -> None:
|
||||
async def reboot(self, request):
|
||||
"""Reboot host."""
|
||||
body = await api_validate(SCHEMA_SHUTDOWN, request)
|
||||
await self._check_ha_offline_migration(force=body[ATTR_FORCE])
|
||||
@@ -148,7 +147,7 @@ class APIHost(CoreSysAttributes):
|
||||
return await asyncio.shield(self.sys_host.control.reboot())
|
||||
|
||||
@api_process
|
||||
async def shutdown(self, request: web.Request) -> None:
|
||||
async def shutdown(self, request):
|
||||
"""Poweroff host."""
|
||||
body = await api_validate(SCHEMA_SHUTDOWN, request)
|
||||
await self._check_ha_offline_migration(force=body[ATTR_FORCE])
|
||||
@@ -156,12 +155,12 @@ class APIHost(CoreSysAttributes):
|
||||
return await asyncio.shield(self.sys_host.control.shutdown())
|
||||
|
||||
@api_process
|
||||
def reload(self, request: web.Request) -> Awaitable[None]:
|
||||
def reload(self, request):
|
||||
"""Reload host data."""
|
||||
return asyncio.shield(self.sys_host.reload())
|
||||
|
||||
@api_process
|
||||
async def services(self, request: web.Request) -> dict[str, Any]:
|
||||
async def services(self, request):
|
||||
"""Return list of available services."""
|
||||
services = []
|
||||
for unit in self.sys_host.services:
|
||||
@@ -176,7 +175,7 @@ class APIHost(CoreSysAttributes):
|
||||
return {ATTR_SERVICES: services}
|
||||
|
||||
@api_process
|
||||
async def list_boots(self, _: web.Request) -> dict[str, Any]:
|
||||
async def list_boots(self, _: web.Request):
|
||||
"""Return a list of boot IDs."""
|
||||
boot_ids = await self.sys_host.logs.get_boot_ids()
|
||||
return {
|
||||
@@ -187,7 +186,7 @@ class APIHost(CoreSysAttributes):
|
||||
}
|
||||
|
||||
@api_process
|
||||
async def list_identifiers(self, _: web.Request) -> dict[str, list[str]]:
|
||||
async def list_identifiers(self, _: web.Request):
|
||||
"""Return a list of syslog identifiers."""
|
||||
return {ATTR_IDENTIFIERS: await self.sys_host.logs.get_identifiers()}
|
||||
|
||||
@@ -333,7 +332,7 @@ class APIHost(CoreSysAttributes):
|
||||
)
|
||||
|
||||
@api_process
|
||||
async def disk_usage(self, request: web.Request) -> dict[str, Any]:
|
||||
async def disk_usage(self, request: web.Request) -> dict:
|
||||
"""Return a breakdown of storage usage for the system."""
|
||||
|
||||
max_depth = request.query.get(ATTR_MAX_DEPTH, 1)
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
"""Handle security part of this API."""
|
||||
|
||||
from collections.abc import Awaitable, Callable
|
||||
from collections.abc import Callable
|
||||
import logging
|
||||
import re
|
||||
from typing import Final
|
||||
from urllib.parse import unquote
|
||||
|
||||
from aiohttp.web import Request, StreamResponse, middleware
|
||||
from aiohttp.web import Request, Response, middleware
|
||||
from aiohttp.web_exceptions import HTTPBadRequest, HTTPForbidden, HTTPUnauthorized
|
||||
from awesomeversion import AwesomeVersion
|
||||
|
||||
@@ -89,7 +89,7 @@ CORE_ONLY_PATHS: Final = re.compile(
|
||||
)
|
||||
|
||||
# Policy role add-on API access
|
||||
ADDONS_ROLE_ACCESS: dict[str, re.Pattern[str]] = {
|
||||
ADDONS_ROLE_ACCESS: dict[str, re.Pattern] = {
|
||||
ROLE_DEFAULT: re.compile(
|
||||
r"^(?:"
|
||||
r"|/.+/info"
|
||||
@@ -180,9 +180,7 @@ class SecurityMiddleware(CoreSysAttributes):
|
||||
return unquoted
|
||||
|
||||
@middleware
|
||||
async def block_bad_requests(
|
||||
self, request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
|
||||
) -> StreamResponse:
|
||||
async def block_bad_requests(self, request: Request, handler: Callable) -> Response:
|
||||
"""Process request and tblock commonly known exploit attempts."""
|
||||
if FILTERS.search(self._recursive_unquote(request.path)):
|
||||
_LOGGER.warning(
|
||||
@@ -200,9 +198,7 @@ class SecurityMiddleware(CoreSysAttributes):
|
||||
return await handler(request)
|
||||
|
||||
@middleware
|
||||
async def system_validation(
|
||||
self, request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
|
||||
) -> StreamResponse:
|
||||
async def system_validation(self, request: Request, handler: Callable) -> Response:
|
||||
"""Check if core is ready to response."""
|
||||
if self.sys_core.state not in VALID_API_STATES:
|
||||
return api_return_error(
|
||||
@@ -212,9 +208,7 @@ class SecurityMiddleware(CoreSysAttributes):
|
||||
return await handler(request)
|
||||
|
||||
@middleware
|
||||
async def token_validation(
|
||||
self, request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
|
||||
) -> StreamResponse:
|
||||
async def token_validation(self, request: Request, handler: Callable) -> Response:
|
||||
"""Check security access of this layer."""
|
||||
request_from: CoreSysAttributes | None = None
|
||||
supervisor_token = extract_supervisor_token(request)
|
||||
@@ -285,9 +279,7 @@ class SecurityMiddleware(CoreSysAttributes):
|
||||
raise HTTPForbidden()
|
||||
|
||||
@middleware
|
||||
async def core_proxy(
|
||||
self, request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
|
||||
) -> StreamResponse:
|
||||
async def core_proxy(self, request: Request, handler: Callable) -> Response:
|
||||
"""Validate user from Core API proxy."""
|
||||
if (
|
||||
request[REQUEST_FROM] != self.sys_homeassistant
|
||||
|
||||
@@ -1,9 +1,5 @@
|
||||
"""Init file for Supervisor network RESTful API."""
|
||||
|
||||
from typing import Any
|
||||
|
||||
from aiohttp import web
|
||||
|
||||
from ..const import (
|
||||
ATTR_AVAILABLE,
|
||||
ATTR_PROVIDERS,
|
||||
@@ -29,7 +25,7 @@ class APIServices(CoreSysAttributes):
|
||||
return service
|
||||
|
||||
@api_process
|
||||
async def list_services(self, request: web.Request) -> dict[str, Any]:
|
||||
async def list_services(self, request):
|
||||
"""Show register services."""
|
||||
services = []
|
||||
for service in self.sys_services.list_services:
|
||||
@@ -44,7 +40,7 @@ class APIServices(CoreSysAttributes):
|
||||
return {ATTR_SERVICES: services}
|
||||
|
||||
@api_process
|
||||
async def set_service(self, request: web.Request) -> None:
|
||||
async def set_service(self, request):
|
||||
"""Write data into a service."""
|
||||
service = self._extract_service(request)
|
||||
body = await api_validate(service.schema, request)
|
||||
@@ -54,7 +50,7 @@ class APIServices(CoreSysAttributes):
|
||||
await service.set_service_data(addon, body)
|
||||
|
||||
@api_process
|
||||
async def get_service(self, request: web.Request) -> dict[str, Any]:
|
||||
async def get_service(self, request):
|
||||
"""Read data into a service."""
|
||||
service = self._extract_service(request)
|
||||
|
||||
@@ -66,7 +62,7 @@ class APIServices(CoreSysAttributes):
|
||||
return service.get_service_data()
|
||||
|
||||
@api_process
|
||||
async def del_service(self, request: web.Request) -> None:
|
||||
async def del_service(self, request):
|
||||
"""Delete data into a service."""
|
||||
service = self._extract_service(request)
|
||||
addon = request[REQUEST_FROM]
|
||||
|
||||
@@ -53,7 +53,7 @@ from ..const import (
|
||||
REQUEST_FROM,
|
||||
)
|
||||
from ..coresys import CoreSysAttributes
|
||||
from ..exceptions import APIError, APIForbidden, APINotFound, StoreAddonNotFoundError
|
||||
from ..exceptions import APIError, APIForbidden, APINotFound
|
||||
from ..store.addon import AddonStore
|
||||
from ..store.repository import Repository
|
||||
from ..store.validate import validate_repository
|
||||
@@ -104,7 +104,7 @@ class APIStore(CoreSysAttributes):
|
||||
addon_slug: str = request.match_info["addon"]
|
||||
|
||||
if not (addon := self.sys_addons.get(addon_slug)):
|
||||
raise StoreAddonNotFoundError(addon=addon_slug)
|
||||
raise APINotFound(f"Addon {addon_slug} does not exist")
|
||||
|
||||
if installed and not addon.is_installed:
|
||||
raise APIError(f"Addon {addon_slug} is not installed")
|
||||
@@ -112,7 +112,7 @@ class APIStore(CoreSysAttributes):
|
||||
if not installed and addon.is_installed:
|
||||
addon = cast(Addon, addon)
|
||||
if not addon.addon_store:
|
||||
raise StoreAddonNotFoundError(addon=addon_slug)
|
||||
raise APINotFound(f"Addon {addon_slug} does not exist in the store")
|
||||
return addon.addon_store
|
||||
|
||||
return addon
|
||||
@@ -349,13 +349,13 @@ class APIStore(CoreSysAttributes):
|
||||
return self._generate_repository_information(repository)
|
||||
|
||||
@api_process
|
||||
async def add_repository(self, request: web.Request) -> None:
|
||||
async def add_repository(self, request: web.Request):
|
||||
"""Add repository to the store."""
|
||||
body = await api_validate(SCHEMA_ADD_REPOSITORY, request)
|
||||
await asyncio.shield(self.sys_store.add_repository(body[ATTR_REPOSITORY]))
|
||||
|
||||
@api_process
|
||||
async def remove_repository(self, request: web.Request) -> None:
|
||||
async def remove_repository(self, request: web.Request):
|
||||
"""Remove repository from the store."""
|
||||
repository: Repository = self._extract_repository(request)
|
||||
await asyncio.shield(self.sys_store.remove_repository(repository))
|
||||
|
||||
@@ -80,7 +80,7 @@ class APISupervisor(CoreSysAttributes):
|
||||
"""Handle RESTful API for Supervisor functions."""
|
||||
|
||||
@api_process
|
||||
async def ping(self, request: web.Request) -> bool:
|
||||
async def ping(self, request):
|
||||
"""Return ok for signal that the API is ready."""
|
||||
return True
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
"""Init file for Supervisor util for RESTful API."""
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Callable, Mapping
|
||||
from collections.abc import Callable
|
||||
import json
|
||||
from typing import Any, cast
|
||||
|
||||
@@ -26,7 +26,7 @@ from ..const import (
|
||||
RESULT_OK,
|
||||
)
|
||||
from ..coresys import CoreSys, CoreSysAttributes
|
||||
from ..exceptions import APIError, DockerAPIError, HassioError
|
||||
from ..exceptions import APIError, BackupFileNotFoundError, DockerAPIError, HassioError
|
||||
from ..jobs import JobSchedulerOptions, SupervisorJob
|
||||
from ..utils import check_exception_chain, get_message_from_exception_chain
|
||||
from ..utils.json import json_dumps, json_loads as json_loads_util
|
||||
@@ -67,10 +67,10 @@ def api_process(method):
|
||||
"""Return API information."""
|
||||
try:
|
||||
answer = await method(*args, **kwargs)
|
||||
except BackupFileNotFoundError as err:
|
||||
return api_return_error(err, status=404)
|
||||
except APIError as err:
|
||||
return api_return_error(
|
||||
err, status=err.status, job_id=err.job_id, headers=err.headers
|
||||
)
|
||||
return api_return_error(err, status=err.status, job_id=err.job_id)
|
||||
except HassioError as err:
|
||||
return api_return_error(err)
|
||||
|
||||
@@ -139,7 +139,6 @@ def api_return_error(
|
||||
error_type: str | None = None,
|
||||
status: int = 400,
|
||||
*,
|
||||
headers: Mapping[str, str] | None = None,
|
||||
job_id: str | None = None,
|
||||
) -> web.Response:
|
||||
"""Return an API error message."""
|
||||
@@ -152,15 +151,10 @@ def api_return_error(
|
||||
|
||||
match error_type:
|
||||
case const.CONTENT_TYPE_TEXT:
|
||||
return web.Response(
|
||||
body=message, content_type=error_type, status=status, headers=headers
|
||||
)
|
||||
return web.Response(body=message, content_type=error_type, status=status)
|
||||
case const.CONTENT_TYPE_BINARY:
|
||||
return web.Response(
|
||||
body=message.encode(),
|
||||
content_type=error_type,
|
||||
status=status,
|
||||
headers=headers,
|
||||
body=message.encode(), content_type=error_type, status=status
|
||||
)
|
||||
case _:
|
||||
result: dict[str, Any] = {
|
||||
@@ -178,7 +172,6 @@ def api_return_error(
|
||||
result,
|
||||
status=status,
|
||||
dumps=json_dumps,
|
||||
headers=headers,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -9,10 +9,8 @@ from .addons.addon import Addon
from .const import ATTR_PASSWORD, ATTR_TYPE, ATTR_USERNAME, FILE_HASSIO_AUTH
from .coresys import CoreSys, CoreSysAttributes
from .exceptions import (
    AuthHomeAssistantAPIValidationError,
    AuthInvalidNonStringValueError,
    AuthError,
    AuthListUsersError,
    AuthListUsersNoneResponseError,
    AuthPasswordResetError,
    HomeAssistantAPIError,
    HomeAssistantWSError,

@@ -85,8 +83,10 @@ class Auth(FileConfiguration, CoreSysAttributes):
        self, addon: Addon, username: str | None, password: str | None
    ) -> bool:
        """Check username login."""
        if username is None or password is None:
            raise AuthInvalidNonStringValueError(_LOGGER.error)
        if password is None:
            raise AuthError("None as password is not supported!", _LOGGER.error)
        if username is None:
            raise AuthError("None as username is not supported!", _LOGGER.error)

        _LOGGER.info("Auth request from '%s' for '%s'", addon.slug, username)

@@ -137,7 +137,7 @@ class Auth(FileConfiguration, CoreSysAttributes):
        finally:
            self._running.pop(username, None)

        raise AuthHomeAssistantAPIValidationError()
        raise AuthError()

    async def change_password(self, username: str, password: str) -> None:
        """Change user password login."""

@@ -155,7 +155,7 @@ class Auth(FileConfiguration, CoreSysAttributes):
        except HomeAssistantAPIError as err:
            _LOGGER.error("Can't request password reset on Home Assistant: %s", err)

        raise AuthPasswordResetError(user=username)
        raise AuthPasswordResetError()

    async def list_users(self) -> list[dict[str, Any]]:
        """List users on the Home Assistant instance."""

@@ -166,12 +166,15 @@ class Auth(FileConfiguration, CoreSysAttributes):
                {ATTR_TYPE: "config/auth/list"}
            )
        except HomeAssistantWSError as err:
            _LOGGER.error("Can't request listing users on Home Assistant: %s", err)
            raise AuthListUsersError() from err
            raise AuthListUsersError(
                f"Can't request listing users on Home Assistant: {err}", _LOGGER.error
            ) from err

        if users is not None:
            return users
        raise AuthListUsersNoneResponseError(_LOGGER.error)
        raise AuthListUsersError(
            "Can't request listing users on Home Assistant!", _LOGGER.error
        )

    @staticmethod
    def _rehash(value: str, salt2: str = "") -> str:

@@ -628,6 +628,9 @@ class Backup(JobGroup):
                if start_task := await self._addon_save(addon):
                    start_tasks.append(start_task)
            except BackupError as err:
                err = BackupError(
                    f"Can't backup add-on {addon.slug}: {str(err)}", _LOGGER.error
                )
                self.sys_jobs.current.capture_error(err)

        return start_tasks

@@ -179,7 +179,6 @@ ATTR_DOCKER = "docker"
ATTR_DOCKER_API = "docker_api"
ATTR_DOCUMENTATION = "documentation"
ATTR_DOMAINS = "domains"
ATTR_DUPLICATE_LOG_FILE = "duplicate_log_file"
ATTR_ENABLE = "enable"
ATTR_ENABLE_IPV6 = "enable_ipv6"
ATTR_ENABLED = "enabled"

@@ -250,7 +250,7 @@ class ConnectionType(StrEnum):
    WIRELESS = "802-11-wireless"


class ConnectionState(IntEnum):
class ConnectionStateType(IntEnum):
    """Connection states.

    https://networkmanager.dev/docs/api/latest/nm-dbus-types.html#NMActiveConnectionState

@@ -90,8 +90,8 @@ class Ip4Properties(IpProperties):
class Ip6Properties(IpProperties):
    """IPv6 properties object for Network Manager."""

    addr_gen_mode: int | None
    ip6_privacy: int | None
    addr_gen_mode: int
    ip6_privacy: int
    dns: list[bytes] | None


@@ -16,8 +16,8 @@ from ..const import (
    DBUS_IFACE_CONNECTION_ACTIVE,
    DBUS_NAME_NM,
    DBUS_OBJECT_BASE,
    ConnectionState,
    ConnectionStateFlags,
    ConnectionStateType,
)
from ..interface import DBusInterfaceProxy, dbus_property
from ..utils import dbus_connected

@@ -67,9 +67,9 @@ class NetworkConnection(DBusInterfaceProxy):

    @property
    @dbus_property
    def state(self) -> ConnectionState:
    def state(self) -> ConnectionStateType:
        """Return the state of the connection."""
        return ConnectionState(self.properties[DBUS_ATTR_STATE])
        return ConnectionStateType(self.properties[DBUS_ATTR_STATE])

    @property
    def state_flags(self) -> set[ConnectionStateFlags]:

@@ -16,11 +16,7 @@ from ....host.const import (
    InterfaceType,
    MulticastDnsMode,
)
from ...const import (
    InterfaceAddrGenMode as NMInterfaceAddrGenMode,
    InterfaceIp6Privacy as NMInterfaceIp6Privacy,
    MulticastDnsValue,
)
from ...const import MulticastDnsValue
from .. import NetworkManager
from . import (
    CONF_ATTR_802_ETHERNET,

@@ -122,41 +118,24 @@ def _get_ipv6_connection_settings(
        ipv6[CONF_ATTR_IPV6_METHOD] = Variant("s", "auto")
        if ipv6setting:
            if ipv6setting.addr_gen_mode == InterfaceAddrGenMode.EUI64:
                ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
                    "i", NMInterfaceAddrGenMode.EUI64.value
                )
                ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 0)
            elif (
                not support_addr_gen_mode_defaults
                or ipv6setting.addr_gen_mode == InterfaceAddrGenMode.STABLE_PRIVACY
            ):
                ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
                    "i", NMInterfaceAddrGenMode.STABLE_PRIVACY.value
                )
                ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 1)
            elif ipv6setting.addr_gen_mode == InterfaceAddrGenMode.DEFAULT_OR_EUI64:
                ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
                    "i", NMInterfaceAddrGenMode.DEFAULT_OR_EUI64.value
                )
                ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 2)
            else:
                ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
                    "i", NMInterfaceAddrGenMode.DEFAULT.value
                )

                ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 3)
            if ipv6setting.ip6_privacy == InterfaceIp6Privacy.DISABLED:
                ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
                    "i", NMInterfaceIp6Privacy.DISABLED.value
                )
                ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 0)
            elif ipv6setting.ip6_privacy == InterfaceIp6Privacy.ENABLED_PREFER_PUBLIC:
                ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
                    "i", NMInterfaceIp6Privacy.ENABLED_PREFER_PUBLIC.value
                )
                ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 1)
            elif ipv6setting.ip6_privacy == InterfaceIp6Privacy.ENABLED:
                ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
                    "i", NMInterfaceIp6Privacy.ENABLED.value
                )
                ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 2)
            else:
                ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
                    "i", NMInterfaceIp6Privacy.DEFAULT.value
                )
                ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", -1)
    elif ipv6setting.method == InterfaceMethod.DISABLED:
        ipv6[CONF_ATTR_IPV6_METHOD] = Variant("s", "link-local")
    elif ipv6setting.method == InterfaceMethod.STATIC:
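The addr_gen_mode / ip6_privacy hunk above replaces hard-coded NetworkManager integers with named enum values. One way to picture the same mapping is the simplified sketch below. It ignores the support_addr_gen_mode_defaults special case; the mapping-dict names are invented, while the enum and Variant names are the ones imported in the diff.

ADDR_GEN_MODE_TO_NM = {
    InterfaceAddrGenMode.EUI64: NMInterfaceAddrGenMode.EUI64,
    InterfaceAddrGenMode.STABLE_PRIVACY: NMInterfaceAddrGenMode.STABLE_PRIVACY,
    InterfaceAddrGenMode.DEFAULT_OR_EUI64: NMInterfaceAddrGenMode.DEFAULT_OR_EUI64,
}
IP6_PRIVACY_TO_NM = {
    InterfaceIp6Privacy.DISABLED: NMInterfaceIp6Privacy.DISABLED,
    InterfaceIp6Privacy.ENABLED_PREFER_PUBLIC: NMInterfaceIp6Privacy.ENABLED_PREFER_PUBLIC,
    InterfaceIp6Privacy.ENABLED: NMInterfaceIp6Privacy.ENABLED,
}

# Roughly equivalent to the if/elif chains in the new code, falling back to the
# NetworkManager defaults for any other setting:
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
    "i",
    ADDR_GEN_MODE_TO_NM.get(
        ipv6setting.addr_gen_mode, NMInterfaceAddrGenMode.DEFAULT
    ).value,
)
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
    "i",
    IP6_PRIVACY_TO_NM.get(ipv6setting.ip6_privacy, NMInterfaceIp6Privacy.DEFAULT).value,
)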
@@ -2,7 +2,6 @@

from __future__ import annotations

from collections.abc import Awaitable
from contextlib import suppress
from ipaddress import IPv4Address
import logging

@@ -36,7 +35,6 @@ from ..coresys import CoreSys
from ..exceptions import (
    CoreDNSError,
    DBusError,
    DockerBuildError,
    DockerError,
    DockerJobError,
    DockerNotFound,

@@ -684,12 +682,13 @@ class DockerAddon(DockerInterface):
    async def _build(self, version: AwesomeVersion, image: str | None = None) -> None:
        """Build a Docker container."""
        build_env = await AddonBuild(self.coresys, self.addon).load_config()
        # Check if the build environment is valid, raises if not
        await build_env.is_valid()
        if not await build_env.is_valid():
            _LOGGER.error("Invalid build environment, can't build this add-on!")
            raise DockerError()

        _LOGGER.info("Starting build for %s:%s", self.image, version)

        def build_image() -> tuple[str, str]:
        def build_image():
            if build_env.squash:
                _LOGGER.warning(
                    "Ignoring squash build option for %s as Docker BuildKit does not support it.",

@@ -762,9 +761,8 @@ class DockerAddon(DockerInterface):
                requests.RequestException,
                aiodocker.DockerError,
            ) as err:
                raise DockerBuildError(
                    f"Can't build {self.image}:{version}: {err!s}", _LOGGER.error
                ) from err
                _LOGGER.error("Can't build %s:%s: %s", self.image, version, err)
                raise DockerError() from err

            _LOGGER.info("Build %s:%s done", self.image, version)

@@ -822,9 +820,12 @@ class DockerAddon(DockerInterface):
        on_condition=DockerJobError,
        concurrency=JobConcurrency.GROUP_REJECT,
    )
    def write_stdin(self, data: bytes) -> Awaitable[None]:
    async def write_stdin(self, data: bytes) -> None:
        """Write to add-on stdin."""
        return self.sys_run_in_executor(self._write_stdin, data)
        if not await self.is_running():
            raise DockerError()

        await self.sys_run_in_executor(self._write_stdin, data)

    def _write_stdin(self, data: bytes) -> None:
        """Write to add-on stdin.

@@ -2,25 +2,19 @@

from __future__ import annotations

from contextlib import suppress
from enum import Enum, StrEnum
from functools import total_ordering
from enum import StrEnum
from pathlib import PurePath
import re
from typing import cast

from docker.types import Mount

from ..const import MACHINE_ID

RE_RETRYING_DOWNLOAD_STATUS = re.compile(r"Retrying in \d+ seconds?")
# Docker Hub registry identifier
DOCKER_HUB = "hub.docker.com"

# Docker Hub registry identifier (official default)
# Docker's default registry is docker.io
DOCKER_HUB = "docker.io"

# Legacy Docker Hub identifier for backward compatibility
DOCKER_HUB_LEGACY = "hub.docker.com"
# Regex to match images with a registry host (e.g., ghcr.io/org/image)
IMAGE_WITH_HOST = re.compile(r"^((?:[a-z0-9]+(?:-[a-z0-9]+)*\.)+[a-z]{2,})\/.+")
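For context, a quick sketch of what the IMAGE_WITH_HOST pattern above does and does not match; the image names here are examples, not taken from the diff.

assert IMAGE_WITH_HOST.match("ghcr.io/home-assistant/amd64-supervisor").group(1) == "ghcr.io"
assert IMAGE_WITH_HOST.match("registry.example.com/org/image").group(1) == "registry.example.com"
# No dotted registry host means Docker Hub, so there is no match:
assert IMAGE_WITH_HOST.match("homeassistant/home-assistant") is None
# Note: unlike get_registry_from_image() further down, this pattern does not
# cover "localhost" or host:port registries without a dot in the host name.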
class Capabilities(StrEnum):

@@ -82,58 +76,6 @@ class PropagationMode(StrEnum):
    RSLAVE = "rslave"


@total_ordering
class PullImageLayerStage(Enum):
    """Job stages for pulling an image layer.

    These are a subset of the statuses in a docker pull image log. They
    are the standardized ones that are the most useful to us.
    """

    PULLING_FS_LAYER = 1, "Pulling fs layer"
    RETRYING_DOWNLOAD = 2, "Retrying download"
    DOWNLOADING = 2, "Downloading"
    VERIFYING_CHECKSUM = 3, "Verifying Checksum"
    DOWNLOAD_COMPLETE = 4, "Download complete"
    EXTRACTING = 5, "Extracting"
    PULL_COMPLETE = 6, "Pull complete"

    def __init__(self, order: int, status: str) -> None:
        """Set fields from values."""
        self.order = order
        self.status = status

    def __eq__(self, value: object, /) -> bool:
        """Check equality, allow StrEnum style comparisons on status."""
        with suppress(AttributeError):
            return self.status == cast(PullImageLayerStage, value).status
        return self.status == value

    def __lt__(self, other: object) -> bool:
        """Order instances."""
        with suppress(AttributeError):
            return self.order < cast(PullImageLayerStage, other).order
        return False

    def __hash__(self) -> int:
        """Hash instance."""
        return hash(self.status)

    @classmethod
    def from_status(cls, status: str) -> PullImageLayerStage | None:
        """Return stage instance from pull log status."""
        for i in cls:
            if i.status == status:
                return i

        # This one includes number of seconds until download so its not constant
        if RE_RETRYING_DOWNLOAD_STATUS.match(status):
            return cls.RETRYING_DOWNLOAD

        return None


ENV_DUPLICATE_LOG_FILE = "HA_DUPLICATE_LOG_FILE"
ENV_TIME = "TZ"
ENV_TOKEN = "SUPERVISOR_TOKEN"
ENV_TOKEN_OLD = "HASSIO_TOKEN"

@@ -14,7 +14,6 @@ from ..homeassistant.const import LANDINGPAGE
from ..jobs.const import JobConcurrency
from ..jobs.decorator import Job
from .const import (
    ENV_DUPLICATE_LOG_FILE,
    ENV_TIME,
    ENV_TOKEN,
    ENV_TOKEN_OLD,

@@ -175,8 +174,6 @@ class DockerHomeAssistant(DockerInterface):
        }
        if restore_job_id:
            environment[ENV_RESTORE_JOB_ID] = restore_job_id
        if self.sys_homeassistant.duplicate_log_file:
            environment[ENV_DUPLICATE_LOG_FILE] = "1"
        await self._run(
            tag=(self.sys_homeassistant.version),
            name=self.name,

@@ -19,7 +19,6 @@ import docker
from docker.models.containers import Container
import requests

from ..bus import EventListener
from ..const import (
    ATTR_PASSWORD,
    ATTR_REGISTRY,

@@ -35,25 +34,18 @@ from ..exceptions import (
    DockerError,
    DockerHubRateLimitExceeded,
    DockerJobError,
    DockerLogOutOfOrder,
    DockerNotFound,
    DockerRequestError,
)
from ..jobs import SupervisorJob
from ..jobs.const import JOB_GROUP_DOCKER_INTERFACE, JobConcurrency
from ..jobs.decorator import Job
from ..jobs.job_group import JobGroup
from ..resolution.const import ContextType, IssueType, SuggestionType
from ..utils.sentry import async_capture_exception
from .const import (
    DOCKER_HUB,
    DOCKER_HUB_LEGACY,
    ContainerState,
    PullImageLayerStage,
    RestartPolicy,
)
from .const import DOCKER_HUB, ContainerState, RestartPolicy
from .manager import CommandReturn, PullLogEntry
from .monitor import DockerContainerStateEvent
from .pull_progress import ImagePullProgress
from .stats import DockerStats

_LOGGER: logging.Logger = logging.getLogger(__name__)

@@ -190,8 +182,7 @@ class DockerInterface(JobGroup, ABC):
        stored = self.sys_docker.config.registries[registry]
        credentials[ATTR_USERNAME] = stored[ATTR_USERNAME]
        credentials[ATTR_PASSWORD] = stored[ATTR_PASSWORD]
        # Don't include registry for Docker Hub (both official and legacy)
        if registry not in (DOCKER_HUB, DOCKER_HUB_LEGACY):
        if registry != DOCKER_HUB:
            credentials[ATTR_REGISTRY] = registry

        _LOGGER.debug(

@@ -202,159 +193,6 @@ class DockerInterface(JobGroup, ABC):

        return credentials

    def _process_pull_image_log(  # noqa: C901
        self, install_job_id: str, reference: PullLogEntry
    ) -> None:
        """Process events fired from a docker while pulling an image, filtered to a given job id."""
        if (
            reference.job_id != install_job_id
            or not reference.id
            or not reference.status
            or not (stage := PullImageLayerStage.from_status(reference.status))
        ):
            return

        # Pulling FS Layer is our marker for a layer that needs to be downloaded and extracted. Otherwise it already exists and we can ignore
        job: SupervisorJob | None = None
        if stage == PullImageLayerStage.PULLING_FS_LAYER:
            job = self.sys_jobs.new_job(
                name="Pulling container image layer",
                initial_stage=stage.status,
                reference=reference.id,
                parent_id=install_job_id,
                internal=True,
            )
            job.done = False
            return

        # Find our sub job to update details of
        for j in self.sys_jobs.jobs:
            if j.parent_id == install_job_id and j.reference == reference.id:
                job = j
                break

        # There should no longer be any real risk of logs out of order anymore.
        # However tests with very small images have shown that sometimes Docker
        # skips stages in log. So keeping this one as a safety check on null job
        if not job:
            raise DockerLogOutOfOrder(
                f"Received pull image log with status {reference.status} for image id {reference.id} and parent job {install_job_id} but could not find a matching job, skipping",
                _LOGGER.debug,
            )

        # For progress calculation we assume downloading is 70% of time, extracting is 30% and others stages negligible
        progress = job.progress
        match stage:
            case PullImageLayerStage.DOWNLOADING | PullImageLayerStage.EXTRACTING:
                if (
                    reference.progress_detail
                    and reference.progress_detail.current
                    and reference.progress_detail.total
                ):
                    progress = (
                        reference.progress_detail.current
                        / reference.progress_detail.total
                    )
                    if stage == PullImageLayerStage.DOWNLOADING:
                        progress = 70 * progress
                    else:
                        progress = 70 + 30 * progress
            case (
                PullImageLayerStage.VERIFYING_CHECKSUM
                | PullImageLayerStage.DOWNLOAD_COMPLETE
            ):
                progress = 70
            case PullImageLayerStage.PULL_COMPLETE:
                progress = 100
            case PullImageLayerStage.RETRYING_DOWNLOAD:
                progress = 0

        # No real risk of getting things out of order in current implementation
        # but keeping this one in case another change to these trips us up.
        if stage != PullImageLayerStage.RETRYING_DOWNLOAD and progress < job.progress:
            raise DockerLogOutOfOrder(
                f"Received pull image log with status {reference.status} for job {job.uuid} that implied progress was {progress} but current progress is {job.progress}, skipping",
                _LOGGER.debug,
            )

        # Our filters have all passed. Time to update the job
        # Only downloading and extracting have progress details. Use that to set extra
        # We'll leave it around on later stages as the total bytes may be useful after that stage
        # Enforce range to prevent float drift error
        progress = max(0, min(progress, 100))
        if (
            stage in {PullImageLayerStage.DOWNLOADING, PullImageLayerStage.EXTRACTING}
            and reference.progress_detail
            and reference.progress_detail.current is not None
            and reference.progress_detail.total is not None
        ):
            job.update(
                progress=progress,
                stage=stage.status,
                extra={
                    "current": reference.progress_detail.current,
                    "total": reference.progress_detail.total,
                },
            )
        else:
            # If we reach DOWNLOAD_COMPLETE without ever having set extra (small layers that skip
            # the downloading phase), set a minimal extra so aggregate progress calculation can proceed
            extra = job.extra
            if stage == PullImageLayerStage.DOWNLOAD_COMPLETE and not job.extra:
                extra = {"current": 1, "total": 1}

            job.update(
                progress=progress,
                stage=stage.status,
                done=stage == PullImageLayerStage.PULL_COMPLETE,
                extra=None if stage == PullImageLayerStage.RETRYING_DOWNLOAD else extra,
            )

        # Once we have received a progress update for every child job, start to set status of the main one
        install_job = self.sys_jobs.get_job(install_job_id)
        layer_jobs = [
            job
            for job in self.sys_jobs.jobs
            if job.parent_id == install_job.uuid
            and job.name == "Pulling container image layer"
        ]

        # First set the total bytes to be downloaded/extracted on the main job
        if not install_job.extra:
            total = 0
            for job in layer_jobs:
                if not job.extra:
                    return
                total += job.extra["total"]
            install_job.extra = {"total": total}
        else:
            total = install_job.extra["total"]

        # Then determine total progress based on progress of each sub-job, factoring in size of each compared to total
        progress = 0.0
        stage = PullImageLayerStage.PULL_COMPLETE
        for job in layer_jobs:
            if not job.extra or not job.extra.get("total"):
                return
            progress += job.progress * (job.extra["total"] / total)
            job_stage = PullImageLayerStage.from_status(cast(str, job.stage))

            if job_stage < PullImageLayerStage.EXTRACTING:
                stage = PullImageLayerStage.DOWNLOADING
            elif (
                stage == PullImageLayerStage.PULL_COMPLETE
                and job_stage < PullImageLayerStage.PULL_COMPLETE
            ):
                stage = PullImageLayerStage.EXTRACTING

        # Ensure progress is 100 at this point to prevent float drift
        if stage == PullImageLayerStage.PULL_COMPLETE:
            progress = 100

        # To reduce noise, limit updates to when result has changed by an entire percent or when stage changed
        if stage != install_job.stage or progress >= install_job.progress + 1:
            install_job.update(stage=stage.status, progress=max(0, min(progress, 100)))
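A quick worked example of the 70/30 weighting described in the comments above; this is illustrative only and not part of the diff.

def layer_progress(stage: str, current: int, total: int) -> float:
    """Sketch of the per-layer formula: downloading fills 0-70%, extracting fills 70-100%."""
    ratio = current / total if total else 0.0
    return 70 * ratio if stage == "Downloading" else 70 + 30 * ratio

assert layer_progress("Downloading", 50, 100) == 35.0  # halfway through the download
assert layer_progress("Extracting", 50, 100) == 85.0   # halfway through extraction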
    @Job(
        name="docker_interface_install",
        on_condition=DockerJobError,

@@ -374,30 +212,35 @@ class DockerInterface(JobGroup, ABC):
            raise ValueError("Cannot pull without an image!")

        image_arch = arch or self.sys_arch.supervisor
        listener: EventListener | None = None
        pull_progress = ImagePullProgress()
        current_job = self.sys_jobs.current

        async def process_pull_event(event: PullLogEntry) -> None:
            """Process pull event and update job progress."""
            if event.job_id != current_job.uuid:
                return

            # Process event through progress tracker
            pull_progress.process_event(event)

            # Update job if progress changed significantly (>= 1%)
            should_update, progress = pull_progress.should_update_job()
            if should_update:
                stage = pull_progress.get_stage()
                current_job.update(progress=progress, stage=stage)

        listener = self.sys_bus.register_event(
            BusEvent.DOCKER_IMAGE_PULL_UPDATE, process_pull_event
        )

        _LOGGER.info("Downloading docker image %s with tag %s.", image, version)
        try:
            # Get credentials for private registries to pass to aiodocker
            credentials = self._get_credentials(image) or None

            curr_job_id = self.sys_jobs.current.uuid

            async def process_pull_image_log(reference: PullLogEntry) -> None:
                try:
                    self._process_pull_image_log(curr_job_id, reference)
                except DockerLogOutOfOrder as err:
                    # Send all these to sentry. Missing a few progress updates
                    # shouldn't matter to users but matters to us
                    await async_capture_exception(err)

            listener = self.sys_bus.register_event(
                BusEvent.DOCKER_IMAGE_PULL_UPDATE, process_pull_image_log
            )

            # Pull new image, passing credentials to aiodocker
            docker_image = await self.sys_docker.pull_image(
                self.sys_jobs.current.uuid,
                current_job.uuid,
                image,
                str(version),
                platform=MAP_ARCH[image_arch],

@@ -445,8 +288,7 @@ class DockerInterface(JobGroup, ABC):
                f"Unknown error with {image}:{version!s} -> {err!s}", _LOGGER.error
            ) from err
        finally:
            if listener:
                self.sys_bus.remove_listener(listener)
            self.sys_bus.remove_listener(listener)

        self._meta = docker_image

@@ -457,34 +299,35 @@ class DockerInterface(JobGroup, ABC):
            return True
        return False

    async def _get_container(self) -> Container | None:
        """Get docker container, returns None if not found."""
    async def is_running(self) -> bool:
        """Return True if Docker is running."""
        try:
            return await self.sys_run_in_executor(
            docker_container = await self.sys_run_in_executor(
                self.sys_docker.containers.get, self.name
            )
        except docker.errors.NotFound:
            return None
            return False
        except docker.errors.DockerException as err:
            raise DockerAPIError(
                f"Docker API error occurred while getting container information: {err!s}"
            ) from err
            raise DockerAPIError() from err
        except requests.RequestException as err:
            raise DockerRequestError(
                f"Error communicating with Docker to get container information: {err!s}"
            ) from err
            raise DockerRequestError() from err

    async def is_running(self) -> bool:
        """Return True if Docker is running."""
        if docker_container := await self._get_container():
            return docker_container.status == "running"
        return False
        return docker_container.status == "running"

    async def current_state(self) -> ContainerState:
        """Return current state of container."""
        if docker_container := await self._get_container():
            return _container_state_from_model(docker_container)
        return ContainerState.UNKNOWN
        try:
            docker_container = await self.sys_run_in_executor(
                self.sys_docker.containers.get, self.name
            )
        except docker.errors.NotFound:
            return ContainerState.UNKNOWN
        except docker.errors.DockerException as err:
            raise DockerAPIError() from err
        except requests.RequestException as err:
            raise DockerRequestError() from err

        return _container_state_from_model(docker_container)

    @Job(name="docker_interface_attach", concurrency=JobConcurrency.GROUP_QUEUE)
    async def attach(

@@ -519,9 +362,7 @@ class DockerInterface(JobGroup, ABC):

        # Successful?
        if not self._meta:
            raise DockerError(
                f"Could not get metadata on container or image for {self.name}"
            )
            raise DockerError()
        _LOGGER.info("Attaching to %s with version %s", self.image, self.version)

    @Job(

@@ -724,8 +565,14 @@ class DockerInterface(JobGroup, ABC):

    async def is_failed(self) -> bool:
        """Return True if Docker is failing state."""
        if not (docker_container := await self._get_container()):
        try:
            docker_container = await self.sys_run_in_executor(
                self.sys_docker.containers.get, self.name
            )
        except docker.errors.NotFound:
            return False
        except (docker.errors.DockerException, requests.RequestException) as err:
            raise DockerError() from err

        # container is not running
        if docker_container.status != "exited":

@@ -49,10 +49,9 @@ from ..exceptions import (
)
from ..utils.common import FileConfiguration
from ..validate import SCHEMA_DOCKER_CONFIG
from .const import DOCKER_HUB, DOCKER_HUB_LEGACY, LABEL_MANAGED
from .const import DOCKER_HUB, IMAGE_WITH_HOST, LABEL_MANAGED
from .monitor import DockerMonitor
from .network import DockerNetwork
from .utils import get_registry_from_image

_LOGGER: logging.Logger = logging.getLogger(__name__)

@@ -213,25 +212,19 @@ class DockerConfig(FileConfiguration):

        Matches the image against configured registries and returns the registry
        name if found, or None if no matching credentials are configured.

        Uses Docker's domain detection logic from:
        vendor/github.com/distribution/reference/normalize.go
        """
        if not self.registries:
            return None

        # Check if image uses a custom registry (e.g., ghcr.io/org/image)
        registry = get_registry_from_image(image)
        if registry:
        matcher = IMAGE_WITH_HOST.match(image)
        if matcher:
            registry = matcher.group(1)
            if registry in self.registries:
                return registry
        else:
            # No registry prefix means Docker Hub
            # Support both docker.io (official) and hub.docker.com (legacy)
            if DOCKER_HUB in self.registries:
                return DOCKER_HUB
            if DOCKER_HUB_LEGACY in self.registries:
                return DOCKER_HUB_LEGACY
        # If no registry prefix, check for Docker Hub credentials
        elif DOCKER_HUB in self.registries:
            return DOCKER_HUB

        return None
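Read together, the two variants above implement the same lookup: resolve the registry from the image reference, then check whether credentials are configured for it. A hedged sketch of that flow follows; the function name and the registries dict are invented for illustration, while get_registry_from_image, DOCKER_HUB and DOCKER_HUB_LEGACY are the names used in the diff.

def matching_registry(image: str, registries: dict[str, dict]) -> str | None:
    """Return the configured registry whose credentials apply to this image, if any."""
    registry = get_registry_from_image(image)  # None means Docker Hub
    if registry:
        return registry if registry in registries else None
    # No registry prefix: fall back to Docker Hub credentials, accepting the
    # legacy "hub.docker.com" key as well as the official "docker.io" one.
    for hub in (DOCKER_HUB, DOCKER_HUB_LEGACY):
        if hub in registries:
            return hub
    return None

# e.g. matching_registry("ghcr.io/org/image", {"ghcr.io": {"username": "u", "password": "p"}})
# returns "ghcr.io", while matching_registry("nginx", {"docker.io": {...}}) returns "docker.io".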
@@ -474,10 +467,8 @@ class DockerAPI(CoreSysAttributes):
        raises only if the get fails afterwards. Additionally it fires progress reports for the pull
        on the bus so listeners can use that to update status for users.
        """
        # Use timeout=None to disable timeout for pull operations, matching docker-py behavior.
        # aiodocker converts None to ClientTimeout(total=None) which disables the timeout.
        async for e in self.images.pull(
            repository, tag=tag, platform=platform, auth=auth, stream=True, timeout=None
            repository, tag=tag, platform=platform, auth=auth, stream=True
        ):
            entry = PullLogEntry.from_pull_log_dict(job_id, e)
            if entry.error:

@@ -617,15 +608,9 @@ class DockerAPI(CoreSysAttributes):
        except aiodocker.DockerError as err:
            if err.status == HTTPStatus.NOT_FOUND:
                return False
            raise DockerError(
                f"Could not get container {name} or image {image}:{version} to check state: {err!s}",
                _LOGGER.error,
            ) from err
            raise DockerError() from err
        except (docker_errors.DockerException, requests.RequestException) as err:
            raise DockerError(
                f"Could not get container {name} or image {image}:{version} to check state: {err!s}",
                _LOGGER.error,
            ) from err
            raise DockerError() from err

        # Check the image is correct and state is good
        return (

@@ -641,13 +626,9 @@ class DockerAPI(CoreSysAttributes):
        try:
            docker_container: Container = self.containers.get(name)
        except docker_errors.NotFound:
            # Generally suppressed so we don't log this
            raise DockerNotFound() from None
        except (docker_errors.DockerException, requests.RequestException) as err:
            raise DockerError(
                f"Could not get container {name} for stopping: {err!s}",
                _LOGGER.error,
            ) from err
            raise DockerError() from err

        if docker_container.status == "running":
            _LOGGER.info("Stopping %s application", name)

@@ -687,13 +668,9 @@ class DockerAPI(CoreSysAttributes):
        try:
            container: Container = self.containers.get(name)
        except docker_errors.NotFound:
            raise DockerNotFound(
                f"Container {name} not found for restarting", _LOGGER.warning
            ) from None
            raise DockerNotFound() from None
        except (docker_errors.DockerException, requests.RequestException) as err:
            raise DockerError(
                f"Could not get container {name} for restarting: {err!s}", _LOGGER.error
            ) from err
            raise DockerError() from err

        _LOGGER.info("Restarting %s", name)
        try:

@@ -706,13 +683,9 @@ class DockerAPI(CoreSysAttributes):
        try:
            docker_container: Container = self.containers.get(name)
        except docker_errors.NotFound:
            raise DockerNotFound(
                f"Container {name} not found for logs", _LOGGER.warning
            ) from None
            raise DockerNotFound() from None
        except (docker_errors.DockerException, requests.RequestException) as err:
            raise DockerError(
                f"Could not get container {name} for logs: {err!s}", _LOGGER.error
            ) from err
            raise DockerError() from err

        try:
            return docker_container.logs(tail=tail, stdout=True, stderr=True)

@@ -726,13 +699,9 @@ class DockerAPI(CoreSysAttributes):
        try:
            docker_container: Container = self.containers.get(name)
        except docker_errors.NotFound:
            raise DockerNotFound(
                f"Container {name} not found for stats", _LOGGER.warning
            ) from None
            raise DockerNotFound() from None
        except (docker_errors.DockerException, requests.RequestException) as err:
            raise DockerError(
                f"Could not inspect container '{name}': {err!s}", _LOGGER.error
            ) from err
            raise DockerError() from err

        # container is not running
        if docker_container.status != "running":

@@ -751,21 +720,15 @@ class DockerAPI(CoreSysAttributes):
        try:
            docker_container: Container = self.containers.get(name)
        except docker_errors.NotFound:
            raise DockerNotFound(
                f"Container {name} not found for running command", _LOGGER.warning
            ) from None
            raise DockerNotFound() from None
        except (docker_errors.DockerException, requests.RequestException) as err:
            raise DockerError(
                f"Can't get container {name} to run command: {err!s}"
            ) from err
            raise DockerError() from err

        # Execute
        try:
            code, output = docker_container.exec_run(command)
        except (docker_errors.DockerException, requests.RequestException) as err:
            raise DockerError(
                f"Can't run command in container {name}: {err!s}"
            ) from err
            raise DockerError() from err

        return CommandReturn(code, output)

@@ -798,7 +761,7 @@ class DockerAPI(CoreSysAttributes):
        """Import a tar file as image."""
        try:
            with tar_file.open("rb") as read_tar:
                resp: list[dict[str, Any]] = await self.images.import_image(read_tar)
                resp: list[dict[str, Any]] = self.images.import_image(read_tar)
        except (aiodocker.DockerError, OSError) as err:
            raise DockerError(
                f"Can't import image from tar: {err}", _LOGGER.error
316 supervisor/docker/pull_progress.py Normal file

@@ -0,0 +1,316 @@
"""Image pull progress tracking."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from contextlib import suppress
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, cast
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .manager import PullLogEntry
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
# Progress weight distribution: 70% downloading, 30% extraction
|
||||
DOWNLOAD_WEIGHT = 70.0
|
||||
EXTRACT_WEIGHT = 30.0
|
||||
|
||||
|
||||
class LayerPullStatus(Enum):
|
||||
"""Status values for pulling an image layer.
|
||||
|
||||
These are a subset of the statuses in a docker pull image log.
|
||||
The order field allows comparing which stage is further along.
|
||||
"""
|
||||
|
||||
PULLING_FS_LAYER = 1, "Pulling fs layer"
|
||||
WAITING = 1, "Waiting"
|
||||
RETRYING = 2, "Retrying" # Matches "Retrying in N seconds"
|
||||
DOWNLOADING = 3, "Downloading"
|
||||
VERIFYING_CHECKSUM = 4, "Verifying Checksum"
|
||||
DOWNLOAD_COMPLETE = 5, "Download complete"
|
||||
EXTRACTING = 6, "Extracting"
|
||||
PULL_COMPLETE = 7, "Pull complete"
|
||||
ALREADY_EXISTS = 7, "Already exists"
|
||||
|
||||
def __init__(self, order: int, status: str) -> None:
|
||||
"""Set fields from values."""
|
||||
self.order = order
|
||||
self.status = status
|
||||
|
||||
def __eq__(self, value: object, /) -> bool:
|
||||
"""Check equality, allow string comparisons on status."""
|
||||
with suppress(AttributeError):
|
||||
return self.status == cast(LayerPullStatus, value).status
|
||||
return self.status == value
|
||||
|
||||
def __hash__(self) -> int:
|
||||
"""Return hash based on status string."""
|
||||
return hash(self.status)
|
||||
|
||||
def __lt__(self, other: object) -> bool:
|
||||
"""Order instances by stage progression."""
|
||||
with suppress(AttributeError):
|
||||
return self.order < cast(LayerPullStatus, other).order
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def from_status(cls, status: str) -> LayerPullStatus | None:
|
||||
"""Get enum from status string, or None if not recognized."""
|
||||
# Handle "Retrying in N seconds" pattern
|
||||
if status.startswith("Retrying in "):
|
||||
return cls.RETRYING
|
||||
for member in cls:
|
||||
if member.status == status:
|
||||
return member
|
||||
return None
|
||||
|
||||
|
||||
@dataclass
|
||||
class LayerProgress:
|
||||
"""Track progress of a single layer."""
|
||||
|
||||
layer_id: str
|
||||
total_size: int = 0 # Size in bytes (from downloading, reused for extraction)
|
||||
download_current: int = 0
|
||||
extract_current: int = 0 # Extraction progress in bytes (overlay2 only)
|
||||
download_complete: bool = False
|
||||
extract_complete: bool = False
|
||||
already_exists: bool = False # Layer was already locally available
|
||||
|
||||
def calculate_progress(self) -> float:
|
||||
"""Calculate layer progress 0-100.
|
||||
|
||||
Progress is weighted: 70% download, 30% extraction.
|
||||
For overlay2, we have byte-based extraction progress.
|
||||
For containerd, extraction jumps from 70% to 100% on completion.
|
||||
"""
|
||||
if self.already_exists or self.extract_complete:
|
||||
return 100.0
|
||||
|
||||
if self.download_complete:
|
||||
# Check if we have extraction progress (overlay2)
|
||||
if self.extract_current > 0 and self.total_size > 0:
|
||||
extract_pct = min(1.0, self.extract_current / self.total_size)
|
||||
return DOWNLOAD_WEIGHT + (extract_pct * EXTRACT_WEIGHT)
|
||||
# No extraction progress yet - return 70%
|
||||
return DOWNLOAD_WEIGHT
|
||||
|
||||
if self.total_size > 0:
|
||||
download_pct = min(1.0, self.download_current / self.total_size)
|
||||
return download_pct * DOWNLOAD_WEIGHT
|
||||
|
||||
return 0.0
|
||||
|
||||
|
||||
@dataclass
|
||||
class ImagePullProgress:
|
||||
"""Track overall progress of pulling an image.
|
||||
|
||||
Uses count-based progress where each layer contributes equally regardless of size.
|
||||
This avoids progress regression when large layers are discovered late due to
|
||||
Docker's rate-limiting of concurrent downloads.
|
||||
|
||||
Progress is only reported after the first "Downloading" event, since Docker
|
||||
sends "Already exists" and "Pulling fs layer" events before we know the full
|
||||
layer count.
|
||||
"""
|
||||
|
||||
layers: dict[str, LayerProgress] = field(default_factory=dict)
|
||||
_last_reported_progress: float = field(default=0.0, repr=False)
|
||||
_seen_downloading: bool = field(default=False, repr=False)
|
||||
|
||||
def get_or_create_layer(self, layer_id: str) -> LayerProgress:
|
||||
"""Get existing layer or create new one."""
|
||||
if layer_id not in self.layers:
|
||||
self.layers[layer_id] = LayerProgress(layer_id=layer_id)
|
||||
return self.layers[layer_id]
|
||||
|
||||
def process_event(self, entry: PullLogEntry) -> None:
|
||||
"""Process a pull log event and update layer state."""
|
||||
# Skip events without layer ID or status
|
||||
if not entry.id or not entry.status:
|
||||
return
|
||||
|
||||
# Skip metadata events that aren't layer-specific
|
||||
# "Pulling from X" has id=tag but isn't a layer
|
||||
if entry.status.startswith("Pulling from "):
|
||||
return
|
||||
|
||||
# Parse status to enum (returns None for unrecognized statuses)
|
||||
status = LayerPullStatus.from_status(entry.status)
|
||||
if status is None:
|
||||
return
|
||||
|
||||
layer = self.get_or_create_layer(entry.id)
|
||||
|
||||
# Handle "Already exists" - layer is locally available
|
||||
if status is LayerPullStatus.ALREADY_EXISTS:
|
||||
layer.already_exists = True
|
||||
layer.download_complete = True
|
||||
layer.extract_complete = True
|
||||
return
|
||||
|
||||
# Handle "Pulling fs layer" / "Waiting" - layer is being tracked
|
||||
if status in (LayerPullStatus.PULLING_FS_LAYER, LayerPullStatus.WAITING):
|
||||
return
|
||||
|
||||
# Handle "Downloading" - update download progress
|
||||
if status is LayerPullStatus.DOWNLOADING:
|
||||
# Mark that we've seen downloading - now we know layer count is complete
|
||||
self._seen_downloading = True
|
||||
if (
|
||||
entry.progress_detail
|
||||
and entry.progress_detail.current is not None
|
||||
and entry.progress_detail.total is not None
|
||||
):
|
||||
layer.download_current = entry.progress_detail.current
|
||||
# Only set total_size if not already set or if this is larger
|
||||
# (handles case where total changes during download)
|
||||
layer.total_size = max(layer.total_size, entry.progress_detail.total)
|
||||
return
|
||||
|
||||
# Handle "Verifying Checksum" - download is essentially complete
|
||||
if status is LayerPullStatus.VERIFYING_CHECKSUM:
|
||||
if layer.total_size > 0:
|
||||
layer.download_current = layer.total_size
|
||||
return
|
||||
|
||||
# Handle "Download complete" - download phase done
|
||||
if status is LayerPullStatus.DOWNLOAD_COMPLETE:
|
||||
layer.download_complete = True
|
||||
if layer.total_size > 0:
|
||||
layer.download_current = layer.total_size
|
||||
elif layer.total_size == 0:
|
||||
# Small layer that skipped downloading phase
|
||||
# Set minimal size so it doesn't distort weighted average
|
||||
layer.total_size = 1
|
||||
layer.download_current = 1
|
||||
return
|
||||
|
||||
# Handle "Extracting" - extraction in progress
|
||||
if status is LayerPullStatus.EXTRACTING:
|
||||
# For overlay2: progressDetail has {current, total} in bytes
|
||||
# For containerd: progressDetail has {current, units: "s"} (time elapsed)
|
||||
# We can only use byte-based progress (overlay2)
|
||||
layer.download_complete = True
|
||||
if layer.total_size > 0:
|
||||
layer.download_current = layer.total_size
|
||||
|
||||
# Check if this is byte-based extraction progress (overlay2)
|
||||
# Overlay2 has {current, total} in bytes, no units field
|
||||
# Containerd has {current, units: "s"} which is useless for progress
|
||||
if (
|
||||
entry.progress_detail
|
||||
and entry.progress_detail.current is not None
|
||||
and entry.progress_detail.units is None
|
||||
):
|
||||
# Use layer's total_size from downloading phase (doesn't change)
|
||||
layer.extract_current = entry.progress_detail.current
|
||||
_LOGGER.debug(
|
||||
"Layer %s extracting: %d/%d (%.1f%%)",
|
||||
layer.layer_id,
|
||||
layer.extract_current,
|
||||
layer.total_size,
|
||||
(layer.extract_current / layer.total_size * 100)
|
||||
if layer.total_size > 0
|
||||
else 0,
|
||||
)
|
||||
return
|
||||
|
||||
# Handle "Pull complete" - layer is fully done
|
||||
if status is LayerPullStatus.PULL_COMPLETE:
|
||||
layer.download_complete = True
|
||||
layer.extract_complete = True
|
||||
if layer.total_size > 0:
|
||||
layer.download_current = layer.total_size
|
||||
return
|
||||
|
||||
# Handle "Retrying in N seconds" - reset download progress
|
||||
if status is LayerPullStatus.RETRYING:
|
||||
layer.download_current = 0
|
||||
layer.download_complete = False
|
||||
return
|
||||
|
||||
def calculate_progress(self) -> float:
|
||||
"""Calculate overall progress 0-100.
|
||||
|
||||
Uses count-based progress where each layer that needs pulling contributes
|
||||
equally. Layers that already exist locally are excluded from the calculation.
|
||||
|
||||
Returns 0 until we've seen the first "Downloading" event, since Docker
|
||||
reports "Already exists" and "Pulling fs layer" events before we know
|
||||
the complete layer count.
|
||||
"""
|
||||
# Don't report progress until we've seen downloading start
|
||||
# This ensures we know the full layer count before calculating progress
|
||||
if not self._seen_downloading or not self.layers:
|
||||
return 0.0
|
||||
|
||||
# Only count layers that need pulling (exclude already_exists)
|
||||
layers_to_pull = [
|
||||
layer for layer in self.layers.values() if not layer.already_exists
|
||||
]
|
||||
|
||||
if not layers_to_pull:
|
||||
# All layers already exist, nothing to download
|
||||
return 100.0
|
||||
|
||||
# Each layer contributes equally: sum of layer progresses / total layers
|
||||
total_progress = sum(layer.calculate_progress() for layer in layers_to_pull)
|
||||
return total_progress / len(layers_to_pull)
|
||||
|
||||
def get_stage(self) -> str | None:
|
||||
"""Get current stage based on layer states."""
|
||||
if not self.layers:
|
||||
return None
|
||||
|
||||
# Check if any layer is still downloading
|
||||
for layer in self.layers.values():
|
||||
if layer.already_exists:
|
||||
continue
|
||||
if not layer.download_complete:
|
||||
return "Downloading"
|
||||
|
||||
# All downloads complete, check if extracting
|
||||
for layer in self.layers.values():
|
||||
if layer.already_exists:
|
||||
continue
|
||||
if not layer.extract_complete:
|
||||
return "Extracting"
|
||||
|
||||
# All done
|
||||
return "Pull complete"
|
||||
|
||||
def should_update_job(self, threshold: float = 1.0) -> tuple[bool, float]:
|
||||
"""Check if job should be updated based on progress change.
|
||||
|
||||
Returns (should_update, current_progress).
|
||||
Updates are triggered when progress changes by at least threshold%.
|
||||
Progress is guaranteed to only increase (monotonic).
|
||||
"""
|
||||
current_progress = self.calculate_progress()
|
||||
|
||||
# Ensure monotonic progress - never report a decrease
|
||||
# This can happen when new layers get size info and change the weighted average
|
||||
if current_progress < self._last_reported_progress:
|
||||
_LOGGER.debug(
|
||||
"Progress decreased from %.1f%% to %.1f%%, keeping last reported",
|
||||
self._last_reported_progress,
|
||||
current_progress,
|
||||
)
|
||||
return False, self._last_reported_progress
|
||||
|
||||
if current_progress >= self._last_reported_progress + threshold:
|
||||
_LOGGER.debug(
|
||||
"Progress update: %.1f%% -> %.1f%% (delta: %.1f%%)",
|
||||
self._last_reported_progress,
|
||||
current_progress,
|
||||
current_progress - self._last_reported_progress,
|
||||
)
|
||||
self._last_reported_progress = current_progress
|
||||
return True, current_progress
|
||||
|
||||
return False, self._last_reported_progress
|
||||
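A hypothetical usage sketch of the new tracker; `pull_log_entries` and `job` below stand in for the real PullLogEntry stream and SupervisorJob handled by the calling code in docker/interface.py.

progress = ImagePullProgress()
for entry in pull_log_entries:  # parsed docker pull status lines
    progress.process_event(entry)
    should_update, pct = progress.should_update_job()
    if should_update:  # only fires on monotonic increases of at least 1%
        job.update(progress=pct, stage=progress.get_stage())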
@@ -1,57 +0,0 @@
"""Docker utilities."""

from __future__ import annotations

import re

# Docker image reference domain regex
# Based on Docker's reference implementation:
# vendor/github.com/distribution/reference/normalize.go
#
# A domain is detected if the part before the first / contains:
# - "localhost" (with optional port)
# - Contains "." (like registry.example.com or 127.0.0.1)
# - Contains ":" (like myregistry:5000)
# - IPv6 addresses in brackets (like [::1]:5000)
#
# Note: Docker also treats uppercase letters as registry indicators since
# namespaces must be lowercase, but this regex handles lowercase matching
# and the get_registry_from_image() function validates the registry rules.
IMAGE_REGISTRY_REGEX = re.compile(
    r"^(?P<registry>"
    r"localhost(?::[0-9]+)?|"  # localhost with optional port
    r"(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])"  # domain component
    r"(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))*"  # more components
    r"(?::[0-9]+)?|"  # optional port
    r"\[[a-fA-F0-9:]+\](?::[0-9]+)?"  # IPv6 with optional port
    r")/"  # must be followed by /
)


def get_registry_from_image(image_ref: str) -> str | None:
    """Extract registry from Docker image reference.

    Returns the registry if the image reference contains one,
    or None if the image uses Docker Hub (docker.io).

    Based on Docker's reference implementation:
    vendor/github.com/distribution/reference/normalize.go

    Examples:
        get_registry_from_image("nginx") -> None (docker.io)
        get_registry_from_image("library/nginx") -> None (docker.io)
        get_registry_from_image("myregistry.com/nginx") -> "myregistry.com"
        get_registry_from_image("localhost/myimage") -> "localhost"
        get_registry_from_image("localhost:5000/myimage") -> "localhost:5000"
        get_registry_from_image("registry.io:5000/org/app:v1") -> "registry.io:5000"
        get_registry_from_image("[::1]:5000/myimage") -> "[::1]:5000"

    """
    match = IMAGE_REGISTRY_REGEX.match(image_ref)
    if match:
        registry = match.group("registry")
        # Must contain '.' or ':' or be 'localhost' to be a real registry
        # This prevents treating "myuser/myimage" as having registry "myuser"
        if "." in registry or ":" in registry or registry == "localhost":
            return registry
    return None  # No registry = Docker Hub (docker.io)

@@ -1,25 +1,25 @@
"""Core Exceptions."""

from collections.abc import Callable, Mapping
from collections.abc import Callable
from typing import Any

MESSAGE_CHECK_SUPERVISOR_LOGS = (
    "Check supervisor logs for details (check with '{logs_command}')"
)
EXTRA_FIELDS_LOGS_COMMAND = {"logs_command": "ha supervisor logs"}


class HassioError(Exception):
    """Root exception."""

    error_key: str | None = None
    message_template: str | None = None
    extra_fields: dict[str, Any] | None = None

    def __init__(
        self, message: str | None = None, logger: Callable[..., None] | None = None
        self,
        message: str | None = None,
        logger: Callable[..., None] | None = None,
        *,
        extra_fields: dict[str, Any] | None = None,
    ) -> None:
        """Raise & log."""
        self.extra_fields = extra_fields or {}

        if not message and self.message_template:
            message = (
                self.message_template.format(**self.extra_fields)
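A minimal sketch of how error_key, message_template and extra_fields combine under the __init__ shown above; the subclass name and add-on slug here are made up for illustration.

class AddonNotRunningSketch(HassioError):
    error_key = "addon_not_running_error"
    message_template = "Add-on {addon} is not running"

err = AddonNotRunningSketch(extra_fields={"addon": "core_mosquitto"})
# message_template.format(**extra_fields) -> "Add-on core_mosquitto is not running"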
@@ -41,94 +41,6 @@ class HassioNotSupportedError(HassioError):
    """Function is not supported."""


# API


class APIError(HassioError, RuntimeError):
    """API errors."""

    status = 400
    headers: Mapping[str, str] | None = None

    def __init__(
        self,
        message: str | None = None,
        logger: Callable[..., None] | None = None,
        *,
        headers: Mapping[str, str] | None = None,
        job_id: str | None = None,
    ) -> None:
        """Raise & log, optionally with job."""
        super().__init__(message, logger)
        self.headers = headers
        self.job_id = job_id


class APIUnauthorized(APIError):
    """API unauthorized error."""

    status = 401


class APIForbidden(APIError):
    """API forbidden error."""

    status = 403


class APINotFound(APIError):
    """API not found error."""

    status = 404


class APIGone(APIError):
    """API is no longer available."""

    status = 410


class APITooManyRequests(APIError):
    """API too many requests error."""

    status = 429


class APIInternalServerError(APIError):
    """API internal server error."""

    status = 500


class APIAddonNotInstalled(APIError):
    """Not installed addon requested at addons API."""


class APIDBMigrationInProgress(APIError):
    """Service is unavailable due to an offline DB migration is in progress."""

    status = 503


class APIUnknownSupervisorError(APIError):
    """Unknown error occurred within supervisor. Adds supervisor check logs rider to message template."""

    status = 500

    def __init__(
        self,
        logger: Callable[..., None] | None = None,
        *,
        job_id: str | None = None,
    ) -> None:
        """Initialize exception."""
        self.message_template = (
            f"{self.message_template}. {MESSAGE_CHECK_SUPERVISOR_LOGS}"
        )
        self.extra_fields = (self.extra_fields or {}) | EXTRA_FIELDS_LOGS_COMMAND
        super().__init__(None, logger, job_id=job_id)


# JobManager


@@ -210,13 +122,6 @@ class SupervisorAppArmorError(SupervisorError):
    """Supervisor AppArmor error."""


class SupervisorUnknownError(SupervisorError, APIUnknownSupervisorError):
    """Raise when an unknown error occurs interacting with Supervisor or its container."""

    error_key = "supervisor_unknown_error"
    message_template = "An unknown error occurred with Supervisor"


class SupervisorJobError(SupervisorError, JobException):
    """Raise on job errors."""


@@ -345,54 +250,6 @@ class AddonConfigurationError(AddonsError):
    """Error with add-on configuration."""


class AddonConfigurationInvalidError(AddonConfigurationError, APIError):
    """Raise if invalid configuration provided for addon."""

    error_key = "addon_configuration_invalid_error"
    message_template = "Add-on {addon} has invalid options: {validation_error}"

    def __init__(
        self,
        logger: Callable[..., None] | None = None,
        *,
        addon: str,
        validation_error: str,
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"addon": addon, "validation_error": validation_error}
        super().__init__(None, logger)


class AddonBootConfigCannotChangeError(AddonsError, APIError):
    """Raise if user attempts to change addon boot config when it can't be changed."""

    error_key = "addon_boot_config_cannot_change_error"
    message_template = (
        "Addon {addon} boot option is set to {boot_config} so it cannot be changed"
    )

    def __init__(
        self, logger: Callable[..., None] | None = None, *, addon: str, boot_config: str
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"addon": addon, "boot_config": boot_config}
        super().__init__(None, logger)


class AddonNotRunningError(AddonsError, APIError):
    """Raise when an addon is not running."""

    error_key = "addon_not_running_error"
    message_template = "Add-on {addon} is not running"

    def __init__(
        self, logger: Callable[..., None] | None = None, *, addon: str
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"addon": addon}
        super().__init__(None, logger)


class AddonNotSupportedError(HassioNotSupportedError):
    """Addon doesn't support a function."""


@@ -411,8 +268,11 @@ class AddonNotSupportedArchitectureError(AddonNotSupportedError):
        architectures: list[str],
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"slug": slug, "architectures": ", ".join(architectures)}
        super().__init__(None, logger)
        super().__init__(
            None,
            logger,
            extra_fields={"slug": slug, "architectures": ", ".join(architectures)},
        )


class AddonNotSupportedMachineTypeError(AddonNotSupportedError):

@@ -429,8 +289,11 @@ class AddonNotSupportedMachineTypeError(AddonNotSupportedError):
        machine_types: list[str],
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"slug": slug, "machine_types": ", ".join(machine_types)}
        super().__init__(None, logger)
        super().__init__(
            None,
            logger,
            extra_fields={"slug": slug, "machine_types": ", ".join(machine_types)},
        )


class AddonNotSupportedHomeAssistantVersionError(AddonNotSupportedError):

@@ -447,96 +310,11 @@ class AddonNotSupportedHomeAssistantVersionError(AddonNotSupportedError):
        version: str,
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"slug": slug, "version": version}
        super().__init__(None, logger)


class AddonNotSupportedWriteStdinError(AddonNotSupportedError, APIError):
    """Addon does not support writing to stdin."""

    error_key = "addon_not_supported_write_stdin_error"
    message_template = "Add-on {addon} does not support writing to stdin"

    def __init__(
        self, logger: Callable[..., None] | None = None, *, addon: str
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"addon": addon}
        super().__init__(None, logger)


class AddonBuildDockerfileMissingError(AddonNotSupportedError, APIError):
    """Raise when addon build invalid because dockerfile is missing."""

    error_key = "addon_build_dockerfile_missing_error"
    message_template = (
        "Cannot build addon '{addon}' because dockerfile is missing. A repair "
        "using '{repair_command}' will fix this if the cause is data "
        "corruption. Otherwise please report this to the addon developer."
    )

    def __init__(
        self, logger: Callable[..., None] | None = None, *, addon: str
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"addon": addon, "repair_command": "ha supervisor repair"}
        super().__init__(None, logger)


class AddonBuildArchitectureNotSupportedError(AddonNotSupportedError, APIError):
    """Raise when addon cannot be built on system because it doesn't support its architecture."""

    error_key = "addon_build_architecture_not_supported_error"
    message_template = (
        "Cannot build addon '{addon}' because its supported architectures "
        "({addon_arches}) do not match the system supported architectures ({system_arches})"
    )

    def __init__(
        self,
        logger: Callable[..., None] | None = None,
        *,
        addon: str,
        addon_arch_list: list[str],
        system_arch_list: list[str],
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {
            "addon": addon,
            "addon_arches": ", ".join(addon_arch_list),
            "system_arches": ", ".join(system_arch_list),
        }
        super().__init__(None, logger)


class AddonUnknownError(AddonsError, APIUnknownSupervisorError):
    """Raise when unknown error occurs taking an action for an addon."""

    error_key = "addon_unknown_error"
    message_template = "An unknown error occurred with addon {addon}"

    def __init__(
        self, logger: Callable[..., None] | None = None, *, addon: str
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"addon": addon}
        super().__init__(logger)


class AddonBuildFailedUnknownError(AddonsError, APIUnknownSupervisorError):
    """Raise when the build failed for an addon due to an unknown error."""

    error_key = "addon_build_failed_unknown_error"
    message_template = (
        "An unknown error occurred while trying to build the image for addon {addon}"
    )

    def __init__(
        self, logger: Callable[..., None] | None = None, *, addon: str
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"addon": addon}
        super().__init__(logger)
        super().__init__(
            None,
            logger,
            extra_fields={"slug": slug, "version": version},
        )


class AddonsJobError(AddonsError, JobException):

@@ -568,64 +346,13 @@ class AuthError(HassioError):
    """Auth errors."""


class AuthPasswordResetError(AuthError, APIError):
class AuthPasswordResetError(HassioError):
    """Auth error if password reset failed."""

    error_key = "auth_password_reset_error"
    message_template = "Username '{user}' does not exist. Check list of users using '{auth_list_command}'."

    def __init__(
        self,
        logger: Callable[..., None] | None = None,
        *,
        user: str,
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"user": user, "auth_list_command": "ha auth list"}
        super().__init__(None, logger)


class AuthListUsersError(AuthError, APIUnknownSupervisorError):
class AuthListUsersError(HassioError):
    """Auth error if listing users failed."""

    error_key = "auth_list_users_error"
    message_template = "Can't request listing users on Home Assistant"


class AuthListUsersNoneResponseError(AuthError, APIInternalServerError):
    """Auth error if listing users returned invalid None response."""

    error_key = "auth_list_users_none_response_error"
    message_template = "Home Assistant returned invalid response of `{none}` instead of a list of users. Check Home Assistant logs for details (check with `{logs_command}`)"
    extra_fields = {"none": "None", "logs_command": "ha core logs"}

    def __init__(self, logger: Callable[..., None] | None = None) -> None:
        """Initialize exception."""
        super().__init__(None, logger)


class AuthInvalidNonStringValueError(AuthError, APIUnauthorized):
    """Auth error if something besides a string provided as username or password."""

    error_key = "auth_invalid_non_string_value_error"
|
||||
message_template = "Username and password must be strings"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
logger: Callable[..., None] | None = None,
|
||||
*,
|
||||
headers: Mapping[str, str] | None = None,
|
||||
) -> None:
|
||||
"""Initialize exception."""
|
||||
super().__init__(None, logger, headers=headers)
|
||||
|
||||
|
||||
class AuthHomeAssistantAPIValidationError(AuthError, APIUnknownSupervisorError):
|
||||
"""Error encountered trying to validate auth details via Home Assistant API."""
|
||||
|
||||
error_key = "auth_home_assistant_api_validation_error"
|
||||
message_template = "Unable to validate authentication details with Home Assistant"
|
||||
|
||||
|
||||
# Host
|
||||
|
||||
@@ -658,6 +385,60 @@ class HostLogError(HostError):
|
||||
"""Internal error with host log."""
|
||||
|
||||
|
||||
# API
|
||||
|
||||
|
||||
class APIError(HassioError, RuntimeError):
|
||||
"""API errors."""
|
||||
|
||||
status = 400
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
message: str | None = None,
|
||||
logger: Callable[..., None] | None = None,
|
||||
*,
|
||||
job_id: str | None = None,
|
||||
error: HassioError | None = None,
|
||||
) -> None:
|
||||
"""Raise & log, optionally with job."""
|
||||
# Allow these to be set from another error here since APIErrors essentially wrap others to add a status
|
||||
self.error_key = error.error_key if error else None
|
||||
self.message_template = error.message_template if error else None
|
||||
super().__init__(
|
||||
message, logger, extra_fields=error.extra_fields if error else None
|
||||
)
|
||||
self.job_id = job_id
|
||||
|
||||
|
||||
class APIForbidden(APIError):
|
||||
"""API forbidden error."""
|
||||
|
||||
status = 403
|
||||
|
||||
|
||||
class APINotFound(APIError):
|
||||
"""API not found error."""
|
||||
|
||||
status = 404
|
||||
|
||||
|
||||
class APIGone(APIError):
|
||||
"""API is no longer available."""
|
||||
|
||||
status = 410
|
||||
|
||||
|
||||
class APIAddonNotInstalled(APIError):
|
||||
"""Not installed addon requested at addons API."""
|
||||
|
||||
|
||||
class APIDBMigrationInProgress(APIError):
|
||||
"""Service is unavailable due to an offline DB migration is in progress."""
|
||||
|
||||
status = 503
|
||||
|
||||
|
||||
# Service / Discovery
|
||||
|
||||
|
||||
@@ -835,10 +616,6 @@ class DockerError(HassioError):
|
||||
"""Docker API/Transport errors."""
|
||||
|
||||
|
||||
class DockerBuildError(DockerError):
|
||||
"""Docker error during build."""
|
||||
|
||||
|
||||
class DockerAPIError(DockerError):
|
||||
"""Docker API error."""
|
||||
|
||||
@@ -855,10 +632,6 @@ class DockerNotFound(DockerError):
|
||||
"""Docker object don't Exists."""
|
||||
|
||||
|
||||
class DockerLogOutOfOrder(DockerError):
|
||||
"""Raise when log from docker action was out of order."""
|
||||
|
||||
|
||||
class DockerNoSpaceOnDevice(DockerError):
|
||||
"""Raise if a docker pull fails due to available space."""
|
||||
|
||||
@@ -870,7 +643,7 @@ class DockerNoSpaceOnDevice(DockerError):
|
||||
super().__init__(None, logger=logger)
|
||||
|
||||
|
||||
class DockerHubRateLimitExceeded(DockerError, APITooManyRequests):
|
||||
class DockerHubRateLimitExceeded(DockerError):
|
||||
"""Raise for docker hub rate limit exceeded error."""
|
||||
|
||||
error_key = "dockerhub_rate_limit_exceeded"
|
||||
@@ -878,13 +651,16 @@ class DockerHubRateLimitExceeded(DockerError, APITooManyRequests):
|
||||
"Your IP address has made too many requests to Docker Hub which activated a rate limit. "
|
||||
"For more details see {dockerhub_rate_limit_url}"
|
||||
)
|
||||
extra_fields = {
|
||||
"dockerhub_rate_limit_url": "https://www.home-assistant.io/more-info/dockerhub-rate-limit"
|
||||
}
|
||||
|
||||
def __init__(self, logger: Callable[..., None] | None = None) -> None:
|
||||
"""Raise & log."""
|
||||
super().__init__(None, logger=logger)
|
||||
super().__init__(
|
||||
None,
|
||||
logger=logger,
|
||||
extra_fields={
|
||||
"dockerhub_rate_limit_url": "https://www.home-assistant.io/more-info/dockerhub-rate-limit"
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
class DockerJobError(DockerError, JobException):
|
||||
@@ -955,20 +731,6 @@ class StoreNotFound(StoreError):
|
||||
"""Raise if slug is not known."""
|
||||
|
||||
|
||||
class StoreAddonNotFoundError(StoreError, APINotFound):
|
||||
"""Raise if a requested addon is not in the store."""
|
||||
|
||||
error_key = "store_addon_not_found_error"
|
||||
message_template = "Addon {addon} does not exist in the store"
|
||||
|
||||
def __init__(
|
||||
self, logger: Callable[..., None] | None = None, *, addon: str
|
||||
) -> None:
|
||||
"""Initialize exception."""
|
||||
self.extra_fields = {"addon": addon}
|
||||
super().__init__(None, logger)
|
||||
|
||||
|
||||
class StoreJobError(StoreError, JobException):
|
||||
"""Raise on job error with git."""
|
||||
|
||||
@@ -1004,7 +766,7 @@ class BackupJobError(BackupError, JobException):
|
||||
"""Raise on Backup job error."""
|
||||
|
||||
|
||||
class BackupFileNotFoundError(BackupError, APINotFound):
|
||||
class BackupFileNotFoundError(BackupError):
|
||||
"""Raise if the backup file hasn't been found."""
|
||||
|
||||
|
||||
@@ -1016,55 +778,6 @@ class BackupFileExistError(BackupError):
|
||||
"""Raise if the backup file already exists."""
|
||||
|
||||
|
||||
class AddonBackupMetadataInvalidError(BackupError, APIError):
|
||||
"""Raise if invalid metadata file provided for addon in backup."""
|
||||
|
||||
error_key = "addon_backup_metadata_invalid_error"
|
||||
message_template = (
|
||||
"Metadata file for add-on {addon} in backup is invalid: {validation_error}"
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
logger: Callable[..., None] | None = None,
|
||||
*,
|
||||
addon: str,
|
||||
validation_error: str,
|
||||
) -> None:
|
||||
"""Initialize exception."""
|
||||
self.extra_fields = {"addon": addon, "validation_error": validation_error}
|
||||
super().__init__(None, logger)
|
||||
|
||||
|
||||
class AddonPrePostBackupCommandReturnedError(BackupError, APIError):
|
||||
"""Raise when addon's pre/post backup command returns an error."""
|
||||
|
||||
error_key = "addon_pre_post_backup_command_returned_error"
|
||||
message_template = (
|
||||
"Pre-/Post backup command for add-on {addon} returned error code: "
|
||||
"{exit_code}. Please report this to the addon developer. Enable debug "
|
||||
"logging to capture complete command output using {debug_logging_command}"
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self, logger: Callable[..., None] | None = None, *, addon: str, exit_code: int
|
||||
) -> None:
|
||||
"""Initialize exception."""
|
||||
self.extra_fields = {
|
||||
"addon": addon,
|
||||
"exit_code": exit_code,
|
||||
"debug_logging_command": "ha supervisor options --logging debug",
|
||||
}
|
||||
super().__init__(None, logger)
|
||||
|
||||
|
||||
class BackupRestoreUnknownError(BackupError, APIUnknownSupervisorError):
|
||||
"""Raise when an unknown error occurs during backup or restore."""
|
||||
|
||||
error_key = "backup_restore_unknown_error"
|
||||
message_template = "An unknown error occurred during backup/restore"
|
||||
|
||||
|
||||
# Security
|
||||
|
||||
|
||||
|
||||
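
# Note (illustrative only, not part of the diff): the exceptions.py hunks above move
# extra_fields from an attribute assigned before calling super().__init__(None, logger)
# to a keyword argument passed straight to the base constructor. A minimal,
# self-contained sketch of the new style, using a hypothetical stand-in base class
# rather than the real HassioError:

from collections.abc import Callable


class SketchBaseError(Exception):
    """Stand-in for the base error class (hypothetical, for illustration)."""

    message_template: str | None = None

    def __init__(
        self,
        message: str | None = None,
        logger: Callable[..., None] | None = None,
        *,
        extra_fields: dict[str, str] | None = None,
    ) -> None:
        # Store the structured fields and render the template when no message is given
        self.extra_fields = extra_fields or {}
        if message is None and self.message_template:
            message = self.message_template.format(**self.extra_fields)
        super().__init__(message)
        if logger is not None and message is not None:
            logger(message)


class SketchAddonNotRunningError(SketchBaseError):
    """Example subclass: extra_fields handed to the base constructor."""

    message_template = "Add-on {addon} is not running"

    def __init__(
        self, logger: Callable[..., None] | None = None, *, addon: str
    ) -> None:
        # New style seen in the diff: pass extra_fields through super().__init__()
        super().__init__(None, logger, extra_fields={"addon": addon})


# Usage example: raise SketchAddonNotRunningError(addon="local_example")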

@@ -23,7 +23,6 @@ from ..const import (
    ATTR_AUDIO_OUTPUT,
    ATTR_BACKUPS_EXCLUDE_DATABASE,
    ATTR_BOOT,
    ATTR_DUPLICATE_LOG_FILE,
    ATTR_IMAGE,
    ATTR_MESSAGE,
    ATTR_PORT,
@@ -300,16 +299,6 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
        """Set whether backups should exclude database by default."""
        self._data[ATTR_BACKUPS_EXCLUDE_DATABASE] = value

    @property
    def duplicate_log_file(self) -> bool:
        """Return True if Home Assistant should duplicate logs to file."""
        return self._data[ATTR_DUPLICATE_LOG_FILE]

    @duplicate_log_file.setter
    def duplicate_log_file(self, value: bool) -> None:
        """Set whether Home Assistant should duplicate logs to file."""
        self._data[ATTR_DUPLICATE_LOG_FILE] = value

    async def load(self) -> None:
        """Prepare Home Assistant object."""
        await asyncio.wait(

@@ -10,7 +10,6 @@ from ..const import (
    ATTR_AUDIO_OUTPUT,
    ATTR_BACKUPS_EXCLUDE_DATABASE,
    ATTR_BOOT,
    ATTR_DUPLICATE_LOG_FILE,
    ATTR_IMAGE,
    ATTR_PORT,
    ATTR_REFRESH_TOKEN,
@@ -37,7 +36,6 @@ SCHEMA_HASS_CONFIG = vol.Schema(
        vol.Optional(ATTR_AUDIO_OUTPUT, default=None): vol.Maybe(str),
        vol.Optional(ATTR_AUDIO_INPUT, default=None): vol.Maybe(str),
        vol.Optional(ATTR_BACKUPS_EXCLUDE_DATABASE, default=False): vol.Boolean(),
        vol.Optional(ATTR_DUPLICATE_LOG_FILE, default=False): vol.Boolean(),
        vol.Optional(ATTR_OVERRIDE_IMAGE, default=False): vol.Boolean(),
    },
    extra=vol.REMOVE_EXTRA,

@@ -6,8 +6,8 @@ import logging
import socket

from ..dbus.const import (
    ConnectionState,
    ConnectionStateFlags,
    ConnectionStateType,
    DeviceType,
    InterfaceAddrGenMode as NMInterfaceAddrGenMode,
    InterfaceIp6Privacy as NMInterfaceIp6Privacy,
@@ -267,47 +267,25 @@ class Interface:
        return InterfaceMethod.DISABLED

    @staticmethod
    def _map_nm_addr_gen_mode(addr_gen_mode: int | None) -> InterfaceAddrGenMode:
        """Map IPv6 interface addr_gen_mode.

        NetworkManager omits the addr_gen_mode property when set to DEFAULT, so we
        treat None as DEFAULT here.
        """
    def _map_nm_addr_gen_mode(addr_gen_mode: int) -> InterfaceAddrGenMode:
        """Map IPv6 interface addr_gen_mode."""
        mapping = {
            NMInterfaceAddrGenMode.EUI64.value: InterfaceAddrGenMode.EUI64,
            NMInterfaceAddrGenMode.STABLE_PRIVACY.value: InterfaceAddrGenMode.STABLE_PRIVACY,
            NMInterfaceAddrGenMode.DEFAULT_OR_EUI64.value: InterfaceAddrGenMode.DEFAULT_OR_EUI64,
            NMInterfaceAddrGenMode.DEFAULT.value: InterfaceAddrGenMode.DEFAULT,
            None: InterfaceAddrGenMode.DEFAULT,
        }

        if addr_gen_mode not in mapping:
            _LOGGER.warning(
                "Unknown addr_gen_mode value from NetworkManager: %s", addr_gen_mode
            )

        return mapping.get(addr_gen_mode, InterfaceAddrGenMode.DEFAULT)

    @staticmethod
    def _map_nm_ip6_privacy(ip6_privacy: int | None) -> InterfaceIp6Privacy:
        """Map IPv6 interface ip6_privacy.

        NetworkManager omits the ip6_privacy property when set to DEFAULT, so we
        treat None as DEFAULT here.
        """
    def _map_nm_ip6_privacy(ip6_privacy: int) -> InterfaceIp6Privacy:
        """Map IPv6 interface ip6_privacy."""
        mapping = {
            NMInterfaceIp6Privacy.DISABLED.value: InterfaceIp6Privacy.DISABLED,
            NMInterfaceIp6Privacy.ENABLED_PREFER_PUBLIC.value: InterfaceIp6Privacy.ENABLED_PREFER_PUBLIC,
            NMInterfaceIp6Privacy.ENABLED.value: InterfaceIp6Privacy.ENABLED,
            NMInterfaceIp6Privacy.DEFAULT.value: InterfaceIp6Privacy.DEFAULT,
            None: InterfaceIp6Privacy.DEFAULT,
        }

        if ip6_privacy not in mapping:
            _LOGGER.warning(
                "Unknown ip6_privacy value from NetworkManager: %s", ip6_privacy
            )

        return mapping.get(ip6_privacy, InterfaceIp6Privacy.DEFAULT)

    @staticmethod
@@ -317,8 +295,8 @@ class Interface:
            return False

        return connection.state in (
            ConnectionState.ACTIVATED,
            ConnectionState.ACTIVATING,
            ConnectionStateType.ACTIVATED,
            ConnectionStateType.ACTIVATING,
        )

    @staticmethod
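
# Illustrative sketch (not from the diff itself, simplified stand-in names and enum
# values are assumptions): the mapping style on the removed side of the hunk above
# takes a raw NetworkManager integer, treats a missing value (None) as DEFAULT, and
# warns on unknown values before falling back:

import logging
from enum import Enum

_SKETCH_LOGGER = logging.getLogger(__name__)


class SketchAddrGenMode(Enum):
    """Stand-in for the supervisor-side addr_gen_mode enum (hypothetical)."""

    EUI64 = "eui64"
    STABLE_PRIVACY = "stable-privacy"
    DEFAULT = "default"


def sketch_map_addr_gen_mode(addr_gen_mode: int | None) -> SketchAddrGenMode:
    """Map a raw NetworkManager addr_gen_mode value, treating None as DEFAULT."""
    mapping = {
        0: SketchAddrGenMode.EUI64,
        1: SketchAddrGenMode.STABLE_PRIVACY,
        None: SketchAddrGenMode.DEFAULT,  # property omitted when set to DEFAULT
    }
    if addr_gen_mode not in mapping:
        _SKETCH_LOGGER.warning(
            "Unknown addr_gen_mode value from NetworkManager: %s", addr_gen_mode
        )
    return mapping.get(addr_gen_mode, SketchAddrGenMode.DEFAULT)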

@@ -16,7 +16,7 @@ from ..dbus.const import (
    DBUS_IFACE_DNS,
    DBUS_IFACE_NM,
    DBUS_SIGNAL_NM_CONNECTION_ACTIVE_CHANGED,
    ConnectionState,
    ConnectionStateType,
    ConnectivityState,
    DeviceType,
    WirelessMethodType,
@@ -338,16 +338,16 @@ class NetworkManager(CoreSysAttributes):
            # the state change before this point. Get the state currently to
            # avoid any race condition.
            await con.update()
            state: ConnectionState = con.state
            state: ConnectionStateType = con.state

            while state != ConnectionState.ACTIVATED:
                if state == ConnectionState.DEACTIVATED:
            while state != ConnectionStateType.ACTIVATED:
                if state == ConnectionStateType.DEACTIVATED:
                    raise HostNetworkError(
                        "Activating connection failed, check connection settings."
                    )

                msg = await signal.wait_for_signal()
                state = ConnectionState(msg[0])
                state = msg[0]
                _LOGGER.debug("Active connection state changed to %s", state)

        # update_only means not done by user so don't force a check afterwards

@@ -102,17 +102,13 @@ class SupervisorJobError:
        "Unknown error, see Supervisor logs (check with 'ha supervisor logs')"
    )
    stage: str | None = None
    error_key: str | None = None
    extra_fields: dict[str, Any] | None = None

    def as_dict(self) -> dict[str, Any]:
    def as_dict(self) -> dict[str, str | None]:
        """Return dictionary representation."""
        return {
            "type": self.type_.__name__,
            "message": self.message,
            "stage": self.stage,
            "error_key": self.error_key,
            "extra_fields": self.extra_fields,
        }


@@ -162,9 +158,7 @@ class SupervisorJob:
    def capture_error(self, err: HassioError | None = None) -> None:
        """Capture an error or record that an unknown error has occurred."""
        if err:
            new_error = SupervisorJobError(
                type(err), str(err), self.stage, err.error_key, err.extra_fields
            )
            new_error = SupervisorJobError(type(err), str(err), self.stage)
        else:
            new_error = SupervisorJobError(stage=self.stage)
        self.errors += [new_error]
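
# Illustrative sketch (simplified, not the real SupervisorJobError): the hunk above
# carries error_key and extra_fields into the per-job error record so they survive
# into as_dict(). A minimal stand-alone version of that record and of capturing an
# exception into it:

from dataclasses import dataclass
from typing import Any


@dataclass
class SketchJobError:
    """Minimal job error record carrying a translatable key and its fields."""

    type_: type[Exception] = Exception
    message: str = "Unknown error"
    stage: str | None = None
    error_key: str | None = None
    extra_fields: dict[str, Any] | None = None

    def as_dict(self) -> dict[str, Any]:
        """Return dictionary representation, mirroring the shape asserted in the tests."""
        return {
            "type": self.type_.__name__,
            "message": self.message,
            "stage": self.stage,
            "error_key": self.error_key,
            "extra_fields": self.extra_fields,
        }


def sketch_capture(err: Exception, stage: str | None = None) -> SketchJobError:
    """Capture an exception that may expose error_key/extra_fields attributes."""
    return SketchJobError(
        type(err),
        str(err),
        stage,
        getattr(err, "error_key", None),
        getattr(err, "extra_fields", None),
    )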

@@ -2,7 +2,7 @@

from ...const import CoreState
from ...coresys import CoreSys
from ...dbus.const import ConnectionState, ConnectionStateFlags
from ...dbus.const import ConnectionStateFlags, ConnectionStateType
from ...dbus.network.interface import NetworkInterface
from ...exceptions import NetworkInterfaceNotFound
from ..const import ContextType, IssueType
@@ -47,7 +47,7 @@ class CheckNetworkInterfaceIPV4(CheckBase):

        return not (
            interface.connection.state
            in [ConnectionState.ACTIVATED, ConnectionState.ACTIVATING]
            in [ConnectionStateType.ACTIVATED, ConnectionStateType.ACTIVATING]
            and ConnectionStateFlags.IP4_READY in interface.connection.state_flags
        )

@@ -183,22 +183,19 @@ class GitRepo(CoreSysAttributes):
            raise StoreGitError() from err

        try:
            repo = self.repo
            branch = self.repo.active_branch.name

            def _fetch_and_check() -> tuple[str, bool]:
                """Fetch from origin and check if changed."""
                # This property access is I/O bound
                branch = repo.active_branch.name
                repo.remotes.origin.fetch(
                    **{"update-shallow": True, "depth": 1}  # type: ignore[arg-type]
            # Download data
            await self.sys_run_in_executor(
                ft.partial(
                    self.repo.remotes.origin.fetch,
                    **{"update-shallow": True, "depth": 1},  # type: ignore
                )
                changed = repo.commit(branch) != repo.commit(f"origin/{branch}")
                return branch, changed
            )

            # Download data and check for changes
            branch, changed = await self.sys_run_in_executor(_fetch_and_check)

            if changed:
            if changed := self.repo.commit(branch) != self.repo.commit(
                f"origin/{branch}"
            ):
                # Jump on top of that
                await self.sys_run_in_executor(
                    ft.partial(self.repo.git.reset, f"origin/{branch}", hard=True)
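
# Illustrative sketch (assumptions: a plain GitPython Repo and asyncio instead of the
# supervisor's sys_run_in_executor helper): the hunk above groups the blocking git
# operations - reading the active branch, fetching, comparing commits - into a single
# function that runs in an executor, so the event loop only hops to a worker thread once.

import asyncio

from git import Repo  # GitPython


async def sketch_fetch_and_check(repo: Repo) -> tuple[str, bool]:
    """Fetch origin in a worker thread and report whether the branch changed."""

    def _fetch_and_check() -> tuple[str, bool]:
        branch = repo.active_branch.name  # property access hits the filesystem
        repo.remotes.origin.fetch(depth=1)
        changed = repo.commit(branch) != repo.commit(f"origin/{branch}")
        return branch, changed

    return await asyncio.get_running_loop().run_in_executor(None, _fetch_and_check)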

@@ -28,8 +28,8 @@ from .exceptions import (
    DockerError,
    HostAppArmorError,
    SupervisorAppArmorError,
    SupervisorError,
    SupervisorJobError,
    SupervisorUnknownError,
    SupervisorUpdateError,
)
from .jobs.const import JobCondition, JobThrottle
@@ -261,7 +261,7 @@ class Supervisor(CoreSysAttributes):
        try:
            return await self.instance.stats()
        except DockerError as err:
            raise SupervisorUnknownError() from err
            raise SupervisorError() from err

    async def repair(self):
        """Repair local Supervisor data."""

@@ -5,7 +5,6 @@ from datetime import timedelta
import errno
from http import HTTPStatus
from pathlib import Path
from typing import Any
from unittest.mock import MagicMock, PropertyMock, call, patch

import aiodocker
@@ -24,13 +23,7 @@ from supervisor.docker.addon import DockerAddon
from supervisor.docker.const import ContainerState
from supervisor.docker.manager import CommandReturn, DockerAPI
from supervisor.docker.monitor import DockerContainerStateEvent
from supervisor.exceptions import (
    AddonPrePostBackupCommandReturnedError,
    AddonsJobError,
    AddonUnknownError,
    AudioUpdateError,
    HassioError,
)
from supervisor.exceptions import AddonsError, AddonsJobError, AudioUpdateError
from supervisor.hardware.helper import HwHelper
from supervisor.ingress import Ingress
from supervisor.store.repository import Repository
@@ -509,26 +502,31 @@ async def test_backup_with_pre_post_command(


@pytest.mark.parametrize(
    ("container_get_side_effect", "exec_run_side_effect", "exc_type_raised"),
    "get_error,exception_on_exec",
    [
        (NotFound("missing"), [(1, None)], AddonUnknownError),
        (DockerException(), [(1, None)], AddonUnknownError),
        (None, DockerException(), AddonUnknownError),
        (None, [(1, None)], AddonPrePostBackupCommandReturnedError),
        (NotFound("missing"), False),
        (DockerException(), False),
        (None, True),
        (None, False),
    ],
)
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_backup_with_pre_command_error(
    coresys: CoreSys,
    install_addon_ssh: Addon,
    container: MagicMock,
    container_get_side_effect: DockerException | None,
    exec_run_side_effect: DockerException | list[tuple[int, Any]],
    exc_type_raised: type[HassioError],
    get_error: DockerException | None,
    exception_on_exec: bool,
    tmp_supervisor_data,
    path_extern,
) -> None:
    """Test backing up an addon with error running pre command."""
    coresys.docker.containers.get.side_effect = container_get_side_effect
    container.exec_run.side_effect = exec_run_side_effect
    if get_error:
        coresys.docker.containers.get.side_effect = get_error

    if exception_on_exec:
        container.exec_run.side_effect = DockerException()
    else:
        container.exec_run.return_value = (1, None)

    install_addon_ssh.path_data.mkdir()
    await install_addon_ssh.load()
@@ -537,7 +535,7 @@ async def test_backup_with_pre_command_error(
    with (
        patch.object(DockerAddon, "is_running", return_value=True),
        patch.object(Addon, "backup_pre", new=PropertyMock(return_value="backup_pre")),
        pytest.raises(exc_type_raised),
        pytest.raises(AddonsError),
    ):
        assert await install_addon_ssh.backup(tarfile) is None

@@ -949,7 +947,7 @@ async def test_addon_load_succeeds_with_docker_errors(
    )
    caplog.clear()
    await install_addon_ssh.load()
    assert "Cannot build addon 'local_ssh' because dockerfile is missing" in caplog.text
    assert "Invalid build environment" in caplog.text

    # Image build failure
    caplog.clear()

@@ -6,13 +6,11 @@ from pathlib import Path
from unittest.mock import PropertyMock, patch

from awesomeversion import AwesomeVersion
import pytest

from supervisor.addons.addon import Addon
from supervisor.addons.build import AddonBuild
from supervisor.coresys import CoreSys
from supervisor.docker.const import DOCKER_HUB
from supervisor.exceptions import AddonBuildDockerfileMissingError

from tests.common import is_in_list

@@ -108,11 +106,11 @@ async def test_build_valid(coresys: CoreSys, install_addon_ssh: Addon):
            type(coresys.arch), "default", new=PropertyMock(return_value="aarch64")
        ),
    ):
        assert (await build.is_valid()) is None
        assert await build.is_valid()


async def test_build_invalid(coresys: CoreSys, install_addon_ssh: Addon):
    """Test build not supported because Dockerfile missing for specified architecture."""
    """Test platform set in docker args."""
    build = await AddonBuild(coresys, install_addon_ssh).load_config()
    with (
        patch.object(
@@ -121,9 +119,8 @@ async def test_build_invalid(coresys: CoreSys, install_addon_ssh: Addon):
        patch.object(
            type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
        ),
        pytest.raises(AddonBuildDockerfileMissingError),
    ):
        await build.is_valid()
        assert not await build.is_valid()


async def test_docker_config_no_registries(coresys: CoreSys, install_addon_ssh: Addon):

@@ -5,7 +5,6 @@ from unittest.mock import MagicMock, PropertyMock, patch

from aiohttp import ClientResponse
from aiohttp.test_utils import TestClient
from docker.errors import DockerException
import pytest

from supervisor.addons.addon import Addon
@@ -478,11 +477,6 @@ async def test_addon_options_boot_mode_manual_only_invalid(
        body["message"]
        == "Addon local_example boot option is set to manual_only so it cannot be changed"
    )
    assert body["error_key"] == "addon_boot_config_cannot_change_error"
    assert body["extra_fields"] == {
        "addon": "local_example",
        "boot_config": "manual_only",
    }


async def get_message(resp: ClientResponse, json_expected: bool) -> str:
@@ -551,154 +545,3 @@ async def test_addon_not_installed(
    resp = await api_client.request(method, url)
    assert resp.status == 400
    assert await get_message(resp, json_expected) == "Addon is not installed"


async def test_addon_set_options(api_client: TestClient, install_addon_example: Addon):
    """Test setting options for an addon."""
    resp = await api_client.post(
        "/addons/local_example/options", json={"options": {"message": "test"}}
    )
    assert resp.status == 200
    assert install_addon_example.options == {"message": "test"}


async def test_addon_reset_options(
    api_client: TestClient, install_addon_example: Addon
):
    """Test resetting options for an addon to defaults.

    Fixes SUPERVISOR-171F.
    """
    # First set some custom options
    install_addon_example.options = {"message": "custom"}
    assert install_addon_example.persist["options"] == {"message": "custom"}

    # Reset to defaults by sending null
    resp = await api_client.post(
        "/addons/local_example/options", json={"options": None}
    )
    assert resp.status == 200

    # Persisted options should be empty (meaning defaults will be used)
    assert install_addon_example.persist["options"] == {}


async def test_addon_set_options_error(
    api_client: TestClient, install_addon_example: Addon
):
    """Test setting options for an addon."""
    resp = await api_client.post(
        "/addons/local_example/options", json={"options": {"message": True}}
    )
    assert resp.status == 400
    body = await resp.json()
    assert (
        body["message"]
        == "Add-on local_example has invalid options: not a valid value. Got {'message': True}"
    )
    assert body["error_key"] == "addon_configuration_invalid_error"
    assert body["extra_fields"] == {
        "addon": "local_example",
        "validation_error": "not a valid value. Got {'message': True}",
    }


async def test_addon_start_options_error(
    api_client: TestClient,
    install_addon_example: Addon,
    caplog: pytest.LogCaptureFixture,
):
    """Test error writing options when trying to start addon."""
    install_addon_example.options = {"message": "hello"}

    # Simulate OS error trying to write the file
    with patch("supervisor.utils.json.atomic_write", side_effect=OSError("fail")):
        resp = await api_client.post("/addons/local_example/start")
    assert resp.status == 500
    body = await resp.json()
    assert (
        body["message"]
        == "An unknown error occurred with addon local_example. Check supervisor logs for details (check with 'ha supervisor logs')"
    )
    assert body["error_key"] == "addon_unknown_error"
    assert body["extra_fields"] == {
        "addon": "local_example",
        "logs_command": "ha supervisor logs",
    }
    assert "Add-on local_example can't write options" in caplog.text

    # Simulate an update with a breaking change for options schema creating failure on start
    caplog.clear()
    install_addon_example.data["schema"] = {"message": "bool"}
    resp = await api_client.post("/addons/local_example/start")
    assert resp.status == 400
    body = await resp.json()
    assert (
        body["message"]
        == "Add-on local_example has invalid options: expected boolean. Got {'message': 'hello'}"
    )
    assert body["error_key"] == "addon_configuration_invalid_error"
    assert body["extra_fields"] == {
        "addon": "local_example",
        "validation_error": "expected boolean. Got {'message': 'hello'}",
    }
    assert (
        "Add-on local_example has invalid options: expected boolean. Got {'message': 'hello'}"
        in caplog.text
    )


@pytest.mark.parametrize(("method", "action"), [("get", "stats"), ("post", "stdin")])
@pytest.mark.usefixtures("install_addon_example")
async def test_addon_not_running_error(
    api_client: TestClient, method: str, action: str
):
    """Test addon not running error for endpoints that require that."""
    with patch.object(Addon, "with_stdin", new=PropertyMock(return_value=True)):
        resp = await api_client.request(method, f"/addons/local_example/{action}")

    assert resp.status == 400
    body = await resp.json()
    assert body["message"] == "Add-on local_example is not running"
    assert body["error_key"] == "addon_not_running_error"
    assert body["extra_fields"] == {"addon": "local_example"}


@pytest.mark.usefixtures("install_addon_example")
async def test_addon_write_stdin_not_supported_error(api_client: TestClient):
    """Test error when trying to write stdin to addon that does not support it."""
    resp = await api_client.post("/addons/local_example/stdin")
    assert resp.status == 400
    body = await resp.json()
    assert body["message"] == "Add-on local_example does not support writing to stdin"
    assert body["error_key"] == "addon_not_supported_write_stdin_error"
    assert body["extra_fields"] == {"addon": "local_example"}


@pytest.mark.usefixtures("install_addon_ssh")
async def test_addon_rebuild_fails_error(api_client: TestClient, coresys: CoreSys):
    """Test error when build fails during rebuild for addon."""
    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
    coresys.docker.containers.run.side_effect = DockerException("fail")

    with (
        patch.object(
            CpuArchManager, "supported", new=PropertyMock(return_value=["aarch64"])
        ),
        patch.object(
            CpuArchManager, "default", new=PropertyMock(return_value="aarch64")
        ),
        patch.object(AddonBuild, "get_docker_args", return_value={}),
    ):
        resp = await api_client.post("/addons/local_ssh/rebuild")
    assert resp.status == 500
    body = await resp.json()
    assert (
        body["message"]
        == "An unknown error occurred while trying to build the image for addon local_ssh. Check supervisor logs for details (check with 'ha supervisor logs')"
    )
    assert body["error_key"] == "addon_build_failed_unknown_error"
    assert body["extra_fields"] == {
        "addon": "local_ssh",
        "logs_command": "ha supervisor logs",
    }

@@ -1,7 +1,6 @@
"""Test auth API."""

from datetime import UTC, datetime, timedelta
from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch

from aiohttp.hdrs import WWW_AUTHENTICATE
@@ -10,8 +9,6 @@ import pytest

from supervisor.addons.addon import Addon
from supervisor.coresys import CoreSys
from supervisor.exceptions import HomeAssistantAPIError, HomeAssistantWSError
from supervisor.homeassistant.api import HomeAssistantAPI

from tests.common import MockResponse
from tests.const import TEST_ADDON_SLUG
@@ -103,52 +100,6 @@ async def test_password_reset(
    assert "Successful password reset for 'john'" in caplog.text


@pytest.mark.parametrize(
    ("post_mock", "expected_log"),
    [
        (
            MagicMock(return_value=MockResponse(status=400)),
            "The user 'john' is not registered",
        ),
        (
            MagicMock(side_effect=HomeAssistantAPIError("fail")),
            "Can't request password reset on Home Assistant: fail",
        ),
    ],
)
async def test_failed_password_reset(
    api_client: TestClient,
    coresys: CoreSys,
    caplog: pytest.LogCaptureFixture,
    websession: MagicMock,
    post_mock: MagicMock,
    expected_log: str,
):
    """Test failed password reset."""
    coresys.homeassistant.api.access_token = "abc123"
    # pylint: disable-next=protected-access
    coresys.homeassistant.api._access_token_expires = datetime.now(tz=UTC) + timedelta(
        days=1
    )

    websession.post = post_mock
    resp = await api_client.post(
        "/auth/reset", json={"username": "john", "password": "doe"}
    )
    assert resp.status == 400
    body = await resp.json()
    assert (
        body["message"]
        == "Username 'john' does not exist. Check list of users using 'ha auth list'."
    )
    assert body["error_key"] == "auth_password_reset_error"
    assert body["extra_fields"] == {
        "user": "john",
        "auth_list_command": "ha auth list",
    }
    assert expected_log in caplog.text


async def test_list_users(
    api_client: TestClient, coresys: CoreSys, ha_ws_client: AsyncMock
):
@@ -169,48 +120,6 @@ async def test_list_users(
    ]


@pytest.mark.parametrize(
    ("send_command_mock", "error_response", "expected_log"),
    [
        (
            AsyncMock(return_value=None),
            {
                "result": "error",
                "message": "Home Assistant returned invalid response of `None` instead of a list of users. Check Home Assistant logs for details (check with `ha core logs`)",
                "error_key": "auth_list_users_none_response_error",
                "extra_fields": {"none": "None", "logs_command": "ha core logs"},
            },
            "Home Assistant returned invalid response of `None` instead of a list of users. Check Home Assistant logs for details (check with `ha core logs`)",
        ),
        (
            AsyncMock(side_effect=HomeAssistantWSError("fail")),
            {
                "result": "error",
                "message": "Can't request listing users on Home Assistant. Check supervisor logs for details (check with 'ha supervisor logs')",
                "error_key": "auth_list_users_error",
                "extra_fields": {"logs_command": "ha supervisor logs"},
            },
            "Can't request listing users on Home Assistant: fail",
        ),
    ],
)
async def test_list_users_failure(
    api_client: TestClient,
    ha_ws_client: AsyncMock,
    caplog: pytest.LogCaptureFixture,
    send_command_mock: AsyncMock,
    error_response: dict[str, Any],
    expected_log: str,
):
    """Test failure listing users via API."""
    ha_ws_client.async_send_command = send_command_mock
    resp = await api_client.get("/auth/list")
    assert resp.status == 500
    result = await resp.json()
    assert result == error_response
    assert expected_log in caplog.text


@pytest.mark.parametrize(
    ("field", "api_client"),
    [("username", TEST_ADDON_SLUG), ("user", TEST_ADDON_SLUG)],
@@ -247,13 +156,6 @@ async def test_auth_json_failure_none(
    mock_check_login.return_value = True
    resp = await api_client.post("/auth", json={"username": user, "password": password})
    assert resp.status == 401
    assert (
        resp.headers["WWW-Authenticate"]
        == 'Basic realm="Home Assistant Authentication"'
    )
    body = await resp.json()
    assert body["message"] == "Username and password must be strings"
    assert body["error_key"] == "auth_invalid_non_string_value_error"


@pytest.mark.parametrize("api_client", [TEST_ADDON_SLUG], indirect=True)
@@ -365,26 +267,3 @@ async def test_non_addon_token_no_auth_access(api_client: TestClient):
    """Test auth where add-on is not allowed to access auth API."""
    resp = await api_client.post("/auth", json={"username": "test", "password": "pass"})
    assert resp.status == 403


@pytest.mark.parametrize("api_client", [TEST_ADDON_SLUG], indirect=True)
@pytest.mark.usefixtures("install_addon_ssh")
async def test_auth_backend_login_failure(api_client: TestClient):
    """Test backend login failure on auth."""
    with (
        patch.object(HomeAssistantAPI, "check_api_state", return_value=True),
        patch.object(
            HomeAssistantAPI, "make_request", side_effect=HomeAssistantAPIError("fail")
        ),
    ):
        resp = await api_client.post(
            "/auth", json={"username": "test", "password": "pass"}
        )
    assert resp.status == 500
    body = await resp.json()
    assert (
        body["message"]
        == "Unable to validate authentication details with Home Assistant. Check supervisor logs for details (check with 'ha supervisor logs')"
    )
    assert body["error_key"] == "auth_home_assistant_api_validation_error"
    assert body["extra_fields"] == {"logs_command": "ha supervisor logs"}

@@ -17,7 +17,6 @@ from supervisor.const import CoreState
from supervisor.coresys import CoreSys
from supervisor.docker.manager import DockerAPI
from supervisor.exceptions import (
    AddonPrePostBackupCommandReturnedError,
    AddonsError,
    BackupInvalidError,
    HomeAssistantBackupError,
@@ -25,7 +24,6 @@ from supervisor.exceptions import (
from supervisor.homeassistant.core import HomeAssistantCore
from supervisor.homeassistant.module import HomeAssistant
from supervisor.homeassistant.websocket import HomeAssistantWebSocket
from supervisor.jobs import SupervisorJob
from supervisor.mounts.mount import Mount
from supervisor.supervisor import Supervisor

@@ -403,8 +401,6 @@ async def test_api_backup_errors(
            "type": "BackupError",
            "message": str(err),
            "stage": None,
            "error_key": None,
            "extra_fields": None,
        }
    ]
    assert job["child_jobs"][2]["name"] == "backup_store_folders"
@@ -441,8 +437,6 @@ async def test_api_backup_errors(
            "type": "HomeAssistantBackupError",
            "message": "Backup error",
            "stage": "home_assistant",
            "error_key": None,
            "extra_fields": None,
        }
    ]
    assert job["child_jobs"][0]["name"] == "backup_store_homeassistant"
@@ -451,8 +445,6 @@ async def test_api_backup_errors(
            "type": "HomeAssistantBackupError",
            "message": "Backup error",
            "stage": None,
            "error_key": None,
            "extra_fields": None,
        }
    ]
    assert len(job["child_jobs"]) == 1
@@ -757,8 +749,6 @@ async def test_backup_to_multiple_locations_error_on_copy(
            "type": "BackupError",
            "message": "Could not copy backup to .cloud_backup due to: ",
            "stage": None,
            "error_key": None,
            "extra_fields": None,
        }
    ]

@@ -1493,44 +1483,3 @@ async def test_immediate_list_after_missing_file_restore(
    result = await resp.json()
    assert len(result["data"]["backups"]) == 1
    assert result["data"]["backups"][0]["slug"] == "93b462f8"


@pytest.mark.parametrize("command", ["backup_pre", "backup_post"])
@pytest.mark.usefixtures("install_addon_example", "tmp_supervisor_data")
async def test_pre_post_backup_command_error(
    api_client: TestClient, coresys: CoreSys, container: MagicMock, command: str
):
    """Test pre/post backup command error."""
    await coresys.core.set_state(CoreState.RUNNING)
    coresys.hardware.disk.get_disk_free_space = lambda x: 5000

    container.status = "running"
    container.exec_run.return_value = (1, b"")
    with patch.object(Addon, command, new=PropertyMock(return_value="test")):
        resp = await api_client.post(
            "/backups/new/partial", json={"addons": ["local_example"]}
        )

    assert resp.status == 200
    body = await resp.json()
    job_id = body["data"]["job_id"]
    job: SupervisorJob | None = None
    for j in coresys.jobs.jobs:
        if j.name == "backup_store_addons" and j.parent_id == job_id:
            job = j
            break

    assert job
    assert job.done is True
    assert job.errors[0].type_ == AddonPrePostBackupCommandReturnedError
    assert job.errors[0].message == (
        "Pre-/Post backup command for add-on local_example returned error code: "
        "1. Please report this to the addon developer. Enable debug "
        "logging to capture complete command output using ha supervisor options --logging debug"
    )
    assert job.errors[0].error_key == "addon_pre_post_backup_command_returned_error"
    assert job.errors[0].extra_fields == {
        "addon": "local_example",
        "exit_code": 1,
        "debug_logging_command": "ha supervisor options --logging debug",
    }

@@ -118,6 +118,15 @@ async def test_api_migrate_docker_storage_driver(
        in coresys.resolution.suggestions
    )

    # Test migration back to overlay2 (graph driver)
    system_service.MigrateDockerStorageDriver.calls.clear()
    resp = await api_client.post(
        "/docker/migrate-storage-driver",
        json={"storage_driver": "overlay2"},
    )
    assert resp.status == 200
    assert system_service.MigrateDockerStorageDriver.calls == [("overlay2",)]


@pytest.mark.parametrize("os_available", ["17.0.rc1"], indirect=True)
async def test_api_migrate_docker_storage_driver_invalid_backend(

@@ -305,6 +305,8 @@ async def test_api_progress_updates_home_assistant_update(
        and evt.args[0]["data"]["event"] == WSEvent.JOB
        and evt.args[0]["data"]["data"]["name"] == "home_assistant_core_update"
    ]
    # Count-based progress: 2 layers need pulling (each worth 50%)
    # Layers that already exist are excluded from progress calculation
    assert events[:5] == [
        {
            "stage": None,
@@ -318,36 +320,36 @@ async def test_api_progress_updates_home_assistant_update(
        },
        {
            "stage": None,
            "progress": 0.1,
            "progress": 9.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 1.7,
            "progress": 25.6,
            "done": False,
        },
        {
            "stage": None,
            "progress": 4.0,
            "progress": 35.4,
            "done": False,
        },
    ]
    assert events[-5:] == [
        {
            "stage": None,
            "progress": 95.5,
            "done": False,
        },
        {
            "stage": None,
            "progress": 96.9,
            "done": False,
        },
        {
            "stage": None,
            "progress": 98.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 98.3,
            "done": False,
        },
        {
            "stage": None,
            "progress": 99.3,
            "done": False,
        },
        {
            "stage": None,
            "progress": 100,

@@ -374,8 +374,6 @@ async def test_job_with_error(
                "type": "SupervisorError",
                "message": "bad",
                "stage": "test",
                "error_key": None,
                "extra_fields": None,
            }
        ],
        "child_jobs": [
@@ -393,8 +391,6 @@ async def test_job_with_error(
                        "type": "SupervisorError",
                        "message": "bad",
                        "stage": None,
                        "error_key": None,
                        "extra_fields": None,
                    }
                ],
                "child_jobs": [],

@@ -4,6 +4,7 @@ import asyncio
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch

from aiohttp import ClientResponse
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
import pytest
@@ -291,6 +292,14 @@ async def test_api_detached_addon_documentation(
    assert result == "Addon local_ssh does not exist in the store"


async def get_message(resp: ClientResponse, json_expected: bool) -> str:
    """Get message from response based on response type."""
    if json_expected:
        body = await resp.json()
        return body["message"]
    return await resp.text()


@pytest.mark.parametrize(
    ("method", "url", "json_expected"),
    [
@@ -316,13 +325,7 @@ async def test_store_addon_not_found(
    """Test store addon not found error."""
    resp = await api_client.request(method, url)
    assert resp.status == 404
    if json_expected:
        body = await resp.json()
        assert body["message"] == "Addon bad does not exist in the store"
        assert body["error_key"] == "store_addon_not_found_error"
        assert body["extra_fields"] == {"addon": "bad"}
    else:
        assert await resp.text() == "Addon bad does not exist in the store"
    assert await get_message(resp, json_expected) == "Addon bad does not exist"


@pytest.mark.parametrize(
@@ -761,6 +764,8 @@ async def test_api_progress_updates_addon_install_update(
        and evt.args[0]["data"]["data"]["name"] == job_name
        and evt.args[0]["data"]["data"]["reference"] == addon_slug
    ]
    # Count-based progress: 2 layers need pulling (each worth 50%)
    # Layers that already exist are excluded from progress calculation
    assert events[:4] == [
        {
            "stage": None,
@@ -769,36 +774,36 @@ async def test_api_progress_updates_addon_install_update(
        },
        {
            "stage": None,
            "progress": 0.1,
            "progress": 9.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 1.7,
            "progress": 25.6,
            "done": False,
        },
        {
            "stage": None,
            "progress": 4.0,
            "progress": 35.4,
            "done": False,
        },
    ]
    assert events[-5:] == [
        {
            "stage": None,
            "progress": 95.5,
            "done": False,
        },
        {
            "stage": None,
            "progress": 96.9,
            "done": False,
        },
        {
            "stage": None,
            "progress": 98.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 98.3,
            "done": False,
        },
        {
            "stage": None,
            "progress": 99.3,
            "done": False,
        },
        {
            "stage": None,
            "progress": 100,

@@ -7,7 +7,6 @@ from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
from blockbuster import BlockingError
from docker.errors import DockerException
import pytest

from supervisor.const import CoreState
@@ -359,6 +358,8 @@ async def test_api_progress_updates_supervisor_update(
        and evt.args[0]["data"]["event"] == WSEvent.JOB
        and evt.args[0]["data"]["data"]["name"] == "supervisor_update"
    ]
    # Count-based progress: 2 layers need pulling (each worth 50%)
    # Layers that already exist are excluded from progress calculation
    assert events[:4] == [
        {
            "stage": None,
@@ -367,36 +368,36 @@ async def test_api_progress_updates_supervisor_update(
        },
        {
            "stage": None,
            "progress": 0.1,
            "progress": 9.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 1.7,
            "progress": 25.6,
            "done": False,
        },
        {
            "stage": None,
            "progress": 4.0,
            "progress": 35.4,
            "done": False,
        },
    ]
    assert events[-5:] == [
        {
            "stage": None,
            "progress": 95.5,
            "done": False,
        },
        {
            "stage": None,
            "progress": 96.9,
            "done": False,
        },
        {
            "stage": None,
            "progress": 98.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 98.3,
            "done": False,
        },
        {
            "stage": None,
            "progress": 99.3,
            "done": False,
        },
        {
            "stage": None,
            "progress": 100,
@@ -408,37 +409,3 @@ async def test_api_progress_updates_supervisor_update(
            "done": True,
        },
    ]


async def test_api_supervisor_stats(api_client: TestClient, coresys: CoreSys):
    """Test supervisor stats."""
    coresys.docker.containers.get.return_value.status = "running"
    coresys.docker.containers.get.return_value.stats.return_value = load_json_fixture(
        "container_stats.json"
    )

    resp = await api_client.get("/supervisor/stats")
    assert resp.status == 200
    result = await resp.json()
    assert result["data"]["cpu_percent"] == 90.0
    assert result["data"]["memory_usage"] == 59700000
    assert result["data"]["memory_limit"] == 4000000000
    assert result["data"]["memory_percent"] == 1.49


async def test_supervisor_api_stats_failure(
    api_client: TestClient, coresys: CoreSys, caplog: pytest.LogCaptureFixture
):
    """Test supervisor stats failure."""
    coresys.docker.containers.get.side_effect = DockerException("fail")

    resp = await api_client.get("/supervisor/stats")
    assert resp.status == 500
    body = await resp.json()
    assert (
        body["message"]
        == "An unknown error occurred with Supervisor. Check supervisor logs for details (check with 'ha supervisor logs')"
    )
    assert body["error_key"] == "supervisor_unknown_error"
    assert body["extra_fields"] == {"logs_command": "ha supervisor logs"}
    assert "Could not inspect container 'hassio_supervisor': fail" in caplog.text

@@ -144,9 +144,9 @@ async def docker() -> DockerAPI:

    docker_images.inspect.return_value = image_inspect
    docker_images.list.return_value = [image_inspect]
    docker_images.import_image = AsyncMock(
        return_value=[{"stream": "Loaded image: test:latest\n"}]
    )
    docker_images.import_image.return_value = [
        {"stream": "Loaded image: test:latest\n"}
    ]

    docker_images.pull.return_value = AsyncIterator([{}])

@@ -6,7 +6,7 @@ from unittest.mock import Mock, PropertyMock, patch
from dbus_fast.aio.message_bus import MessageBus
import pytest

from supervisor.dbus.const import ConnectionState
from supervisor.dbus.const import ConnectionStateType
from supervisor.dbus.network import NetworkManager
from supervisor.dbus.network.interface import NetworkInterface
from supervisor.exceptions import (
@@ -93,7 +93,7 @@ async def test_activate_connection(
        "/org/freedesktop/NetworkManager/Settings/1",
        "/org/freedesktop/NetworkManager/Devices/1",
    )
    assert connection.state == ConnectionState.ACTIVATED
    assert connection.state == ConnectionStateType.ACTIVATED
    assert (
        connection.settings.object_path == "/org/freedesktop/NetworkManager/Settings/1"
    )
@@ -117,7 +117,7 @@ async def test_add_and_activate_connection(
    )
    assert settings.connection.uuid == "0c23631e-2118-355c-bbb0-8943229cb0d6"
    assert settings.ipv4.method == "auto"
    assert connection.state == ConnectionState.ACTIVATED
    assert connection.state == ConnectionStateType.ACTIVATED
    assert (
        connection.settings.object_path == "/org/freedesktop/NetworkManager/Settings/1"
    )

@@ -35,8 +35,8 @@ class System(DBusServiceMock):
        """Migrate Docker storage driver."""
        if isinstance(self.response_migrate_docker_storage_driver, DBusError):
            raise self.response_migrate_docker_storage_driver  # pylint: disable=raising-bad-type
        if backend != "overlayfs":
        if backend not in ("overlayfs", "overlay2"):
            raise DBusError(
                ErrorType.FAILED,
                f"unsupported driver: {backend} (only 'overlayfs' is currently supported)",
                f"unsupported driver: {backend} (only 'overlayfs' and 'overlay2' are supported)",
            )

@@ -1,49 +1,9 @@
"""Test docker login."""

import pytest

# pylint: disable=protected-access
from supervisor.coresys import CoreSys
from supervisor.docker.const import DOCKER_HUB, DOCKER_HUB_LEGACY
from supervisor.docker.const import DOCKER_HUB
from supervisor.docker.interface import DockerInterface
from supervisor.docker.utils import get_registry_from_image


@pytest.mark.parametrize(
    ("image_ref", "expected_registry"),
    [
        # No registry - Docker Hub images
        ("nginx", None),
        ("nginx:latest", None),
        ("library/nginx", None),
        ("library/nginx:latest", None),
        ("homeassistant/amd64-supervisor", None),
        ("homeassistant/amd64-supervisor:1.2.3", None),
        # Registry with dot
        ("ghcr.io/homeassistant/amd64-supervisor", "ghcr.io"),
        ("ghcr.io/homeassistant/amd64-supervisor:latest", "ghcr.io"),
        ("myregistry.com/nginx", "myregistry.com"),
        ("registry.example.com/org/image:v1", "registry.example.com"),
        ("127.0.0.1/myimage", "127.0.0.1"),
        # Registry with port
        ("myregistry:5000/myimage", "myregistry:5000"),
        ("localhost:5000/myimage", "localhost:5000"),
        ("registry.io:5000/org/app:v1", "registry.io:5000"),
        # localhost special case
        ("localhost/myimage", "localhost"),
        ("localhost/myimage:tag", "localhost"),
        # IPv6
        ("[::1]:5000/myimage", "[::1]:5000"),
        ("[2001:db8::1]:5000/myimage:tag", "[2001:db8::1]:5000"),
    ],
)
def test_get_registry_from_image(image_ref: str, expected_registry: str | None):
    """Test get_registry_from_image extracts registry from image reference.

    Based on Docker's reference implementation:
    vendor/github.com/distribution/reference/normalize.go
    """
    assert get_registry_from_image(image_ref) == expected_registry


def test_no_credentials(coresys: CoreSys, test_docker_interface: DockerInterface):
@@ -87,36 +47,3 @@ def test_matching_credentials(coresys: CoreSys, test_docker_interface: DockerInt
    )
    assert credentials["username"] == "Spongebob Squarepants"
    assert "registry" not in credentials


def test_legacy_docker_hub_credentials(
    coresys: CoreSys, test_docker_interface: DockerInterface
):
    """Test legacy hub.docker.com credentials are used for Docker Hub images."""
    coresys.docker.config._data["registries"] = {
        DOCKER_HUB_LEGACY: {"username": "LegacyUser", "password": "Password1!"},
    }

    credentials = test_docker_interface._get_credentials(
        "homeassistant/amd64-supervisor"
    )
    assert credentials["username"] == "LegacyUser"
    # No registry should be included for Docker Hub
    assert "registry" not in credentials


def test_docker_hub_preferred_over_legacy(
    coresys: CoreSys, test_docker_interface: DockerInterface
):
    """Test docker.io is preferred over legacy hub.docker.com when both exist."""
    coresys.docker.config._data["registries"] = {
        DOCKER_HUB: {"username": "NewUser", "password": "Password1!"},
        DOCKER_HUB_LEGACY: {"username": "LegacyUser", "password": "Password2!"},
    }

    credentials = test_docker_interface._get_credentials(
        "homeassistant/amd64-supervisor"
    )
    # docker.io should be preferred
    assert credentials["username"] == "NewUser"
    assert "registry" not in credentials
@@ -46,7 +46,6 @@ async def test_homeassistant_start(
|
||||
"TZ": ANY,
|
||||
"SUPERVISOR_TOKEN": ANY,
|
||||
"HASSIO_TOKEN": ANY,
|
||||
# no "HA_DUPLICATE_LOG_FILE"
|
||||
}
|
||||
assert run.call_args.kwargs["mounts"] == [
|
||||
DEV_MOUNT,
|
||||
@@ -106,28 +105,6 @@ async def test_homeassistant_start(
|
||||
assert "volumes" not in run.call_args.kwargs
|
||||
|
||||
|
||||
async def test_homeassistant_start_with_duplicate_log_file(
|
||||
coresys: CoreSys, tmp_supervisor_data: Path, path_extern
|
||||
):
|
||||
"""Test starting homeassistant with duplicate_log_file enabled."""
|
||||
coresys.homeassistant.version = AwesomeVersion("2025.12.0")
|
||||
coresys.homeassistant.duplicate_log_file = True
|
||||
|
||||
with (
|
||||
patch.object(DockerAPI, "run") as run,
|
||||
patch.object(
|
||||
DockerHomeAssistant, "is_running", side_effect=[False, False, True]
|
||||
),
|
||||
patch("supervisor.homeassistant.core.asyncio.sleep"),
|
||||
):
|
||||
await coresys.homeassistant.core.start()
|
||||
|
||||
run.assert_called_once()
|
||||
env = run.call_args.kwargs["environment"]
|
||||
assert "HA_DUPLICATE_LOG_FILE" in env
|
||||
assert env["HA_DUPLICATE_LOG_FILE"] == "1"
|
||||
|
||||
|
||||
async def test_landingpage_start(
|
||||
coresys: CoreSys, tmp_supervisor_data: Path, path_extern
|
||||
):
|
||||
@@ -156,7 +133,6 @@ async def test_landingpage_start(
|
||||
"TZ": ANY,
|
||||
"SUPERVISOR_TOKEN": ANY,
|
||||
"HASSIO_TOKEN": ANY,
|
||||
# no "HA_DUPLICATE_LOG_FILE"
|
||||
}
|
||||
assert run.call_args.kwargs["mounts"] == [
|
||||
DEV_MOUNT,
|
||||
|
||||
@@ -54,7 +54,7 @@ async def test_docker_image_platform(
|
||||
coresys.docker.images.inspect.return_value = {"Id": "test:1.2.3"}
|
||||
await test_docker_interface.install(AwesomeVersion("1.2.3"), "test", arch=cpu_arch)
|
||||
coresys.docker.images.pull.assert_called_once_with(
|
||||
"test", tag="1.2.3", platform=platform, auth=None, stream=True, timeout=None
|
||||
"test", tag="1.2.3", platform=platform, auth=None, stream=True
|
||||
)
|
||||
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
|
||||
|
||||
@@ -71,12 +71,7 @@ async def test_docker_image_default_platform(
|
||||
):
|
||||
await test_docker_interface.install(AwesomeVersion("1.2.3"), "test")
|
||||
coresys.docker.images.pull.assert_called_once_with(
|
||||
"test",
|
||||
tag="1.2.3",
|
||||
platform="linux/386",
|
||||
auth=None,
|
||||
stream=True,
|
||||
timeout=None,
|
||||
"test", tag="1.2.3", platform="linux/386", auth=None, stream=True
|
||||
)
|
||||
|
||||
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
|
||||
@@ -116,12 +111,7 @@ async def test_private_registry_credentials_passed_to_pull(
|
||||
expected_auth["registry"] = registry_key
|
||||
|
||||
coresys.docker.images.pull.assert_called_once_with(
|
||||
image,
|
||||
tag="1.2.3",
|
||||
platform="linux/amd64",
|
||||
auth=expected_auth,
|
||||
stream=True,
|
||||
timeout=None,
|
||||
image, tag="1.2.3", platform="linux/amd64", auth=expected_auth, stream=True
|
||||
)
|
||||
|
||||
|
||||
@@ -370,12 +360,7 @@ async def test_install_fires_progress_events(
|
||||
):
|
||||
await test_docker_interface.install(AwesomeVersion("1.2.3"), "test")
|
||||
coresys.docker.images.pull.assert_called_once_with(
|
||||
"test",
|
||||
tag="1.2.3",
|
||||
platform="linux/386",
|
||||
auth=None,
|
||||
stream=True,
|
||||
timeout=None,
|
||||
"test", tag="1.2.3", platform="linux/386", auth=None, stream=True
|
||||
)
|
||||
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
|
||||
|
||||
@@ -724,11 +709,18 @@ async def test_install_progress_handles_layers_skipping_download(
|
||||
await install_task
|
||||
await event.wait()
|
||||
|
||||
# First update from layer download should have rather low progress ((260937/25445459) / 2 ~ 0.5%)
|
||||
assert install_job_snapshots[0]["progress"] < 1
|
||||
# With the new progress calculation approach:
|
||||
# - Progress is weighted by layer size
|
||||
# - Small layers that skip downloading get minimal size (1 byte)
|
||||
# - Progress should increase monotonically
|
||||
assert len(install_job_snapshots) > 0
|
||||
|
||||
# Total 8 events should lead to a progress update on the install job
|
||||
assert len(install_job_snapshots) == 8
|
||||
# Verify progress is monotonically increasing (or stable)
|
||||
for i in range(1, len(install_job_snapshots)):
|
||||
assert (
|
||||
install_job_snapshots[i]["progress"]
|
||||
>= install_job_snapshots[i - 1]["progress"]
|
||||
)
|
||||
|
||||
# Job should complete successfully
|
||||
assert job.done is True
|
||||
@@ -832,12 +824,7 @@ async def test_install_progress_containerd_snapshot(
|
||||
with patch.object(Supervisor, "arch", PropertyMock(return_value="i386")):
|
||||
await test_docker_interface.mock_install()
|
||||
coresys.docker.images.pull.assert_called_once_with(
|
||||
"test",
|
||||
tag="1.2.3",
|
||||
platform="linux/386",
|
||||
auth=None,
|
||||
stream=True,
|
||||
timeout=None,
|
||||
"test", tag="1.2.3", platform="linux/386", auth=None, stream=True
|
||||
)
|
||||
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
|
||||
|
||||
@@ -864,24 +851,24 @@ async def test_install_progress_containerd_snapshot(
|
||||
}
|
||||
|
||||
assert [c.args[0] for c in ha_ws_client.async_send_command.call_args_list] == [
|
||||
# During downloading we get continuous progress updates from download status
|
||||
# Count-based progress: 2 layers, each = 50%. Download = 0-35%, Extract = 35-50%
|
||||
job_event(0),
|
||||
job_event(1.7),
|
||||
job_event(3.4),
|
||||
job_event(8.5),
|
||||
job_event(8.4),
|
||||
job_event(10.2),
|
||||
job_event(15.3),
|
||||
job_event(18.8),
|
||||
job_event(29.0),
|
||||
job_event(35.8),
|
||||
job_event(42.6),
|
||||
job_event(49.5),
|
||||
job_event(56.0),
|
||||
job_event(62.8),
|
||||
# Downloading phase is considered 70% of total. After we only get one update
|
||||
# per image downloaded when extraction is finished. It uses the total size
|
||||
# received during downloading to determine percent complete then.
|
||||
job_event(15.2),
|
||||
job_event(18.7),
|
||||
job_event(28.8),
|
||||
job_event(35.7),
|
||||
job_event(42.4),
|
||||
job_event(49.3),
|
||||
job_event(55.8),
|
||||
job_event(62.7),
|
||||
# Downloading phase is considered 70% of layer's progress.
|
||||
# After download complete, extraction takes remaining 30% per layer.
|
||||
job_event(70.0),
|
||||
job_event(84.8),
|
||||
job_event(85.0),
|
||||
job_event(100),
|
||||
job_event(100, True),
|
||||
]
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
import asyncio
|
||||
from pathlib import Path
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from docker.errors import APIError, DockerException, NotFound
|
||||
import pytest
|
||||
@@ -412,9 +412,9 @@ async def test_repair_failures(coresys: CoreSys, caplog: pytest.LogCaptureFixtur
|
||||
async def test_import_image(coresys: CoreSys, tmp_path: Path, log_starter: str):
|
||||
"""Test importing an image into docker."""
|
||||
(test_tar := tmp_path / "test.tar").touch()
|
||||
coresys.docker.images.import_image = AsyncMock(
|
||||
return_value=[{"stream": f"{log_starter}: imported"}]
|
||||
)
|
||||
coresys.docker.images.import_image.return_value = [
|
||||
{"stream": f"{log_starter}: imported"}
|
||||
]
|
||||
coresys.docker.images.inspect.return_value = {"Id": "imported"}
|
||||
|
||||
image = await coresys.docker.import_image(test_tar)
|
||||
@@ -426,9 +426,9 @@ async def test_import_image(coresys: CoreSys, tmp_path: Path, log_starter: str):
|
||||
async def test_import_image_error(coresys: CoreSys, tmp_path: Path):
|
||||
"""Test failure importing an image into docker."""
|
||||
(test_tar := tmp_path / "test.tar").touch()
|
||||
coresys.docker.images.import_image = AsyncMock(
|
||||
return_value=[{"errorDetail": {"message": "fail"}}]
|
||||
)
|
||||
coresys.docker.images.import_image.return_value = [
|
||||
{"errorDetail": {"message": "fail"}}
|
||||
]
|
||||
|
||||
with pytest.raises(DockerError, match="Can't import image from tar: fail"):
|
||||
await coresys.docker.import_image(test_tar)
|
||||
@@ -441,12 +441,10 @@ async def test_import_multiple_images_in_tar(
|
||||
):
|
||||
"""Test importing an image into docker."""
|
||||
(test_tar := tmp_path / "test.tar").touch()
|
||||
coresys.docker.images.import_image = AsyncMock(
|
||||
return_value=[
|
||||
{"stream": "Loaded image: imported-1"},
|
||||
{"stream": "Loaded image: imported-2"},
|
||||
]
|
||||
)
|
||||
coresys.docker.images.import_image.return_value = [
|
||||
{"stream": "Loaded image: imported-1"},
|
||||
{"stream": "Loaded image: imported-2"},
|
||||
]
|
||||
|
||||
assert await coresys.docker.import_image(test_tar) is None
|
||||
|
||||
|
||||
786
tests/docker/test_pull_progress.py
Normal file
@@ -0,0 +1,786 @@
"""Tests for image pull progress tracking."""

import pytest

from supervisor.docker.manager import PullLogEntry, PullProgressDetail
from supervisor.docker.pull_progress import (
    DOWNLOAD_WEIGHT,
    EXTRACT_WEIGHT,
    ImagePullProgress,
    LayerProgress,
)


class TestLayerProgress:
    """Tests for LayerProgress class."""

    def test_already_exists_layer(self):
        """Test that already existing layer returns 100%."""
        layer = LayerProgress(layer_id="abc123", already_exists=True)
        assert layer.calculate_progress() == 100.0

    def test_extract_complete_layer(self):
        """Test that extracted layer returns 100%."""
        layer = LayerProgress(
            layer_id="abc123",
            total_size=1000,
            download_current=1000,
            download_complete=True,
            extract_complete=True,
        )
        assert layer.calculate_progress() == 100.0

    def test_download_complete_not_extracted(self):
        """Test layer that finished downloading but not extracting."""
        layer = LayerProgress(
            layer_id="abc123",
            total_size=1000,
            download_current=1000,
            download_complete=True,
            extract_complete=False,
        )
        assert layer.calculate_progress() == DOWNLOAD_WEIGHT  # 70%

    def test_extraction_progress_overlay2(self):
        """Test layer with byte-based extraction progress (overlay2)."""
        layer = LayerProgress(
            layer_id="abc123",
            total_size=1000,
            download_current=1000,
            extract_current=500,  # 50% extracted
            download_complete=True,
            extract_complete=False,
        )
        # 70% + (50% of 30%) = 70% + 15% = 85%
        assert layer.calculate_progress() == DOWNLOAD_WEIGHT + (0.5 * EXTRACT_WEIGHT)

    def test_downloading_progress(self):
        """Test layer during download phase."""
        layer = LayerProgress(
            layer_id="abc123",
            total_size=1000,
            download_current=500,  # 50% downloaded
            download_complete=False,
        )
        # 50% of 70% = 35%
        assert layer.calculate_progress() == 35.0

    def test_no_size_info_yet(self):
        """Test layer with no size information."""
        layer = LayerProgress(layer_id="abc123")
        assert layer.calculate_progress() == 0.0


class TestImagePullProgress:
    """Tests for ImagePullProgress class."""

    def test_empty_progress(self):
        """Test progress with no layers."""
        progress = ImagePullProgress()
        assert progress.calculate_progress() == 0.0

    def test_all_layers_already_exist(self):
        """Test when all layers already exist locally.

        When an image is fully cached, there are no "Downloading" events.
        Progress stays at 0 until the job completes and sets 100%.
        """
        progress = ImagePullProgress()

        # Simulate "Already exists" events
        entry1 = PullLogEntry(
            job_id="test",
            id="layer1",
            status="Already exists",
            progress_detail=PullProgressDetail(),
        )
        entry2 = PullLogEntry(
            job_id="test",
            id="layer2",
            status="Already exists",
            progress_detail=PullProgressDetail(),
        )
        progress.process_event(entry1)
        progress.process_event(entry2)

        # No downloading events = no progress reported (job completion sets 100%)
        assert progress.calculate_progress() == 0.0

    def test_single_layer_download(self):
        """Test progress tracking for single layer download."""
        progress = ImagePullProgress()

        # Pull fs layer
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Pulling fs layer",
                progress_detail=PullProgressDetail(),
            )
        )

        # Start downloading
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Downloading",
                progress_detail=PullProgressDetail(current=500, total=1000),
            )
        )
        # 50% of download phase = 35%
        assert progress.calculate_progress() == pytest.approx(35.0)

        # Download complete
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Download complete",
                progress_detail=PullProgressDetail(),
            )
        )
        assert progress.calculate_progress() == 70.0

        # Pull complete
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Pull complete",
                progress_detail=PullProgressDetail(),
            )
        )
        assert progress.calculate_progress() == 100.0

    def test_multiple_layers_equal_weight_progress(self):
        """Test count-based progress where each layer contributes equally."""
        progress = ImagePullProgress()

        # Two layers: sizes don't matter for weight, each layer = 50%

        # Pulling fs layer for both
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="large",
                status="Pulling fs layer",
                progress_detail=PullProgressDetail(),
            )
        )
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="small",
                status="Pulling fs layer",
                progress_detail=PullProgressDetail(),
            )
        )

        # Large layer: 50% downloaded = 35% layer progress (50% of 70%)
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="large",
                status="Downloading",
                progress_detail=PullProgressDetail(current=500, total=1000),
            )
        )

        # Small layer: 100% downloaded, waiting for extraction = 70% layer progress
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="small",
                status="Download complete",
                progress_detail=PullProgressDetail(),
            )
        )
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="small",
                status="Downloading",
                progress_detail=PullProgressDetail(current=100, total=100),
            )
        )

        # Progress calculation (count-based, equal weight per layer):
        # Large layer: 35% (50% of 70% download weight)
        # Small layer: 70% (download complete)
        # Each layer = 50% weight
        # Total: (35 + 70) / 2 = 52.5%
        assert progress.calculate_progress() == pytest.approx(52.5)

    def test_download_retry(self):
        """Test that download retry resets progress."""
        progress = ImagePullProgress()

        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Pulling fs layer",
                progress_detail=PullProgressDetail(),
            )
        )

        # Download 50%
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Downloading",
                progress_detail=PullProgressDetail(current=500, total=1000),
            )
        )
        assert progress.calculate_progress() == pytest.approx(35.0)

        # Retry
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Retrying in 5 seconds",
            )
        )
        assert progress.calculate_progress() == 0.0

    def test_layer_skips_download(self):
        """Test small layer that goes straight to Download complete."""
        progress = ImagePullProgress()

        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="small",
                status="Pulling fs layer",
                progress_detail=PullProgressDetail(),
            )
        )

        # Goes directly to Download complete (skipping Downloading events)
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="small",
                status="Download complete",
                progress_detail=PullProgressDetail(),
            )
        )

        # Should still work - sets minimal size
        layer = progress.layers["small"]
        assert layer.total_size == 1
        assert layer.download_complete is True

    def test_containerd_extract_progress(self):
        """Test extraction progress with containerd snapshotter (time-based)."""
        progress = ImagePullProgress()

        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Pulling fs layer",
                progress_detail=PullProgressDetail(),
            )
        )

        # Download complete
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Downloading",
                progress_detail=PullProgressDetail(current=1000, total=1000),
            )
        )
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Download complete",
                progress_detail=PullProgressDetail(),
            )
        )

        # Containerd extraction progress (time-based, not byte-based)
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Extracting",
                progress_detail=PullProgressDetail(current=5, units="s"),
            )
        )

        # Should be at 70% (download complete, time-based extraction not tracked)
        assert progress.calculate_progress() == 70.0

        # Pull complete
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Pull complete",
                progress_detail=PullProgressDetail(),
            )
        )
        assert progress.calculate_progress() == 100.0

    def test_overlay2_extract_progress(self):
        """Test extraction progress with overlay2 (byte-based)."""
        progress = ImagePullProgress()

        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Pulling fs layer",
                progress_detail=PullProgressDetail(),
            )
        )

        # Download complete
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Downloading",
                progress_detail=PullProgressDetail(current=1000, total=1000),
            )
        )
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Download complete",
                progress_detail=PullProgressDetail(),
            )
        )

        # At download complete, progress should be 70%
        assert progress.calculate_progress() == 70.0

        # Overlay2 extraction progress (byte-based, 50% extracted)
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Extracting",
                progress_detail=PullProgressDetail(current=500, total=1000),
            )
        )

        # Should be at 70% + (50% of 30%) = 85%
        assert progress.calculate_progress() == pytest.approx(85.0)

        # Extraction continues to 80%
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Extracting",
                progress_detail=PullProgressDetail(current=800, total=1000),
            )
        )

        # Should be at 70% + (80% of 30%) = 94%
        assert progress.calculate_progress() == pytest.approx(94.0)

        # Pull complete
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Pull complete",
                progress_detail=PullProgressDetail(),
            )
        )
        assert progress.calculate_progress() == 100.0

    def test_get_stage(self):
        """Test stage detection."""
        progress = ImagePullProgress()

        assert progress.get_stage() is None

        # Add a layer that needs downloading
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Pulling fs layer",
                progress_detail=PullProgressDetail(),
            )
        )
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Downloading",
                progress_detail=PullProgressDetail(current=500, total=1000),
            )
        )
        assert progress.get_stage() == "Downloading"

        # Download complete
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Download complete",
                progress_detail=PullProgressDetail(),
            )
        )
        assert progress.get_stage() == "Extracting"

        # Pull complete
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Pull complete",
                progress_detail=PullProgressDetail(),
            )
        )
        assert progress.get_stage() == "Pull complete"

    def test_should_update_job(self):
        """Test update threshold logic."""
        progress = ImagePullProgress()

        # Initial state - no updates
        should_update, _ = progress.should_update_job()
        assert not should_update

        # Add a layer and start downloading
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Pulling fs layer",
                progress_detail=PullProgressDetail(),
            )
        )

        # Small progress - 1%
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Downloading",
                progress_detail=PullProgressDetail(current=20, total=1000),
            )
        )
        # 2% of download = 1.4% total
        should_update, current = progress.should_update_job()
        assert should_update
        assert current == pytest.approx(1.4)

        # Tiny increment - shouldn't trigger update
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Downloading",
                progress_detail=PullProgressDetail(current=25, total=1000),
            )
        )
        should_update, _ = progress.should_update_job()
        assert not should_update

        # Larger increment - should trigger
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Downloading",
                progress_detail=PullProgressDetail(current=100, total=1000),
            )
        )
        should_update, _ = progress.should_update_job()
        assert should_update

    def test_verifying_checksum(self):
        """Test that Verifying Checksum marks download as nearly complete."""
        progress = ImagePullProgress()

        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Pulling fs layer",
                progress_detail=PullProgressDetail(),
            )
        )
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Downloading",
                progress_detail=PullProgressDetail(current=800, total=1000),
            )
        )
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Verifying Checksum",
                progress_detail=PullProgressDetail(),
            )
        )

        layer = progress.layers["layer1"]
        assert layer.download_current == 1000  # Should be set to total

    def test_events_without_status_ignored(self):
        """Test that events without status are ignored."""
        progress = ImagePullProgress()

        # Event without status (just id field)
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="abc123",
            )
        )

        # Event without id
        progress.process_event(
            PullLogEntry(
                job_id="test",
                status="Digest: sha256:abc123",
            )
        )

        # They shouldn't create layers or cause errors
        assert len(progress.layers) == 0

    def test_mixed_already_exists_and_pull(self):
        """Test combination of cached and pulled layers."""
        progress = ImagePullProgress()

        # Layer 1 already exists
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="cached",
                status="Already exists",
                progress_detail=PullProgressDetail(),
            )
        )

        # Layer 2 needs to be pulled
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="pulled",
                status="Pulling fs layer",
                progress_detail=PullProgressDetail(),
            )
        )
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="pulled",
                status="Downloading",
                progress_detail=PullProgressDetail(current=500, total=1000),
            )
        )

        # Only 1 layer needs pulling (cached layer excluded)
        # pulled: 35% (50% of 70% download weight)
        assert progress.calculate_progress() == pytest.approx(35.0)

        # Complete the pulled layer
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="pulled",
                status="Download complete",
                progress_detail=PullProgressDetail(),
            )
        )
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="pulled",
                status="Pull complete",
                progress_detail=PullProgressDetail(),
            )
        )

        assert progress.calculate_progress() == 100.0

    def test_pending_layers_prevent_premature_100(self):
        """Test that layers without size info scale down progress."""
        progress = ImagePullProgress()

        # First batch of layers - they complete
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Pulling fs layer",
                progress_detail=PullProgressDetail(),
            )
        )
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer2",
                status="Pulling fs layer",
                progress_detail=PullProgressDetail(),
            )
        )

        # Layer1 downloads and completes
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Downloading",
                progress_detail=PullProgressDetail(current=1000, total=1000),
            )
        )
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer1",
                status="Pull complete",
                progress_detail=PullProgressDetail(),
            )
        )

        # Layer2 is still pending (no size info yet) - simulating Docker rate limiting
        # Progress should NOT be 100% because layer2 hasn't started

        # Layer1 is 100% complete, layer2 is 0%
        # With scaling: 1 known layer at 100%, 1 pending layer
        # Scale factor = 1/(1+1) = 0.5, so progress = 100 * 0.5 = 50%
        assert progress.calculate_progress() == pytest.approx(50.0)

        # Now layer2 starts downloading
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer2",
                status="Downloading",
                progress_detail=PullProgressDetail(current=500, total=1000),
            )
        )

        # Now both layers have size info, no scaling needed
        # Layer1: 100%, Layer2: 35% (50% of 70%)
        # Weighted by equal size: (100 + 35) / 2 = 67.5%
        assert progress.calculate_progress() == pytest.approx(67.5)

        # Complete layer2
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="layer2",
                status="Pull complete",
                progress_detail=PullProgressDetail(),
            )
        )

        assert progress.calculate_progress() == 100.0

    def test_large_layers_appearing_late_dont_cause_regression(self):
        """Test that large layers discovered late don't cause progress to drop.

        This simulates Docker's rate-limiting behavior where small layers complete
        first, then large layers start downloading later.
        """
        progress = ImagePullProgress()

        # All layers announced upfront (Docker does this)
        for layer_id in ["small1", "small2", "big1", "big2"]:
            progress.process_event(
                PullLogEntry(
                    job_id="test",
                    id=layer_id,
                    status="Pulling fs layer",
                    progress_detail=PullProgressDetail(),
                )
            )

        # Big layers are "Waiting" (rate limited)
        for layer_id in ["big1", "big2"]:
            progress.process_event(
                PullLogEntry(
                    job_id="test",
                    id=layer_id,
                    status="Waiting",
                    progress_detail=PullProgressDetail(),
                )
            )

        # Small layers download quickly (1KB each)
        for layer_id in ["small1", "small2"]:
            progress.process_event(
                PullLogEntry(
                    job_id="test",
                    id=layer_id,
                    status="Downloading",
                    progress_detail=PullProgressDetail(current=1000, total=1000),
                )
            )
            progress.process_event(
                PullLogEntry(
                    job_id="test",
                    id=layer_id,
                    status="Pull complete",
                    progress_detail=PullProgressDetail(),
                )
            )

        # At this point, 2 small layers are complete, 2 big layers are unknown size
        progress_before_big = progress.calculate_progress()

        # Now big layers start downloading - they're 100MB each!
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="big1",
                status="Downloading",
                progress_detail=PullProgressDetail(current=1000000, total=100000000),
            )
        )

        progress_after_big1 = progress.calculate_progress()

        # Progress should NOT drop significantly when big layer appears
        # The monotonic tracking in should_update_job will help, but the
        # raw calculation should also not regress too badly
        assert progress_after_big1 >= progress_before_big * 0.5, (
            f"Progress dropped too much: {progress_before_big} -> {progress_after_big1}"
        )

        # Second big layer appears
        progress.process_event(
            PullLogEntry(
                job_id="test",
                id="big2",
                status="Downloading",
                progress_detail=PullProgressDetail(current=1000000, total=100000000),
            )
        )

        # Should still make forward progress overall
        # Complete all layers
        for layer_id in ["big1", "big2"]:
            progress.process_event(
                PullLogEntry(
                    job_id="test",
                    id=layer_id,
                    status="Pull complete",
                    progress_detail=PullProgressDetail(),
                )
            )

        assert progress.calculate_progress() == 100.0
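Taken together, these tests describe the weighting scheme the tracker is expected to follow: each layer that actually needs pulling contributes equally, a layer's download phase accounts for 70% of its progress and extraction for the remaining 30%, and layers announced without size information scale the total down so progress cannot hit 100% early. The condensed sketch below illustrates that arithmetic only; the `Layer` dataclass and `image_progress` helper are hypothetical names for illustration, not the Supervisor's `ImagePullProgress` code.

```python
from dataclasses import dataclass

DOWNLOAD_WEIGHT = 70.0  # download phase share of a layer's progress
EXTRACT_WEIGHT = 30.0   # extraction phase share


@dataclass
class Layer:
    total: int = 0
    downloaded: int = 0
    extracted: int = 0
    download_done: bool = False
    extract_done: bool = False

    def progress(self) -> float:
        if self.extract_done:
            return 100.0
        if not self.total:
            return 0.0
        if not self.download_done:
            return DOWNLOAD_WEIGHT * self.downloaded / self.total
        return DOWNLOAD_WEIGHT + EXTRACT_WEIGHT * self.extracted / self.total


def image_progress(layers: list[Layer], pending: int = 0) -> float:
    """Equal-weight average over known layers, scaled down by pending layers."""
    if not layers:
        return 0.0
    avg = sum(layer.progress() for layer in layers) / len(layers)
    return avg * len(layers) / (len(layers) + pending)


# Mirrors test_pending_layers_prevent_premature_100: one finished layer plus
# one announced-but-unsized layer should report 50%.
done = Layer(total=1000, downloaded=1000, download_done=True, extract_done=True)
assert image_progress([done], pending=1) == 50.0
```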
@@ -200,8 +200,6 @@ async def test_notify_on_change(coresys: CoreSys, ha_ws_client: AsyncMock):
                "type": "HassioError",
                "message": "Unknown error, see Supervisor logs (check with 'ha supervisor logs')",
                "stage": "test",
                "error_key": None,
                "extra_fields": None,
            }
        ],
        "created": ANY,
@@ -230,8 +228,6 @@ async def test_notify_on_change(coresys: CoreSys, ha_ws_client: AsyncMock):
                "type": "HassioError",
                "message": "Unknown error, see Supervisor logs (check with 'ha supervisor logs')",
                "stage": "test",
                "error_key": None,
                "extra_fields": None,
            }
        ],
        "created": ANY,