Compare commits


3 Commits

Author SHA1 Message Date
Stefan Agner
87e1e7a3ab Exclude already-existing layers from pull progress calculation
Layers that already exist locally should not count towards download
progress since there's nothing to download for them. Only layers that
need pulling are included in the progress calculation.
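
A minimal sketch of that exclusion (hypothetical helper name; it only assumes Docker's standard "Already exists" pull status string, not the actual Supervisor code):

```python
def layers_needing_pull(statuses: dict[str, str]) -> list[str]:
    """Return layer IDs that still need downloading.

    Layers whose latest status is "Already exists" are present locally,
    so they are left out of the progress denominator entirely.
    """
    return [
        layer_id
        for layer_id, status in statuses.items()
        if status != "Already exists"
    ]
```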

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-01 21:19:12 +01:00
Stefan Agner
e7c8700db9 Fix pytest 2025-12-01 21:19:12 +01:00
Stefan Agner
a4f681586e Use count-based progress for Docker image pulls
Refactor Docker image pull progress to use a simpler count-based approach
where each layer contributes equally (100% / total_layers) regardless of
size. This replaces the previous size-weighted calculation that was
susceptible to progress regression.

The core issue was that Docker rate-limits concurrent downloads (~3 at a
time) and reports layer sizes only when downloading starts. With size-
weighted progress, large layers appearing late would cause progress to
drop dramatically (e.g., 59% -> 29%) as the total size increased.
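
As an illustration (figures chosen to match the example above): with 590 MB downloaded out of a known 1 GB total, size-weighted progress reads 59%; once a late 1 GB layer starts reporting, the denominator doubles to 2 GB and the same 590 MB now reads roughly 29%.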

The new approach:
- Each layer contributes equally to overall progress
- Per-layer progress: 70% download weight, 30% extraction weight
- Progress only starts after first "Downloading" event (when layer
  count is known)
- Always caps at 99% - job completion handles final 100%

This simplifies the code by moving progress tracking to a dedicated
module (pull_progress.py) and removing complex size-based scaling logic
that tried to account for unknown layer sizes.
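
A rough sketch of the described calculation (illustrative only; `overall_progress` and the per-layer tuple layout are hypothetical, not the actual pull_progress.py API):

```python
def overall_progress(layers: dict[str, tuple[float, float]]) -> float:
    """Combine per-layer progress fractions into one percentage.

    ``layers`` maps layer ID to (download_fraction, extract_fraction),
    each in the range 0.0..1.0. Every layer contributes equally; within
    a layer, download counts for 70% and extraction for 30%.
    """
    if not layers:
        return 0.0  # progress only starts once the layer count is known
    per_layer = [
        0.7 * download + 0.3 * extract
        for download, extract in layers.values()
    ]
    percent = sum(per_layer) / len(per_layer) * 100
    return min(percent, 99.0)  # job completion reports the final 100%
```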

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-01 21:19:03 +01:00
104 changed files with 2202 additions and 2765 deletions

View File

@@ -1,7 +1,6 @@
# General files
.git
.github
.gitkeep
.devcontainer
.vscode

View File

@@ -53,10 +53,10 @@ jobs:
version: ${{ steps.version.outputs.version }}
channel: ${{ steps.version.outputs.channel }}
publish: ${{ steps.version.outputs.publish }}
build_wheels: ${{ steps.requirements.outputs.build_wheels }}
requirements: ${{ steps.requirements.outputs.changed }}
steps:
- name: Checkout the repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
fetch-depth: 0
@@ -72,25 +72,20 @@ jobs:
- name: Get changed files
id: changed_files
if: github.event_name != 'release'
if: steps.version.outputs.publish == 'false'
uses: masesgroup/retrieve-changed-files@491e80760c0e28d36ca6240a27b1ccb8e1402c13 # v3.0.0
- name: Check if requirements files changed
id: requirements
run: |
# No wheels build necessary for releases
if [[ "${{ github.event_name }}" == "release" ]]; then
echo "build_wheels=false" >> "$GITHUB_OUTPUT"
elif [[ "${{ steps.changed_files.outputs.all }}" =~ (requirements\.txt|build\.yaml|\.github/workflows/builder\.yml) ]]; then
echo "build_wheels=true" >> "$GITHUB_OUTPUT"
else
echo "build_wheels=false" >> "$GITHUB_OUTPUT"
if [[ "${{ steps.changed_files.outputs.all }}" =~ (requirements.txt|build.yaml) ]]; then
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
build:
name: Build ${{ matrix.arch }} supervisor
needs: init
runs-on: ${{ matrix.runs-on }}
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
@@ -98,66 +93,34 @@ jobs:
strategy:
matrix:
arch: ${{ fromJson(needs.init.outputs.architectures) }}
include:
- runs-on: ubuntu-24.04
- runs-on: ubuntu-24.04-arm
arch: aarch64
env:
WHEELS_ABI: cp313
WHEELS_TAG: musllinux_1_2
WHEELS_APK_DEPS: "libffi-dev;openssl-dev;yaml-dev"
WHEELS_SKIP_BINARY: aiohttp
steps:
- name: Checkout the repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
fetch-depth: 0
- name: Write env-file for wheels build
if: needs.init.outputs.build_wheels == 'true'
- name: Write env-file
if: needs.init.outputs.requirements == 'true'
run: |
(
# Fix out of memory issues with rust
echo "CARGO_NET_GIT_FETCH_WITH_CLI=true"
) > .env_file
- name: Build and publish wheels
if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'true'
uses: home-assistant/wheels@e5742a69d69f0e274e2689c998900c7d19652c21 # 2025.12.0
# home-assistant/wheels doesn't support sha pinning
- name: Build wheels
if: needs.init.outputs.requirements == 'true'
uses: home-assistant/wheels@2025.11.0
with:
abi: cp313
tag: musllinux_1_2
arch: ${{ matrix.arch }}
wheels-key: ${{ secrets.WHEELS_KEY }}
abi: ${{ env.WHEELS_ABI }}
tag: ${{ env.WHEELS_TAG }}
arch: ${{ matrix.arch }}
apk: ${{ env.WHEELS_APK_DEPS }}
skip-binary: ${{ env.WHEELS_SKIP_BINARY }}
apk: "libffi-dev;openssl-dev;yaml-dev"
skip-binary: aiohttp
env-file: true
requirements: "requirements.txt"
- name: Build local wheels
if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'false'
uses: home-assistant/wheels@e5742a69d69f0e274e2689c998900c7d19652c21 # 2025.12.0
with:
wheels-host: ""
wheels-user: ""
wheels-key: ""
local-wheels-repo-path: "wheels/"
abi: ${{ env.WHEELS_ABI }}
tag: ${{ env.WHEELS_TAG }}
arch: ${{ matrix.arch }}
apk: ${{ env.WHEELS_APK_DEPS }}
skip-binary: ${{ env.WHEELS_SKIP_BINARY }}
env-file: true
requirements: "requirements.txt"
- name: Upload local wheels artifact
if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'false'
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: wheels-${{ matrix.arch }}
path: wheels
retention-days: 1
- name: Set version
if: needs.init.outputs.publish == 'true'
uses: home-assistant/actions/helpers/version@master
@@ -204,7 +167,6 @@ jobs:
- name: Build supervisor
uses: home-assistant/builder@2025.11.0
with:
image: ${{ matrix.arch }}
args: |
$BUILD_ARGS \
--${{ matrix.arch }} \
@@ -219,7 +181,7 @@ jobs:
steps:
- name: Checkout the repository
if: needs.init.outputs.publish == 'true'
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Initialize git
if: needs.init.outputs.publish == 'true'
@@ -244,14 +206,7 @@ jobs:
timeout-minutes: 60
steps:
- name: Checkout the repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Download local wheels artifact
if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'false'
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
with:
name: wheels-amd64
path: wheels
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
# home-assistant/builder doesn't support sha pinning
- name: Build the Supervisor

View File

@@ -26,7 +26,7 @@ jobs:
name: Prepare Python dependencies
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python
id: python
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
@@ -34,7 +34,7 @@ jobs:
python-version: ${{ env.DEFAULT_PYTHON }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: |
@@ -48,7 +48,7 @@ jobs:
pip install -r requirements.txt -r requirements_tests.txt
- name: Restore pre-commit environment from cache
id: cache-precommit
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: ${{ env.PRE_COMMIT_CACHE }}
lookup-only: true
@@ -68,7 +68,7 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
id: python
@@ -76,7 +76,7 @@ jobs:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: |
@@ -88,7 +88,7 @@ jobs:
exit 1
- name: Restore pre-commit environment from cache
id: cache-precommit
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: ${{ env.PRE_COMMIT_CACHE }}
key: |
@@ -111,7 +111,7 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
id: python
@@ -119,7 +119,7 @@ jobs:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: |
@@ -131,7 +131,7 @@ jobs:
exit 1
- name: Restore pre-commit environment from cache
id: cache-precommit
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: ${{ env.PRE_COMMIT_CACHE }}
key: |
@@ -154,7 +154,7 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Register hadolint problem matcher
run: |
echo "::add-matcher::.github/workflows/matchers/hadolint.json"
@@ -169,7 +169,7 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
id: python
@@ -177,7 +177,7 @@ jobs:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: |
@@ -189,7 +189,7 @@ jobs:
exit 1
- name: Restore pre-commit environment from cache
id: cache-precommit
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: ${{ env.PRE_COMMIT_CACHE }}
key: |
@@ -213,7 +213,7 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
id: python
@@ -221,7 +221,7 @@ jobs:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: |
@@ -233,7 +233,7 @@ jobs:
exit 1
- name: Restore pre-commit environment from cache
id: cache-precommit
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: ${{ env.PRE_COMMIT_CACHE }}
key: |
@@ -257,7 +257,7 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
id: python
@@ -265,7 +265,7 @@ jobs:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: |
@@ -293,7 +293,7 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
id: python
@@ -307,7 +307,7 @@ jobs:
echo "key=mypy-${{ env.MYPY_CACHE_VERSION }}-$mypy_version-$(date -u '+%Y-%m-%dT%H:%M:%s')" >> $GITHUB_OUTPUT
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: >-
@@ -318,7 +318,7 @@ jobs:
echo "Failed to restore Python virtual environment from cache"
exit 1
- name: Restore mypy cache
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: .mypy_cache
key: >-
@@ -339,7 +339,7 @@ jobs:
name: Run tests Python ${{ needs.prepare.outputs.python-version }}
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
id: python
@@ -351,7 +351,7 @@ jobs:
cosign-release: "v2.5.3"
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: |
@@ -386,7 +386,7 @@ jobs:
-o console_output_style=count \
tests
- name: Upload coverage artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: coverage
path: .coverage
@@ -398,7 +398,7 @@ jobs:
needs: ["pytest", "prepare"]
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
id: python
@@ -406,7 +406,7 @@ jobs:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: venv
key: |
@@ -417,7 +417,7 @@ jobs:
echo "Failed to restore Python virtual environment from cache"
exit 1
- name: Download all coverage artifacts
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
with:
name: coverage
path: coverage/
@@ -428,4 +428,4 @@ jobs:
coverage report
coverage xml
- name: Upload coverage to Codecov
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1

View File

@@ -9,7 +9,7 @@ jobs:
lock:
runs-on: ubuntu-latest
steps:
- uses: dessant/lock-threads@7266a7ce5c1df01b1c6db85bf8cd86c737dadbe7 # v6.0.0
- uses: dessant/lock-threads@1bf7ec25051fe7c00bdd17e6a7cf3d7bfb7dc771 # v5.0.1
with:
github-token: ${{ github.token }}
issue-inactive-days: "30"

View File

@@ -11,7 +11,7 @@ jobs:
name: Release Drafter
steps:
- name: Checkout the repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
fetch-depth: 0

View File

@@ -10,9 +10,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Sentry Release
uses: getsentry/action-release@dab6548b3c03c4717878099e43782cf5be654289 # v3.5.0
uses: getsentry/action-release@128c5058bbbe93c8e02147fe0a9c713f166259a6 # v3.4.0
env:
SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
SENTRY_ORG: ${{ secrets.SENTRY_ORG }}

View File

@@ -9,7 +9,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
- uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-stale: 30

View File

@@ -14,7 +14,7 @@ jobs:
latest_version: ${{ steps.latest_frontend_version.outputs.latest_tag }}
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Get latest frontend release
id: latest_frontend_version
uses: abatilo/release-info-action@32cb932219f1cee3fc4f4a298fd65ead5d35b661 # v1.3.3
@@ -49,7 +49,7 @@ jobs:
if: needs.check-version.outputs.skip != 'true'
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Clear www folder
run: |
rm -rf supervisor/api/panel/*
@@ -68,7 +68,7 @@ jobs:
run: |
rm -f supervisor/api/panel/home_assistant_frontend_supervisor-*.tar.gz
- name: Create PR
uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412 # v7.0.9
with:
commit-message: "Update frontend to version ${{ needs.check-version.outputs.latest_version }}"
branch: autoupdate-frontend

.gitignore
View File

@@ -24,9 +24,6 @@ var/
.installed.cfg
*.egg
# Local wheels
wheels/**/*.whl
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
@@ -105,4 +102,4 @@ ENV/
/.dmypy.json
# Mac
.DS_Store
.DS_Store

View File

@@ -7,6 +7,9 @@ ENV \
CRYPTOGRAPHY_OPENSSL_NO_LEGACY=1 \
UV_SYSTEM_PYTHON=true
ARG \
COSIGN_VERSION
# Install base
WORKDIR /usr/src
RUN \
@@ -22,22 +25,14 @@ RUN \
openssl \
yaml \
\
&& pip3 install uv==0.9.18
&& curl -Lso /usr/bin/cosign "https://github.com/home-assistant/cosign/releases/download/${COSIGN_VERSION}/cosign_${BUILD_ARCH}" \
&& chmod a+x /usr/bin/cosign \
&& pip3 install uv==0.8.9
# Install requirements
RUN \
--mount=type=bind,source=./requirements.txt,target=/usr/src/requirements.txt \
--mount=type=bind,source=./wheels,target=/usr/src/wheels \
if ls /usr/src/wheels/musllinux/* >/dev/null 2>&1; then \
LOCAL_WHEELS=/usr/src/wheels/musllinux; \
echo "Using local wheels from: $LOCAL_WHEELS"; \
else \
LOCAL_WHEELS=; \
echo "No local wheels found"; \
fi && \
uv pip install --compile-bytecode --no-cache --no-build \
-r requirements.txt \
${LOCAL_WHEELS:+--find-links $LOCAL_WHEELS}
uv pip install --compile-bytecode --no-cache --no-build -r requirements.txt
# Install Home Assistant Supervisor
COPY . supervisor

View File

@@ -1,10 +1,12 @@
image: ghcr.io/home-assistant/{arch}-hassio-supervisor
build_from:
aarch64: ghcr.io/home-assistant/aarch64-base-python:3.13-alpine3.22-2025.12.2
amd64: ghcr.io/home-assistant/amd64-base-python:3.13-alpine3.22-2025.12.2
aarch64: ghcr.io/home-assistant/aarch64-base-python:3.13-alpine3.22-2025.11.1
amd64: ghcr.io/home-assistant/amd64-base-python:3.13-alpine3.22-2025.11.1
cosign:
base_identity: https://github.com/home-assistant/docker-base/.*
identity: https://github.com/home-assistant/supervisor/.*
args:
COSIGN_VERSION: 2.5.3
labels:
io.hass.type: supervisor
org.opencontainers.image.title: Home Assistant Supervisor

View File

@@ -1,32 +1,32 @@
aiodns==3.6.1
aiodns==3.5.0
aiodocker==0.24.0
aiohttp==3.13.3
aiohttp==3.13.2
atomicwrites-homeassistant==1.4.1
attrs==25.4.0
awesomeversion==25.8.0
backports.zstd==1.3.0
blockbuster==1.5.26
backports.zstd==1.1.0
blockbuster==1.5.25
brotli==1.2.0
ciso8601==2.3.3
colorlog==6.10.1
cpe==1.3.1
cryptography==46.0.3
debugpy==1.8.19
debugpy==1.8.17
deepmerge==2.0
dirhash==0.5.0
docker==7.1.0
faust-cchardet==2.1.19
gitpython==3.1.46
gitpython==3.1.45
jinja2==3.1.6
log-rate-limit==1.4.2
orjson==3.11.5
orjson==3.11.4
pulsectl==24.12.0
pyudev==0.24.4
PyYAML==6.0.3
requests==2.32.5
securetar==2025.12.0
sentry-sdk==2.48.0
securetar==2025.2.1
sentry-sdk==2.46.0
setuptools==80.9.0
voluptuous==0.16.0
voluptuous==0.15.2
dbus-fast==3.1.2
zlib-fast==0.2.1

View File

@@ -1,16 +1,16 @@
astroid==4.0.3
coverage==7.13.1
mypy==1.19.1
pre-commit==4.5.1
astroid==4.0.2
coverage==7.12.0
mypy==1.19.0
pre-commit==4.5.0
pylint==4.0.4
pytest-aiohttp==1.1.0
pytest-asyncio==1.3.0
pytest-cov==7.0.0
pytest-timeout==2.4.0
pytest==9.0.2
ruff==0.14.10
time-machine==3.2.0
types-docker==7.1.0.20251202
pytest==9.0.1
ruff==0.14.7
time-machine==3.1.0
types-docker==7.1.0.20251129
types-pyyaml==6.0.12.20250915
types-requests==2.32.4.20250913
urllib3==2.6.2
urllib3==2.5.0

View File

@@ -24,6 +24,8 @@ from securetar import AddFileError, atomic_contents_add, secure_path
import voluptuous as vol
from voluptuous.humanize import humanize_error
from supervisor.utils.dt import utc_from_timestamp
from ..bus import EventListener
from ..const import (
ATTR_ACCESS_TOKEN,
@@ -64,22 +66,13 @@ from ..docker.const import ContainerState
from ..docker.monitor import DockerContainerStateEvent
from ..docker.stats import DockerStats
from ..exceptions import (
AddonBackupMetadataInvalidError,
AddonBuildFailedUnknownError,
AddonConfigurationInvalidError,
AddonNotRunningError,
AddonConfigurationError,
AddonNotSupportedError,
AddonNotSupportedWriteStdinError,
AddonPrePostBackupCommandReturnedError,
AddonsError,
AddonsJobError,
AddonUnknownError,
BackupRestoreUnknownError,
ConfigurationFileError,
DockerBuildError,
DockerError,
HostAppArmorError,
StoreAddonNotFoundError,
)
from ..hardware.data import Device
from ..homeassistant.const import WSEvent
@@ -90,7 +83,6 @@ from ..resolution.data import Issue
from ..store.addon import AddonStore
from ..utils import check_port
from ..utils.apparmor import adjust_profile
from ..utils.dt import utc_from_timestamp
from ..utils.json import read_json_file, write_json_file
from ..utils.sentry import async_capture_exception
from .const import (
@@ -243,7 +235,7 @@ class Addon(AddonModel):
await self.instance.check_image(self.version, default_image, self.arch)
except DockerError:
_LOGGER.info("No %s addon Docker image %s found", self.slug, self.image)
with suppress(DockerError, AddonNotSupportedError):
with suppress(DockerError):
await self.instance.install(self.version, default_image, arch=self.arch)
self.persist[ATTR_IMAGE] = default_image
@@ -726,16 +718,18 @@ class Addon(AddonModel):
options = self.schema.validate(self.options)
await self.sys_run_in_executor(write_json_file, self.path_options, options)
except vol.Invalid as ex:
raise AddonConfigurationInvalidError(
_LOGGER.error,
addon=self.slug,
validation_error=humanize_error(self.options, ex),
) from None
except ConfigurationFileError as err:
_LOGGER.error(
"Add-on %s has invalid options: %s",
self.slug,
humanize_error(self.options, ex),
)
except ConfigurationFileError:
_LOGGER.error("Add-on %s can't write options", self.slug)
raise AddonUnknownError(addon=self.slug) from err
else:
_LOGGER.debug("Add-on %s write options: %s", self.slug, options)
return
_LOGGER.debug("Add-on %s write options: %s", self.slug, options)
raise AddonConfigurationError()
@Job(
name="addon_unload",
@@ -778,7 +772,7 @@ class Addon(AddonModel):
async def install(self) -> None:
"""Install and setup this addon."""
if not self.addon_store:
raise StoreAddonNotFoundError(addon=self.slug)
raise AddonsError("Missing from store, cannot install!")
await self.sys_addons.data.install(self.addon_store)
@@ -799,17 +793,9 @@ class Addon(AddonModel):
await self.instance.install(
self.latest_version, self.addon_store.image, arch=self.arch
)
except AddonsError:
await self.sys_addons.data.uninstall(self)
raise
except DockerBuildError as err:
_LOGGER.error("Could not build image for addon %s: %s", self.slug, err)
await self.sys_addons.data.uninstall(self)
raise AddonBuildFailedUnknownError(addon=self.slug) from err
except DockerError as err:
_LOGGER.error("Could not pull image to update addon %s: %s", self.slug, err)
await self.sys_addons.data.uninstall(self)
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
# Finish initialization and set up listeners
await self.load()
@@ -833,8 +819,7 @@ class Addon(AddonModel):
try:
await self.instance.remove(remove_image=remove_image)
except DockerError as err:
_LOGGER.error("Could not remove image for addon %s: %s", self.slug, err)
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
self.state = AddonState.UNKNOWN
@@ -899,7 +884,7 @@ class Addon(AddonModel):
if it was running. Else nothing is returned.
"""
if not self.addon_store:
raise StoreAddonNotFoundError(addon=self.slug)
raise AddonsError("Missing from store, cannot update!")
old_image = self.image
# Cache data to prevent races with other updates to global
@@ -907,12 +892,8 @@ class Addon(AddonModel):
try:
await self.instance.update(store.version, store.image, arch=self.arch)
except DockerBuildError as err:
_LOGGER.error("Could not build image for addon %s: %s", self.slug, err)
raise AddonBuildFailedUnknownError(addon=self.slug) from err
except DockerError as err:
_LOGGER.error("Could not pull image to update addon %s: %s", self.slug, err)
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
# Stop the addon if running
if (last_state := self.state) in {AddonState.STARTED, AddonState.STARTUP}:
@@ -954,23 +935,12 @@ class Addon(AddonModel):
"""
last_state: AddonState = self.state
try:
# remove docker container and image but not addon config
# remove docker container but not addon config
try:
await self.instance.remove()
except DockerError as err:
_LOGGER.error("Could not remove image for addon %s: %s", self.slug, err)
raise AddonUnknownError(addon=self.slug) from err
try:
await self.instance.install(self.version)
except DockerBuildError as err:
_LOGGER.error("Could not build image for addon %s: %s", self.slug, err)
raise AddonBuildFailedUnknownError(addon=self.slug) from err
except DockerError as err:
_LOGGER.error(
"Could not pull image to update addon %s: %s", self.slug, err
)
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
if self.addon_store:
await self.sys_addons.data.update(self.addon_store)
@@ -1141,9 +1111,8 @@ class Addon(AddonModel):
try:
await self.instance.run()
except DockerError as err:
_LOGGER.error("Could not start container for addon %s: %s", self.slug, err)
self.state = AddonState.ERROR
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
return self.sys_create_task(self._wait_for_startup())
@@ -1158,9 +1127,8 @@ class Addon(AddonModel):
try:
await self.instance.stop()
except DockerError as err:
_LOGGER.error("Could not stop container for addon %s: %s", self.slug, err)
self.state = AddonState.ERROR
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
@Job(
name="addon_restart",
@@ -1193,15 +1161,9 @@ class Addon(AddonModel):
async def stats(self) -> DockerStats:
"""Return stats of container."""
try:
if not await self.is_running():
raise AddonNotRunningError(_LOGGER.warning, addon=self.slug)
return await self.instance.stats()
except DockerError as err:
_LOGGER.error(
"Could not get stats of container for addon %s: %s", self.slug, err
)
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
@Job(
name="addon_write_stdin",
@@ -1211,18 +1173,14 @@ class Addon(AddonModel):
async def write_stdin(self, data) -> None:
"""Write data to add-on stdin."""
if not self.with_stdin:
raise AddonNotSupportedWriteStdinError(_LOGGER.error, addon=self.slug)
raise AddonNotSupportedError(
f"Add-on {self.slug} does not support writing to stdin!", _LOGGER.error
)
try:
if not await self.is_running():
raise AddonNotRunningError(_LOGGER.warning, addon=self.slug)
await self.instance.write_stdin(data)
return await self.instance.write_stdin(data)
except DockerError as err:
_LOGGER.error(
"Could not write stdin to container for addon %s: %s", self.slug, err
)
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
async def _backup_command(self, command: str) -> None:
try:
@@ -1231,14 +1189,15 @@ class Addon(AddonModel):
_LOGGER.debug(
"Pre-/Post backup command failed with: %s", command_return.output
)
raise AddonPrePostBackupCommandReturnedError(
_LOGGER.error, addon=self.slug, exit_code=command_return.exit_code
raise AddonsError(
f"Pre-/Post backup command returned error code: {command_return.exit_code}",
_LOGGER.error,
)
except DockerError as err:
_LOGGER.error(
"Failed running pre-/post backup command %s: %s", command, err
)
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError(
f"Failed running pre-/post backup command {command}: {str(err)}",
_LOGGER.error,
) from err
@Job(
name="addon_begin_backup",
@@ -1327,14 +1286,15 @@ class Addon(AddonModel):
try:
self.instance.export_image(temp_path.joinpath("image.tar"))
except DockerError as err:
raise BackupRestoreUnknownError() from err
raise AddonsError() from err
# Store local configs/state
try:
write_json_file(temp_path.joinpath("addon.json"), metadata)
except ConfigurationFileError as err:
_LOGGER.error("Can't save meta for %s: %s", self.slug, err)
raise BackupRestoreUnknownError() from err
raise AddonsError(
f"Can't save meta for {self.slug}", _LOGGER.error
) from err
# Store AppArmor Profile
if apparmor_profile:
@@ -1344,7 +1304,9 @@ class Addon(AddonModel):
apparmor_profile, profile_backup_file
)
except HostAppArmorError as err:
raise BackupRestoreUnknownError() from err
raise AddonsError(
"Can't backup AppArmor profile", _LOGGER.error
) from err
# Write tarfile
with tar_file as backup:
@@ -1398,8 +1360,7 @@ class Addon(AddonModel):
)
_LOGGER.info("Finish backup for addon %s", self.slug)
except (tarfile.TarError, OSError, AddFileError) as err:
_LOGGER.error("Can't write backup tarfile for addon %s: %s", self.slug, err)
raise BackupRestoreUnknownError() from err
raise AddonsError(f"Can't write tarfile: {err}", _LOGGER.error) from err
finally:
if was_running:
wait_for_start = await self.end_backup()
@@ -1441,24 +1402,28 @@ class Addon(AddonModel):
try:
tmp, data = await self.sys_run_in_executor(_extract_tarfile)
except tarfile.TarError as err:
_LOGGER.error("Can't extract backup tarfile for %s: %s", self.slug, err)
raise BackupRestoreUnknownError() from err
raise AddonsError(
f"Can't read tarfile {tar_file}: {err}", _LOGGER.error
) from err
except ConfigurationFileError as err:
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
try:
# Validate
try:
data = SCHEMA_ADDON_BACKUP(data)
except vol.Invalid as err:
raise AddonBackupMetadataInvalidError(
raise AddonsError(
f"Can't validate {self.slug}, backup data: {humanize_error(data, err)}",
_LOGGER.error,
addon=self.slug,
validation_error=humanize_error(data, err),
) from err
# Validate availability. Raises if not
self._validate_availability(data[ATTR_SYSTEM], logger=_LOGGER.error)
# If available
if not self._available(data[ATTR_SYSTEM]):
raise AddonNotSupportedError(
f"Add-on {self.slug} is not available for this platform",
_LOGGER.error,
)
# Restore local add-on information
_LOGGER.info("Restore config for addon %s", self.slug)
@@ -1517,10 +1482,9 @@ class Addon(AddonModel):
try:
await self.sys_run_in_executor(_restore_data)
except shutil.Error as err:
_LOGGER.error(
"Can't restore origin data for %s: %s", self.slug, err
)
raise BackupRestoreUnknownError() from err
raise AddonsError(
f"Can't restore origin data: {err}", _LOGGER.error
) from err
# Restore AppArmor
profile_file = Path(tmp.name, "apparmor.txt")
@@ -1531,11 +1495,10 @@ class Addon(AddonModel):
)
except HostAppArmorError as err:
_LOGGER.error(
"Can't restore AppArmor profile for add-on %s: %s",
"Can't restore AppArmor profile for add-on %s",
self.slug,
err,
)
raise BackupRestoreUnknownError() from err
raise AddonsError() from err
finally:
# Is add-on loaded

View File

@@ -5,7 +5,6 @@ from __future__ import annotations
import base64
from functools import cached_property
import json
import logging
from pathlib import Path
from typing import TYPE_CHECKING, Any
@@ -24,22 +23,15 @@ from ..const import (
CpuArch,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..docker.const import DOCKER_HUB, DOCKER_HUB_LEGACY
from ..docker.const import DOCKER_HUB
from ..docker.interface import MAP_ARCH
from ..exceptions import (
AddonBuildArchitectureNotSupportedError,
AddonBuildDockerfileMissingError,
ConfigurationFileError,
HassioArchNotFound,
)
from ..exceptions import ConfigurationFileError, HassioArchNotFound
from ..utils.common import FileConfiguration, find_one_filetype
from .validate import SCHEMA_BUILD_CONFIG
if TYPE_CHECKING:
from .manager import AnyAddon
_LOGGER: logging.Logger = logging.getLogger(__name__)
class AddonBuild(FileConfiguration, CoreSysAttributes):
"""Handle build options for add-ons."""
@@ -120,7 +112,7 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
return self.addon.path_location.joinpath(f"Dockerfile.{self.arch}")
return self.addon.path_location.joinpath("Dockerfile")
async def is_valid(self) -> None:
async def is_valid(self) -> bool:
"""Return true if the build env is valid."""
def build_is_valid() -> bool:
@@ -132,17 +124,9 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
)
try:
if not await self.sys_run_in_executor(build_is_valid):
raise AddonBuildDockerfileMissingError(
_LOGGER.error, addon=self.addon.slug
)
return await self.sys_run_in_executor(build_is_valid)
except HassioArchNotFound:
raise AddonBuildArchitectureNotSupportedError(
_LOGGER.error,
addon=self.addon.slug,
addon_arch_list=self.addon.supported_arch,
system_arch_list=[arch.value for arch in self.sys_arch.supported],
) from None
return False
def get_docker_config_json(self) -> str | None:
"""Generate Docker config.json content with registry credentials for base image.
@@ -171,11 +155,8 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
# Use the actual registry URL for the key
# Docker Hub uses "https://index.docker.io/v1/" as the key
# Support both docker.io (official) and hub.docker.com (legacy)
registry_key = (
"https://index.docker.io/v1/"
if registry in (DOCKER_HUB, DOCKER_HUB_LEGACY)
else registry
"https://index.docker.io/v1/" if registry == DOCKER_HUB else registry
)
config = {"auths": {registry_key: {"auth": auth_string}}}

View File

@@ -11,6 +11,8 @@ from typing import Any
from awesomeversion import AwesomeVersion, AwesomeVersionException
from supervisor.utils.dt import utc_from_timestamp
from ..const import (
ATTR_ADVANCED,
ATTR_APPARMOR,
@@ -98,7 +100,6 @@ from ..exceptions import (
from ..jobs.const import JOB_GROUP_ADDON
from ..jobs.job_group import JobGroup
from ..utils import version_is_new_enough
from ..utils.dt import utc_from_timestamp
from .configuration import FolderMapping
from .const import (
ATTR_BACKUP,
@@ -315,12 +316,12 @@ class AddonModel(JobGroup, ABC):
@property
def panel_title(self) -> str:
"""Return panel title for Ingress frame."""
"""Return panel icon for Ingress frame."""
return self.data.get(ATTR_PANEL_TITLE, self.name)
@property
def panel_admin(self) -> bool:
"""Return if panel is only available for admin users."""
def panel_admin(self) -> str:
"""Return panel icon for Ingress frame."""
return self.data[ATTR_PANEL_ADMIN]
@property
@@ -488,7 +489,7 @@ class AddonModel(JobGroup, ABC):
return self.data[ATTR_DEVICETREE]
@property
def with_tmpfs(self) -> bool:
def with_tmpfs(self) -> str | None:
"""Return if tmp is in memory of add-on."""
return self.data[ATTR_TMPFS]
@@ -508,7 +509,7 @@ class AddonModel(JobGroup, ABC):
return self.data[ATTR_VIDEO]
@property
def homeassistant_version(self) -> AwesomeVersion | None:
def homeassistant_version(self) -> str | None:
"""Return min Home Assistant version they needed by Add-on."""
return self.data.get(ATTR_HOMEASSISTANT)

View File

@@ -75,7 +75,7 @@ class AddonOptions(CoreSysAttributes):
"""Create a schema for add-on options."""
return vol.Schema(vol.All(dict, self))
def __call__(self, struct: dict[str, Any]) -> dict[str, Any]:
def __call__(self, struct):
"""Create schema validator for add-ons options."""
options = {}
@@ -193,7 +193,9 @@ class AddonOptions(CoreSysAttributes):
f"Fatal error for option '{key}' with type '{typ}' in {self._name} ({self._slug})"
) from None
def _nested_validate_list(self, typ: Any, data_list: Any, key: str) -> list[Any]:
def _nested_validate_list(
self, typ: Any, data_list: list[Any], key: str
) -> list[Any]:
"""Validate nested items."""
options = []
@@ -211,7 +213,7 @@ class AddonOptions(CoreSysAttributes):
return options
def _nested_validate_dict(
self, typ: dict[Any, Any], data_dict: Any, key: str
self, typ: dict[Any, Any], data_dict: dict[Any, Any], key: str
) -> dict[Any, Any]:
"""Validate nested items."""
options = {}
@@ -262,7 +264,7 @@ class UiOptions(CoreSysAttributes):
def __init__(self, coresys: CoreSys) -> None:
"""Initialize UI option render."""
self.coresys: CoreSys = coresys
self.coresys = coresys
def __call__(self, raw_schema: dict[str, Any]) -> list[dict[str, Any]]:
"""Generate UI schema."""
@@ -277,10 +279,10 @@ class UiOptions(CoreSysAttributes):
def _ui_schema_element(
self,
ui_schema: list[dict[str, Any]],
value: str | list[Any] | dict[str, Any],
value: str,
key: str,
multiple: bool = False,
) -> None:
):
if isinstance(value, list):
# nested value list
assert not multiple

View File

@@ -782,10 +782,6 @@ class RestAPI(CoreSysAttributes):
web.delete(
"/store/repositories/{repository}", api_store.remove_repository
),
web.post(
"/store/repositories/{repository}/repair",
api_store.repositories_repository_repair,
),
]
)

View File

@@ -100,9 +100,6 @@ from ..const import (
from ..coresys import CoreSysAttributes
from ..docker.stats import DockerStats
from ..exceptions import (
AddonBootConfigCannotChangeError,
AddonConfigurationInvalidError,
AddonNotSupportedWriteStdinError,
APIAddonNotInstalled,
APIError,
APIForbidden,
@@ -128,7 +125,6 @@ SCHEMA_OPTIONS = vol.Schema(
vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(str),
vol.Optional(ATTR_INGRESS_PANEL): vol.Boolean(),
vol.Optional(ATTR_WATCHDOG): vol.Boolean(),
vol.Optional(ATTR_OPTIONS): vol.Maybe(dict),
}
)
@@ -304,24 +300,19 @@ class APIAddons(CoreSysAttributes):
# Update secrets for validation
await self.sys_homeassistant.secrets.reload()
# Extend schema with add-on specific validation
addon_schema = SCHEMA_OPTIONS.extend(
{vol.Optional(ATTR_OPTIONS): vol.Maybe(addon.schema)}
)
# Validate/Process Body
body = await api_validate(SCHEMA_OPTIONS, request)
body = await api_validate(addon_schema, request)
if ATTR_OPTIONS in body:
# None resets options to defaults, otherwise validate the options
if body[ATTR_OPTIONS] is None:
addon.options = None
else:
try:
addon.options = addon.schema(body[ATTR_OPTIONS])
except vol.Invalid as ex:
raise AddonConfigurationInvalidError(
addon=addon.slug,
validation_error=humanize_error(body[ATTR_OPTIONS], ex),
) from None
addon.options = body[ATTR_OPTIONS]
if ATTR_BOOT in body:
if addon.boot_config == AddonBootConfig.MANUAL_ONLY:
raise AddonBootConfigCannotChangeError(
addon=addon.slug, boot_config=addon.boot_config.value
raise APIError(
f"Addon {addon.slug} boot option is set to {addon.boot_config} so it cannot be changed"
)
addon.boot = body[ATTR_BOOT]
if ATTR_AUTO_UPDATE in body:
@@ -394,7 +385,7 @@ class APIAddons(CoreSysAttributes):
return data
@api_process
async def options_config(self, request: web.Request) -> dict[str, Any]:
async def options_config(self, request: web.Request) -> None:
"""Validate user options for add-on."""
slug: str = request.match_info["addon"]
if slug != "self":
@@ -439,11 +430,11 @@ class APIAddons(CoreSysAttributes):
}
@api_process
async def uninstall(self, request: web.Request) -> None:
async def uninstall(self, request: web.Request) -> Awaitable[None]:
"""Uninstall add-on."""
addon = self.get_addon_for_request(request)
body: dict[str, Any] = await api_validate(SCHEMA_UNINSTALL, request)
await asyncio.shield(
return await asyncio.shield(
self.sys_addons.uninstall(
addon.slug, remove_config=body[ATTR_REMOVE_CONFIG]
)
@@ -485,7 +476,7 @@ class APIAddons(CoreSysAttributes):
"""Write to stdin of add-on."""
addon = self.get_addon_for_request(request)
if not addon.with_stdin:
raise AddonNotSupportedWriteStdinError(_LOGGER.error, addon=addon.slug)
raise APIError(f"STDIN not supported the {addon.slug} add-on")
data = await request.read()
await asyncio.shield(addon.write_stdin(data))

View File

@@ -15,7 +15,7 @@ import voluptuous as vol
from ..addons.addon import Addon
from ..const import ATTR_NAME, ATTR_PASSWORD, ATTR_USERNAME, REQUEST_FROM
from ..coresys import CoreSysAttributes
from ..exceptions import APIForbidden, AuthInvalidNonStringValueError
from ..exceptions import APIForbidden
from .const import (
ATTR_GROUP_IDS,
ATTR_IS_ACTIVE,
@@ -69,9 +69,7 @@ class APIAuth(CoreSysAttributes):
try:
_ = username.encode and password.encode # type: ignore
except AttributeError:
raise AuthInvalidNonStringValueError(
_LOGGER.error, headers=REALM_HEADER
) from None
raise HTTPUnauthorized(headers=REALM_HEADER) from None
return self.sys_auth.check_login(
addon, cast(str, username), cast(str, password)

View File

@@ -211,7 +211,7 @@ class APIBackups(CoreSysAttributes):
await self.sys_backups.save_data()
@api_process
async def reload(self, _: web.Request) -> bool:
async def reload(self, _):
"""Reload backup list."""
await asyncio.shield(self.sys_backups.reload())
return True
@@ -421,7 +421,7 @@ class APIBackups(CoreSysAttributes):
await self.sys_backups.remove(backup, locations=locations)
@api_process
async def download(self, request: web.Request) -> web.StreamResponse:
async def download(self, request: web.Request):
"""Download a backup file."""
backup = self._extract_slug(request)
# Query will give us '' for /backups, convert value to None
@@ -451,7 +451,7 @@ class APIBackups(CoreSysAttributes):
return response
@api_process
async def upload(self, request: web.Request) -> dict[str, str] | bool:
async def upload(self, request: web.Request):
"""Upload a backup file."""
location: LOCATION_TYPE = None
locations: list[LOCATION_TYPE] | None = None

View File

@@ -7,6 +7,8 @@ from aiohttp import web
from awesomeversion import AwesomeVersion
import voluptuous as vol
from supervisor.resolution.const import ContextType, IssueType, SuggestionType
from ..const import (
ATTR_ENABLE_IPV6,
ATTR_HOSTNAME,
@@ -21,7 +23,6 @@ from ..const import (
)
from ..coresys import CoreSysAttributes
from ..exceptions import APINotFound
from ..resolution.const import ContextType, IssueType, SuggestionType
from .utils import api_process, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -45,7 +46,7 @@ SCHEMA_OPTIONS = vol.Schema(
SCHEMA_MIGRATE_DOCKER_STORAGE_DRIVER = vol.Schema(
{
vol.Required(ATTR_STORAGE_DRIVER): vol.In(["overlayfs"]),
vol.Required(ATTR_STORAGE_DRIVER): vol.In(["overlayfs", "overlay2"]),
}
)
@@ -54,7 +55,7 @@ class APIDocker(CoreSysAttributes):
"""Handle RESTful API for Docker configuration."""
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
async def info(self, request: web.Request):
"""Get docker info."""
data_registries = {}
for hostname, registry in self.sys_docker.config.registries.items():
@@ -112,7 +113,7 @@ class APIDocker(CoreSysAttributes):
return {ATTR_REGISTRIES: data_registries}
@api_process
async def create_registry(self, request: web.Request) -> None:
async def create_registry(self, request: web.Request):
"""Create a new docker registry."""
body = await api_validate(SCHEMA_DOCKER_REGISTRY, request)
@@ -122,7 +123,7 @@ class APIDocker(CoreSysAttributes):
await self.sys_docker.config.save_data()
@api_process
async def remove_registry(self, request: web.Request) -> None:
async def remove_registry(self, request: web.Request):
"""Delete a docker registry."""
hostname = request.match_info.get(ATTR_HOSTNAME)
if hostname not in self.sys_docker.config.registries:

View File

@@ -18,7 +18,6 @@ from ..const import (
ATTR_BLK_WRITE,
ATTR_BOOT,
ATTR_CPU_PERCENT,
ATTR_DUPLICATE_LOG_FILE,
ATTR_IMAGE,
ATTR_IP_ADDRESS,
ATTR_JOB_ID,
@@ -56,7 +55,6 @@ SCHEMA_OPTIONS = vol.Schema(
vol.Optional(ATTR_AUDIO_OUTPUT): vol.Maybe(str),
vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(str),
vol.Optional(ATTR_BACKUPS_EXCLUDE_DATABASE): vol.Boolean(),
vol.Optional(ATTR_DUPLICATE_LOG_FILE): vol.Boolean(),
}
)
@@ -114,7 +112,6 @@ class APIHomeAssistant(CoreSysAttributes):
ATTR_AUDIO_INPUT: self.sys_homeassistant.audio_input,
ATTR_AUDIO_OUTPUT: self.sys_homeassistant.audio_output,
ATTR_BACKUPS_EXCLUDE_DATABASE: self.sys_homeassistant.backups_exclude_database,
ATTR_DUPLICATE_LOG_FILE: self.sys_homeassistant.duplicate_log_file,
}
@api_process
@@ -154,13 +151,10 @@ class APIHomeAssistant(CoreSysAttributes):
ATTR_BACKUPS_EXCLUDE_DATABASE
]
if ATTR_DUPLICATE_LOG_FILE in body:
self.sys_homeassistant.duplicate_log_file = body[ATTR_DUPLICATE_LOG_FILE]
await self.sys_homeassistant.save_data()
@api_process
async def stats(self, request: web.Request) -> dict[str, Any]:
async def stats(self, request: web.Request) -> dict[Any, str]:
"""Return resource information."""
stats = await self.sys_homeassistant.core.stats()
if not stats:
@@ -197,7 +191,7 @@ class APIHomeAssistant(CoreSysAttributes):
return await update_task
@api_process
async def stop(self, request: web.Request) -> None:
async def stop(self, request: web.Request) -> Awaitable[None]:
"""Stop Home Assistant."""
body = await api_validate(SCHEMA_STOP, request)
await self._check_offline_migration(force=body[ATTR_FORCE])

View File

@@ -1,7 +1,6 @@
"""Init file for Supervisor host RESTful API."""
import asyncio
from collections.abc import Awaitable
from contextlib import suppress
import json
import logging
@@ -100,7 +99,7 @@ class APIHost(CoreSysAttributes):
)
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
async def info(self, request):
"""Return host information."""
return {
ATTR_AGENT_VERSION: self.sys_dbus.agent.version,
@@ -129,7 +128,7 @@ class APIHost(CoreSysAttributes):
}
@api_process
async def options(self, request: web.Request) -> None:
async def options(self, request):
"""Edit host settings."""
body = await api_validate(SCHEMA_OPTIONS, request)
@@ -140,7 +139,7 @@ class APIHost(CoreSysAttributes):
)
@api_process
async def reboot(self, request: web.Request) -> None:
async def reboot(self, request):
"""Reboot host."""
body = await api_validate(SCHEMA_SHUTDOWN, request)
await self._check_ha_offline_migration(force=body[ATTR_FORCE])
@@ -148,7 +147,7 @@ class APIHost(CoreSysAttributes):
return await asyncio.shield(self.sys_host.control.reboot())
@api_process
async def shutdown(self, request: web.Request) -> None:
async def shutdown(self, request):
"""Poweroff host."""
body = await api_validate(SCHEMA_SHUTDOWN, request)
await self._check_ha_offline_migration(force=body[ATTR_FORCE])
@@ -156,12 +155,12 @@ class APIHost(CoreSysAttributes):
return await asyncio.shield(self.sys_host.control.shutdown())
@api_process
def reload(self, request: web.Request) -> Awaitable[None]:
def reload(self, request):
"""Reload host data."""
return asyncio.shield(self.sys_host.reload())
@api_process
async def services(self, request: web.Request) -> dict[str, Any]:
async def services(self, request):
"""Return list of available services."""
services = []
for unit in self.sys_host.services:
@@ -176,7 +175,7 @@ class APIHost(CoreSysAttributes):
return {ATTR_SERVICES: services}
@api_process
async def list_boots(self, _: web.Request) -> dict[str, Any]:
async def list_boots(self, _: web.Request):
"""Return a list of boot IDs."""
boot_ids = await self.sys_host.logs.get_boot_ids()
return {
@@ -187,7 +186,7 @@ class APIHost(CoreSysAttributes):
}
@api_process
async def list_identifiers(self, _: web.Request) -> dict[str, list[str]]:
async def list_identifiers(self, _: web.Request):
"""Return a list of syslog identifiers."""
return {ATTR_IDENTIFIERS: await self.sys_host.logs.get_identifiers()}
@@ -333,7 +332,7 @@ class APIHost(CoreSysAttributes):
)
@api_process
async def disk_usage(self, request: web.Request) -> dict[str, Any]:
async def disk_usage(self, request: web.Request) -> dict:
"""Return a breakdown of storage usage for the system."""
max_depth = request.query.get(ATTR_MAX_DEPTH, 1)

View File

@@ -1,15 +1,17 @@
"""Handle security part of this API."""
from collections.abc import Awaitable, Callable
from collections.abc import Callable
import logging
import re
from typing import Final
from urllib.parse import unquote
from aiohttp.web import Request, StreamResponse, middleware
from aiohttp.web import Request, Response, middleware
from aiohttp.web_exceptions import HTTPBadRequest, HTTPForbidden, HTTPUnauthorized
from awesomeversion import AwesomeVersion
from supervisor.homeassistant.const import LANDINGPAGE
from ...addons.const import RE_SLUG
from ...const import (
REQUEST_FROM,
@@ -21,7 +23,6 @@ from ...const import (
VALID_API_STATES,
)
from ...coresys import CoreSys, CoreSysAttributes
from ...homeassistant.const import LANDINGPAGE
from ...utils import version_is_new_enough
from ..utils import api_return_error, extract_supervisor_token
@@ -88,7 +89,7 @@ CORE_ONLY_PATHS: Final = re.compile(
)
# Policy role add-on API access
ADDONS_ROLE_ACCESS: dict[str, re.Pattern[str]] = {
ADDONS_ROLE_ACCESS: dict[str, re.Pattern] = {
ROLE_DEFAULT: re.compile(
r"^(?:"
r"|/.+/info"
@@ -179,9 +180,7 @@ class SecurityMiddleware(CoreSysAttributes):
return unquoted
@middleware
async def block_bad_requests(
self, request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
) -> StreamResponse:
async def block_bad_requests(self, request: Request, handler: Callable) -> Response:
"""Process request and tblock commonly known exploit attempts."""
if FILTERS.search(self._recursive_unquote(request.path)):
_LOGGER.warning(
@@ -199,9 +198,7 @@ class SecurityMiddleware(CoreSysAttributes):
return await handler(request)
@middleware
async def system_validation(
self, request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
) -> StreamResponse:
async def system_validation(self, request: Request, handler: Callable) -> Response:
"""Check if core is ready to response."""
if self.sys_core.state not in VALID_API_STATES:
return api_return_error(
@@ -211,9 +208,7 @@ class SecurityMiddleware(CoreSysAttributes):
return await handler(request)
@middleware
async def token_validation(
self, request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
) -> StreamResponse:
async def token_validation(self, request: Request, handler: Callable) -> Response:
"""Check security access of this layer."""
request_from: CoreSysAttributes | None = None
supervisor_token = extract_supervisor_token(request)
@@ -284,9 +279,7 @@ class SecurityMiddleware(CoreSysAttributes):
raise HTTPForbidden()
@middleware
async def core_proxy(
self, request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
) -> StreamResponse:
async def core_proxy(self, request: Request, handler: Callable) -> Response:
"""Validate user from Core API proxy."""
if (
request[REQUEST_FROM] != self.sys_homeassistant

View File

@@ -13,10 +13,11 @@ from aiohttp.hdrs import AUTHORIZATION, CONTENT_TYPE
from aiohttp.http_websocket import WSMsgType
from aiohttp.web_exceptions import HTTPBadGateway, HTTPUnauthorized
from supervisor.utils.logging import AddonLoggerAdapter
from ..coresys import CoreSysAttributes
from ..exceptions import APIError, HomeAssistantAPIError, HomeAssistantAuthError
from ..utils.json import json_dumps
from ..utils.logging import AddonLoggerAdapter
_LOGGER: logging.Logger = logging.getLogger(__name__)

View File

@@ -5,9 +5,10 @@ from typing import Any
from aiohttp import web
import voluptuous as vol
from supervisor.exceptions import APIGone
from ..const import ATTR_FORCE_SECURITY, ATTR_PWNED
from ..coresys import CoreSysAttributes
from ..exceptions import APIGone
from .utils import api_process, api_validate
# pylint: disable=no-value-for-parameter

View File

@@ -1,9 +1,5 @@
"""Init file for Supervisor network RESTful API."""
from typing import Any
from aiohttp import web
from ..const import (
ATTR_AVAILABLE,
ATTR_PROVIDERS,
@@ -29,7 +25,7 @@ class APIServices(CoreSysAttributes):
return service
@api_process
async def list_services(self, request: web.Request) -> dict[str, Any]:
async def list_services(self, request):
"""Show register services."""
services = []
for service in self.sys_services.list_services:
@@ -44,7 +40,7 @@ class APIServices(CoreSysAttributes):
return {ATTR_SERVICES: services}
@api_process
async def set_service(self, request: web.Request) -> None:
async def set_service(self, request):
"""Write data into a service."""
service = self._extract_service(request)
body = await api_validate(service.schema, request)
@@ -54,7 +50,7 @@ class APIServices(CoreSysAttributes):
await service.set_service_data(addon, body)
@api_process
async def get_service(self, request: web.Request) -> dict[str, Any]:
async def get_service(self, request):
"""Read data into a service."""
service = self._extract_service(request)
@@ -66,7 +62,7 @@ class APIServices(CoreSysAttributes):
return service.get_service_data()
@api_process
async def del_service(self, request: web.Request) -> None:
async def del_service(self, request):
"""Delete data into a service."""
service = self._extract_service(request)
addon = request[REQUEST_FROM]

View File

@@ -53,8 +53,7 @@ from ..const import (
REQUEST_FROM,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIError, APIForbidden, APINotFound, StoreAddonNotFoundError
from ..resolution.const import ContextType, SuggestionType
from ..exceptions import APIError, APIForbidden, APINotFound
from ..store.addon import AddonStore
from ..store.repository import Repository
from ..store.validate import validate_repository
@@ -105,7 +104,7 @@ class APIStore(CoreSysAttributes):
addon_slug: str = request.match_info["addon"]
if not (addon := self.sys_addons.get(addon_slug)):
raise StoreAddonNotFoundError(addon=addon_slug)
raise APINotFound(f"Addon {addon_slug} does not exist")
if installed and not addon.is_installed:
raise APIError(f"Addon {addon_slug} is not installed")
@@ -113,7 +112,7 @@ class APIStore(CoreSysAttributes):
if not installed and addon.is_installed:
addon = cast(Addon, addon)
if not addon.addon_store:
raise StoreAddonNotFoundError(addon=addon_slug)
raise APINotFound(f"Addon {addon_slug} does not exist in the store")
return addon.addon_store
return addon
@@ -350,30 +349,13 @@ class APIStore(CoreSysAttributes):
return self._generate_repository_information(repository)
@api_process
async def add_repository(self, request: web.Request) -> None:
async def add_repository(self, request: web.Request):
"""Add repository to the store."""
body = await api_validate(SCHEMA_ADD_REPOSITORY, request)
await asyncio.shield(self.sys_store.add_repository(body[ATTR_REPOSITORY]))
@api_process
async def remove_repository(self, request: web.Request) -> None:
async def remove_repository(self, request: web.Request):
"""Remove repository from the store."""
repository: Repository = self._extract_repository(request)
await asyncio.shield(self.sys_store.remove_repository(repository))
@api_process
async def repositories_repository_repair(self, request: web.Request) -> None:
"""Repair repository."""
repository: Repository = self._extract_repository(request)
await asyncio.shield(repository.reset())
# If we have an execute reset suggestion on this repository, dismiss it and the issue
for suggestion in self.sys_resolution.suggestions:
if (
suggestion.type == SuggestionType.EXECUTE_RESET
and suggestion.context == ContextType.STORE
and suggestion.reference == repository.slug
):
for issue in self.sys_resolution.issues_for_suggestion(suggestion):
self.sys_resolution.dismiss_issue(issue)
return

View File

@@ -80,7 +80,7 @@ class APISupervisor(CoreSysAttributes):
"""Handle RESTful API for Supervisor functions."""
@api_process
async def ping(self, request: web.Request) -> bool:
async def ping(self, request):
"""Return ok for signal that the API is ready."""
return True

View File

@@ -1,7 +1,7 @@
"""Init file for Supervisor util for RESTful API."""
import asyncio
from collections.abc import Callable, Mapping
from collections.abc import Callable
import json
from typing import Any, cast
@@ -26,7 +26,7 @@ from ..const import (
RESULT_OK,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import APIError, DockerAPIError, HassioError
from ..exceptions import APIError, BackupFileNotFoundError, DockerAPIError, HassioError
from ..jobs import JobSchedulerOptions, SupervisorJob
from ..utils import check_exception_chain, get_message_from_exception_chain
from ..utils.json import json_dumps, json_loads as json_loads_util
@@ -67,10 +67,10 @@ def api_process(method):
"""Return API information."""
try:
answer = await method(*args, **kwargs)
except BackupFileNotFoundError as err:
return api_return_error(err, status=404)
except APIError as err:
return api_return_error(
err, status=err.status, job_id=err.job_id, headers=err.headers
)
return api_return_error(err, status=err.status, job_id=err.job_id)
except HassioError as err:
return api_return_error(err)
@@ -139,7 +139,6 @@ def api_return_error(
error_type: str | None = None,
status: int = 400,
*,
headers: Mapping[str, str] | None = None,
job_id: str | None = None,
) -> web.Response:
"""Return an API error message."""
@@ -152,15 +151,10 @@ def api_return_error(
match error_type:
case const.CONTENT_TYPE_TEXT:
return web.Response(
body=message, content_type=error_type, status=status, headers=headers
)
return web.Response(body=message, content_type=error_type, status=status)
case const.CONTENT_TYPE_BINARY:
return web.Response(
body=message.encode(),
content_type=error_type,
status=status,
headers=headers,
body=message.encode(), content_type=error_type, status=status
)
case _:
result: dict[str, Any] = {
@@ -178,7 +172,6 @@ def api_return_error(
result,
status=status,
dumps=json_dumps,
headers=headers,
)

View File

@@ -9,10 +9,8 @@ from .addons.addon import Addon
from .const import ATTR_PASSWORD, ATTR_TYPE, ATTR_USERNAME, FILE_HASSIO_AUTH
from .coresys import CoreSys, CoreSysAttributes
from .exceptions import (
AuthHomeAssistantAPIValidationError,
AuthInvalidNonStringValueError,
AuthError,
AuthListUsersError,
AuthListUsersNoneResponseError,
AuthPasswordResetError,
HomeAssistantAPIError,
HomeAssistantWSError,
@@ -85,8 +83,10 @@ class Auth(FileConfiguration, CoreSysAttributes):
self, addon: Addon, username: str | None, password: str | None
) -> bool:
"""Check username login."""
if username is None or password is None:
raise AuthInvalidNonStringValueError(_LOGGER.error)
if password is None:
raise AuthError("None as password is not supported!", _LOGGER.error)
if username is None:
raise AuthError("None as username is not supported!", _LOGGER.error)
_LOGGER.info("Auth request from '%s' for '%s'", addon.slug, username)
@@ -137,7 +137,7 @@ class Auth(FileConfiguration, CoreSysAttributes):
finally:
self._running.pop(username, None)
raise AuthHomeAssistantAPIValidationError()
raise AuthError()
async def change_password(self, username: str, password: str) -> None:
"""Change user password login."""
@@ -155,7 +155,7 @@ class Auth(FileConfiguration, CoreSysAttributes):
except HomeAssistantAPIError as err:
_LOGGER.error("Can't request password reset on Home Assistant: %s", err)
raise AuthPasswordResetError(user=username)
raise AuthPasswordResetError()
async def list_users(self) -> list[dict[str, Any]]:
"""List users on the Home Assistant instance."""
@@ -166,12 +166,15 @@ class Auth(FileConfiguration, CoreSysAttributes):
{ATTR_TYPE: "config/auth/list"}
)
except HomeAssistantWSError as err:
_LOGGER.error("Can't request listing users on Home Assistant: %s", err)
raise AuthListUsersError() from err
raise AuthListUsersError(
f"Can't request listing users on Home Assistant: {err}", _LOGGER.error
) from err
if users is not None:
return users
raise AuthListUsersNoneResponseError(_LOGGER.error)
raise AuthListUsersError(
"Can't request listing users on Home Assistant!", _LOGGER.error
)
@staticmethod
def _rehash(value: str, salt2: str = "") -> str:

View File

@@ -60,6 +60,7 @@ from ..utils.dt import parse_datetime, utcnow
from ..utils.json import json_bytes
from ..utils.sentinel import DEFAULT
from .const import BUF_SIZE, LOCATION_CLOUD_BACKUP, BackupType
from .utils import password_to_key
from .validate import SCHEMA_BACKUP
IGNORED_COMPARISON_FIELDS = {ATTR_PROTECTED, ATTR_CRYPTO, ATTR_DOCKER}
@@ -100,7 +101,7 @@ class Backup(JobGroup):
self._data: dict[str, Any] = data or {ATTR_SLUG: slug}
self._tmp: TemporaryDirectory | None = None
self._outer_secure_tarfile: SecureTarFile | None = None
self._password: str | None = None
self._key: bytes | None = None
self._locations: dict[str | None, BackupLocation] = {
location: BackupLocation(
path=tar_file,
@@ -326,7 +327,7 @@ class Backup(JobGroup):
# Set password
if password:
self._password = password
self._init_password(password)
self._data[ATTR_PROTECTED] = True
self._data[ATTR_CRYPTO] = CRYPTO_AES128
self._locations[self.location].protected = True
@@ -336,7 +337,14 @@ class Backup(JobGroup):
def set_password(self, password: str | None) -> None:
"""Set the password for an existing backup."""
self._password = password
if password:
self._init_password(password)
else:
self._key = None
def _init_password(self, password: str) -> None:
"""Create key from password."""
self._key = password_to_key(password)
async def validate_backup(self, location: str | None) -> None:
"""Validate backup.
@@ -366,9 +374,9 @@ class Backup(JobGroup):
with SecureTarFile(
ending, # Not used
gzip=self.compressed,
key=self._key,
mode="r",
fileobj=test_tar_file,
password=self._password,
):
# If we can read the tar file, the password is correct
return
@@ -584,7 +592,7 @@ class Backup(JobGroup):
addon_file = self._outer_secure_tarfile.create_inner_tar(
f"./{tar_name}",
gzip=self.compressed,
password=self._password,
key=self._key,
)
# Take backup
try:
@@ -620,6 +628,9 @@ class Backup(JobGroup):
if start_task := await self._addon_save(addon):
start_tasks.append(start_task)
except BackupError as err:
err = BackupError(
f"Can't backup add-on {addon.slug}: {str(err)}", _LOGGER.error
)
self.sys_jobs.current.capture_error(err)
return start_tasks
@@ -635,9 +646,9 @@ class Backup(JobGroup):
addon_file = SecureTarFile(
Path(self._tmp.name, tar_name),
"r",
key=self._key,
gzip=self.compressed,
bufsize=BUF_SIZE,
password=self._password,
)
# If exists inside backup
@@ -733,7 +744,7 @@ class Backup(JobGroup):
with outer_secure_tarfile.create_inner_tar(
f"./{tar_name}",
gzip=self.compressed,
password=self._password,
key=self._key,
) as tar_file:
atomic_contents_add(
tar_file,
@@ -794,9 +805,9 @@ class Backup(JobGroup):
with SecureTarFile(
tar_name,
"r",
key=self._key,
gzip=self.compressed,
bufsize=BUF_SIZE,
password=self._password,
) as tar_file:
tar_file.extractall(
path=origin_dir, members=tar_file, filter="fully_trusted"
@@ -857,7 +868,7 @@ class Backup(JobGroup):
homeassistant_file = self._outer_secure_tarfile.create_inner_tar(
f"./{tar_name}",
gzip=self.compressed,
password=self._password,
key=self._key,
)
await self.sys_homeassistant.backup(homeassistant_file, exclude_database)
@@ -880,11 +891,7 @@ class Backup(JobGroup):
self._tmp.name, f"homeassistant.tar{'.gz' if self.compressed else ''}"
)
homeassistant_file = SecureTarFile(
tar_name,
"r",
gzip=self.compressed,
bufsize=BUF_SIZE,
password=self._password,
tar_name, "r", key=self._key, gzip=self.compressed, bufsize=BUF_SIZE
)
await self.sys_homeassistant.restore(

View File

@@ -6,6 +6,21 @@ import re
RE_DIGITS = re.compile(r"\d+")
def password_to_key(password: str) -> bytes:
"""Generate a AES Key from password."""
key: bytes = password.encode()
for _ in range(100):
key = hashlib.sha256(key).digest()
return key[:16]
def key_to_iv(key: bytes) -> bytes:
"""Generate an iv from Key."""
for _ in range(100):
key = hashlib.sha256(key).digest()
return key[:16]
def create_slug(name: str, date_str: str) -> str:
"""Generate a hash from repository."""
key = f"{date_str} - {name}".lower().encode()

View File

@@ -179,7 +179,6 @@ ATTR_DOCKER = "docker"
ATTR_DOCKER_API = "docker_api"
ATTR_DOCUMENTATION = "documentation"
ATTR_DOMAINS = "domains"
ATTR_DUPLICATE_LOG_FILE = "duplicate_log_file"
ATTR_ENABLE = "enable"
ATTR_ENABLE_IPV6 = "enable_ipv6"
ATTR_ENABLED = "enabled"

View File

@@ -2,7 +2,8 @@
from typing import Any
from ...utils import dbus_connected
from supervisor.dbus.utils import dbus_connected
from .const import BOARD_NAME_SUPERVISED
from .interface import BoardProxy

View File

@@ -250,7 +250,7 @@ class ConnectionType(StrEnum):
WIRELESS = "802-11-wireless"
class ConnectionState(IntEnum):
class ConnectionStateType(IntEnum):
"""Connection states.
https://networkmanager.dev/docs/api/latest/nm-dbus-types.html#NMActiveConnectionState

View File

@@ -7,7 +7,8 @@ from typing import Any
from dbus_fast.aio.message_bus import MessageBus
from ..exceptions import DBusInterfaceError, DBusNotConnectedError
from supervisor.exceptions import DBusInterfaceError, DBusNotConnectedError
from ..utils.dbus import DBus
from .utils import dbus_connected

View File

@@ -90,8 +90,8 @@ class Ip4Properties(IpProperties):
class Ip6Properties(IpProperties):
"""IPv6 properties object for Network Manager."""
addr_gen_mode: int | None
ip6_privacy: int | None
addr_gen_mode: int
ip6_privacy: int
dns: list[bytes] | None

View File

@@ -2,6 +2,8 @@
from typing import Any
from supervisor.dbus.network.setting import NetworkSetting
from ..const import (
DBUS_ATTR_CONNECTION,
DBUS_ATTR_ID,
@@ -14,13 +16,12 @@ from ..const import (
DBUS_IFACE_CONNECTION_ACTIVE,
DBUS_NAME_NM,
DBUS_OBJECT_BASE,
ConnectionState,
ConnectionStateFlags,
ConnectionStateType,
)
from ..interface import DBusInterfaceProxy, dbus_property
from ..utils import dbus_connected
from .ip_configuration import IpConfiguration
from .setting import NetworkSetting
class NetworkConnection(DBusInterfaceProxy):
@@ -66,9 +67,9 @@ class NetworkConnection(DBusInterfaceProxy):
@property
@dbus_property
def state(self) -> ConnectionState:
def state(self) -> ConnectionStateType:
"""Return the state of the connection."""
return ConnectionState(self.properties[DBUS_ATTR_STATE])
return ConnectionStateType(self.properties[DBUS_ATTR_STATE])
@property
def state_flags(self) -> set[ConnectionStateFlags]:

View File

@@ -16,11 +16,7 @@ from ....host.const import (
InterfaceType,
MulticastDnsMode,
)
from ...const import (
InterfaceAddrGenMode as NMInterfaceAddrGenMode,
InterfaceIp6Privacy as NMInterfaceIp6Privacy,
MulticastDnsValue,
)
from ...const import MulticastDnsValue
from .. import NetworkManager
from . import (
CONF_ATTR_802_ETHERNET,
@@ -122,41 +118,24 @@ def _get_ipv6_connection_settings(
ipv6[CONF_ATTR_IPV6_METHOD] = Variant("s", "auto")
if ipv6setting:
if ipv6setting.addr_gen_mode == InterfaceAddrGenMode.EUI64:
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
"i", NMInterfaceAddrGenMode.EUI64.value
)
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 0)
elif (
not support_addr_gen_mode_defaults
or ipv6setting.addr_gen_mode == InterfaceAddrGenMode.STABLE_PRIVACY
):
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
"i", NMInterfaceAddrGenMode.STABLE_PRIVACY.value
)
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 1)
elif ipv6setting.addr_gen_mode == InterfaceAddrGenMode.DEFAULT_OR_EUI64:
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
"i", NMInterfaceAddrGenMode.DEFAULT_OR_EUI64.value
)
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 2)
else:
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
"i", NMInterfaceAddrGenMode.DEFAULT.value
)
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 3)
if ipv6setting.ip6_privacy == InterfaceIp6Privacy.DISABLED:
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
"i", NMInterfaceIp6Privacy.DISABLED.value
)
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 0)
elif ipv6setting.ip6_privacy == InterfaceIp6Privacy.ENABLED_PREFER_PUBLIC:
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
"i", NMInterfaceIp6Privacy.ENABLED_PREFER_PUBLIC.value
)
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 1)
elif ipv6setting.ip6_privacy == InterfaceIp6Privacy.ENABLED:
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
"i", NMInterfaceIp6Privacy.ENABLED.value
)
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 2)
else:
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
"i", NMInterfaceIp6Privacy.DEFAULT.value
)
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", -1)
elif ipv6setting.method == InterfaceMethod.DISABLED:
ipv6[CONF_ATTR_IPV6_METHOD] = Variant("s", "link-local")
elif ipv6setting.method == InterfaceMethod.STATIC:

View File

@@ -2,7 +2,6 @@
from __future__ import annotations
from collections.abc import Awaitable
from contextlib import suppress
from ipaddress import IPv4Address
import logging
@@ -10,13 +9,14 @@ import os
from pathlib import Path
from socket import SocketIO
import tempfile
from typing import TYPE_CHECKING, Literal, cast
from typing import TYPE_CHECKING, cast
import aiodocker
from attr import evolve
from awesomeversion import AwesomeVersion
import docker
import docker.errors
from docker.types import Mount
import requests
from ..addons.build import AddonBuild
@@ -35,7 +35,6 @@ from ..coresys import CoreSys
from ..exceptions import (
CoreDNSError,
DBusError,
DockerBuildError,
DockerError,
DockerJobError,
DockerNotFound,
@@ -67,11 +66,8 @@ from .const import (
PATH_SHARE,
PATH_SSL,
Capabilities,
DockerMount,
MountBindOptions,
MountType,
PropagationMode,
Ulimit,
)
from .interface import DockerInterface
@@ -274,7 +270,7 @@ class DockerAddon(DockerInterface):
}
@property
def network_mode(self) -> Literal["host"] | None:
def network_mode(self) -> str | None:
"""Return network mode for add-on."""
if self.addon.host_network:
return "host"
@@ -313,28 +309,28 @@ class DockerAddon(DockerInterface):
return None
@property
def ulimits(self) -> list[Ulimit] | None:
def ulimits(self) -> list[docker.types.Ulimit] | None:
"""Generate ulimits for add-on."""
limits: list[Ulimit] = []
limits: list[docker.types.Ulimit] = []
# Need schedule functions
if self.addon.with_realtime:
limits.append(Ulimit(name="rtprio", soft=90, hard=99))
limits.append(docker.types.Ulimit(name="rtprio", soft=90, hard=99))
# Set available memory for memlock to 128MB
mem = 128 * 1024 * 1024
limits.append(Ulimit(name="memlock", soft=mem, hard=mem))
limits.append(docker.types.Ulimit(name="memlock", soft=mem, hard=mem))
# Add configurable ulimits from add-on config
for name, config in self.addon.ulimits.items():
if isinstance(config, int):
# Simple format: both soft and hard limits are the same
limits.append(Ulimit(name=name, soft=config, hard=config))
limits.append(docker.types.Ulimit(name=name, soft=config, hard=config))
elif isinstance(config, dict):
# Detailed format: both soft and hard limits are mandatory
soft = config["soft"]
hard = config["hard"]
limits.append(Ulimit(name=name, soft=soft, hard=hard))
limits.append(docker.types.Ulimit(name=name, soft=soft, hard=hard))
# Return None if no ulimits are present
if limits:
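For context, the add-on `ulimits` option handled above accepts either a bare integer (soft and hard limit identical) or a mapping with explicit soft/hard values. A small illustrative sketch of the translation, using plain dicts shaped like the Docker Engine API entries (the config values here are made up):

# Hypothetical add-on config; the two shapes mirror the branches above.
ulimits_config = {
    "nofile": 65535,                       # simple form: soft == hard
    "core": {"soft": 0, "hard": 102400},   # detailed form: explicit soft/hard
}

limits = []
for name, config in ulimits_config.items():
    if isinstance(config, int):
        limits.append({"Name": name, "Soft": config, "Hard": config})
    else:
        limits.append({"Name": name, "Soft": config["soft"], "Hard": config["hard"]})

# [{'Name': 'nofile', 'Soft': 65535, 'Hard': 65535},
#  {'Name': 'core', 'Soft': 0, 'Hard': 102400}]
print(limits)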
@@ -353,7 +349,7 @@ class DockerAddon(DockerInterface):
return None
@property
def mounts(self) -> list[DockerMount]:
def mounts(self) -> list[Mount]:
"""Return mounts for container."""
addon_mapping = self.addon.map_volumes
@@ -363,8 +359,8 @@ class DockerAddon(DockerInterface):
mounts = [
MOUNT_DEV,
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.addon.path_extern_data.as_posix(),
target=target_data_path or PATH_PRIVATE_DATA.as_posix(),
read_only=False,
@@ -374,8 +370,8 @@ class DockerAddon(DockerInterface):
# setup config mappings
if MappingType.CONFIG in addon_mapping:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target=addon_mapping[MappingType.CONFIG].path
or PATH_HOMEASSISTANT_CONFIG_LEGACY.as_posix(),
@@ -387,8 +383,8 @@ class DockerAddon(DockerInterface):
# Map addon's public config folder if not using deprecated config option
if self.addon.addon_config_used:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.addon.path_extern_config.as_posix(),
target=addon_mapping[MappingType.ADDON_CONFIG].path
or PATH_PUBLIC_CONFIG.as_posix(),
@@ -399,8 +395,8 @@ class DockerAddon(DockerInterface):
# Map Home Assistant config in new way
if MappingType.HOMEASSISTANT_CONFIG in addon_mapping:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target=addon_mapping[MappingType.HOMEASSISTANT_CONFIG].path
or PATH_HOMEASSISTANT_CONFIG.as_posix(),
@@ -412,8 +408,8 @@ class DockerAddon(DockerInterface):
if MappingType.ALL_ADDON_CONFIGS in addon_mapping:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_addon_configs.as_posix(),
target=addon_mapping[MappingType.ALL_ADDON_CONFIGS].path
or PATH_ALL_ADDON_CONFIGS.as_posix(),
@@ -423,8 +419,8 @@ class DockerAddon(DockerInterface):
if MappingType.SSL in addon_mapping:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_ssl.as_posix(),
target=addon_mapping[MappingType.SSL].path or PATH_SSL.as_posix(),
read_only=addon_mapping[MappingType.SSL].read_only,
@@ -433,8 +429,8 @@ class DockerAddon(DockerInterface):
if MappingType.ADDONS in addon_mapping:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_addons_local.as_posix(),
target=addon_mapping[MappingType.ADDONS].path
or PATH_LOCAL_ADDONS.as_posix(),
@@ -444,8 +440,8 @@ class DockerAddon(DockerInterface):
if MappingType.BACKUP in addon_mapping:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_backup.as_posix(),
target=addon_mapping[MappingType.BACKUP].path
or PATH_BACKUP.as_posix(),
@@ -455,25 +451,25 @@ class DockerAddon(DockerInterface):
if MappingType.SHARE in addon_mapping:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_share.as_posix(),
target=addon_mapping[MappingType.SHARE].path
or PATH_SHARE.as_posix(),
read_only=addon_mapping[MappingType.SHARE].read_only,
bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
propagation=PropagationMode.RSLAVE,
)
)
if MappingType.MEDIA in addon_mapping:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_media.as_posix(),
target=addon_mapping[MappingType.MEDIA].path
or PATH_MEDIA.as_posix(),
read_only=addon_mapping[MappingType.MEDIA].read_only,
bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
propagation=PropagationMode.RSLAVE,
)
)
@@ -485,8 +481,8 @@ class DockerAddon(DockerInterface):
if not Path(gpio_path).exists():
continue
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=gpio_path,
target=gpio_path,
read_only=False,
@@ -496,8 +492,8 @@ class DockerAddon(DockerInterface):
# DeviceTree support
if self.addon.with_devicetree:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source="/sys/firmware/devicetree/base",
target="/device-tree",
read_only=True,
@@ -511,8 +507,8 @@ class DockerAddon(DockerInterface):
# Kernel Modules support
if self.addon.with_kernel_modules:
mounts.append(
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source="/lib/modules",
target="/lib/modules",
read_only=True,
@@ -530,20 +526,20 @@ class DockerAddon(DockerInterface):
# Configuration Audio
if self.addon.with_audio:
mounts += [
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.addon.path_extern_pulse.as_posix(),
target="/etc/pulse/client.conf",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_plugins.audio.path_extern_pulse.as_posix(),
target="/run/audio",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_plugins.audio.path_extern_asound.as_posix(),
target="/etc/asound.conf",
read_only=True,
@@ -553,14 +549,14 @@ class DockerAddon(DockerInterface):
# System Journal access
if self.addon.with_journald:
mounts += [
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=SYSTEMD_JOURNAL_PERSISTENT.as_posix(),
target=SYSTEMD_JOURNAL_PERSISTENT.as_posix(),
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=SYSTEMD_JOURNAL_VOLATILE.as_posix(),
target=SYSTEMD_JOURNAL_VOLATILE.as_posix(),
read_only=True,
@@ -686,12 +682,13 @@ class DockerAddon(DockerInterface):
async def _build(self, version: AwesomeVersion, image: str | None = None) -> None:
"""Build a Docker container."""
build_env = await AddonBuild(self.coresys, self.addon).load_config()
# Check if the build environment is valid, raises if not
await build_env.is_valid()
if not await build_env.is_valid():
_LOGGER.error("Invalid build environment, can't build this add-on!")
raise DockerError()
_LOGGER.info("Starting build for %s:%s", self.image, version)
def build_image() -> tuple[str, str]:
def build_image():
if build_env.squash:
_LOGGER.warning(
"Ignoring squash build option for %s as Docker BuildKit does not support it.",
@@ -708,9 +705,7 @@ class DockerAddon(DockerInterface):
# Remove dangling builder container if it exists by any chance
# E.g. because of an abrupt host shutdown/reboot during a build
with suppress(docker.errors.NotFound):
self.sys_docker.containers_legacy.get(builder_name).remove(
force=True, v=True
)
self.sys_docker.containers.get(builder_name).remove(force=True, v=True)
# Generate Docker config with registry credentials for base image if needed
docker_config_path: Path | None = None
@@ -766,9 +761,8 @@ class DockerAddon(DockerInterface):
requests.RequestException,
aiodocker.DockerError,
) as err:
raise DockerBuildError(
f"Can't build {self.image}:{version}: {err!s}", _LOGGER.error
) from err
_LOGGER.error("Can't build %s:%s: %s", self.image, version, err)
raise DockerError() from err
_LOGGER.info("Build %s:%s done", self.image, version)
@@ -826,9 +820,12 @@ class DockerAddon(DockerInterface):
on_condition=DockerJobError,
concurrency=JobConcurrency.GROUP_REJECT,
)
def write_stdin(self, data: bytes) -> Awaitable[None]:
async def write_stdin(self, data: bytes) -> None:
"""Write to add-on stdin."""
return self.sys_run_in_executor(self._write_stdin, data)
if not await self.is_running():
raise DockerError()
await self.sys_run_in_executor(self._write_stdin, data)
def _write_stdin(self, data: bytes) -> None:
"""Write to add-on stdin.
@@ -837,7 +834,7 @@ class DockerAddon(DockerInterface):
"""
try:
# Load needed docker objects
container = self.sys_docker.containers_legacy.get(self.name)
container = self.sys_docker.containers.get(self.name)
# attach_socket returns SocketIO for local Docker connections (Unix socket)
socket = cast(
SocketIO, container.attach_socket(params={"stdin": 1, "stream": 1})
@@ -900,7 +897,7 @@ class DockerAddon(DockerInterface):
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers_legacy.get, self.name
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
if self._hw_listener:

View File

@@ -2,6 +2,9 @@
import logging
import docker
from docker.types import Mount
from ..const import DOCKER_CPU_RUNTIME_ALLOCATION
from ..coresys import CoreSysAttributes
from ..exceptions import DockerJobError
@@ -16,9 +19,7 @@ from .const import (
MOUNT_UDEV,
PATH_PRIVATE_DATA,
Capabilities,
DockerMount,
MountType,
Ulimit,
)
from .interface import DockerInterface
@@ -41,12 +42,12 @@ class DockerAudio(DockerInterface, CoreSysAttributes):
return AUDIO_DOCKER_NAME
@property
def mounts(self) -> list[DockerMount]:
def mounts(self) -> list[Mount]:
"""Return mounts for container."""
mounts = [
MOUNT_DEV,
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_audio.as_posix(),
target=PATH_PRIVATE_DATA.as_posix(),
read_only=False,
@@ -74,10 +75,10 @@ class DockerAudio(DockerInterface, CoreSysAttributes):
return [Capabilities.SYS_NICE, Capabilities.SYS_RESOURCE]
@property
def ulimits(self) -> list[Ulimit]:
def ulimits(self) -> list[docker.types.Ulimit]:
"""Generate ulimits for audio."""
# Pulseaudio by default tries to use real-time scheduling with priority of 5.
return [Ulimit(name="rtprio", soft=10, hard=10)]
return [docker.types.Ulimit(name="rtprio", soft=10, hard=10)]
@property
def cpu_rt_runtime(self) -> int | None:

View File

@@ -2,24 +2,19 @@
from __future__ import annotations
from contextlib import suppress
from dataclasses import dataclass
from enum import Enum, StrEnum
from functools import total_ordering
from enum import StrEnum
from pathlib import PurePath
import re
from typing import Any, cast
from docker.types import Mount
from ..const import MACHINE_ID
RE_RETRYING_DOWNLOAD_STATUS = re.compile(r"Retrying in \d+ seconds?")
# Docker Hub registry identifier
DOCKER_HUB = "hub.docker.com"
# Docker Hub registry identifier (official default)
# Docker's default registry is docker.io
DOCKER_HUB = "docker.io"
# Legacy Docker Hub identifier for backward compatibility
DOCKER_HUB_LEGACY = "hub.docker.com"
# Regex to match images with a registry host (e.g., ghcr.io/org/image)
IMAGE_WITH_HOST = re.compile(r"^((?:[a-z0-9]+(?:-[a-z0-9]+)*\.)+[a-z]{2,})\/.+")
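The IMAGE_WITH_HOST pattern shown here only matches image references whose first path component looks like a dotted registry host; everything else is treated as a Docker Hub image. A quick illustration with hypothetical image names:

import re

# Same pattern as above, reproduced for a standalone check.
IMAGE_WITH_HOST = re.compile(r"^((?:[a-z0-9]+(?:-[a-z0-9]+)*\.)+[a-z]{2,})\/.+")

for image in (
    "ghcr.io/home-assistant/amd64-hassio-supervisor",  # explicit registry host
    "homeassistant/home-assistant",                    # no host -> Docker Hub
):
    match = IMAGE_WITH_HOST.match(image)
    print(image, "->", match.group(1) if match else "docker.io (default)")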
class Capabilities(StrEnum):
@@ -81,145 +76,33 @@ class PropagationMode(StrEnum):
RSLAVE = "rslave"
@total_ordering
class PullImageLayerStage(Enum):
"""Job stages for pulling an image layer.
These are a subset of the statuses in a docker pull image log. They
are the standardized ones that are the most useful to us.
"""
PULLING_FS_LAYER = 1, "Pulling fs layer"
RETRYING_DOWNLOAD = 2, "Retrying download"
DOWNLOADING = 2, "Downloading"
VERIFYING_CHECKSUM = 3, "Verifying Checksum"
DOWNLOAD_COMPLETE = 4, "Download complete"
EXTRACTING = 5, "Extracting"
PULL_COMPLETE = 6, "Pull complete"
def __init__(self, order: int, status: str) -> None:
"""Set fields from values."""
self.order = order
self.status = status
def __eq__(self, value: object, /) -> bool:
"""Check equality, allow StrEnum style comparisons on status."""
with suppress(AttributeError):
return self.status == cast(PullImageLayerStage, value).status
return self.status == value
def __lt__(self, other: object) -> bool:
"""Order instances."""
with suppress(AttributeError):
return self.order < cast(PullImageLayerStage, other).order
return False
def __hash__(self) -> int:
"""Hash instance."""
return hash(self.status)
@classmethod
def from_status(cls, status: str) -> PullImageLayerStage | None:
"""Return stage instance from pull log status."""
for i in cls:
if i.status == status:
return i
# This one includes the number of seconds until download, so it's not constant
if RE_RETRYING_DOWNLOAD_STATUS.match(status):
return cls.RETRYING_DOWNLOAD
return None
@dataclass(slots=True, frozen=True)
class MountBindOptions:
"""Bind options for docker mount."""
propagation: PropagationMode | None = None
read_only_non_recursive: bool | None = None
def to_dict(self) -> dict[str, Any]:
"""To dictionary representation."""
out: dict[str, Any] = {}
if self.propagation:
out["Propagation"] = self.propagation.value
if self.read_only_non_recursive is not None:
out["ReadOnlyNonRecursive"] = self.read_only_non_recursive
return out
@dataclass(slots=True, frozen=True)
class DockerMount:
"""A docker mount."""
type: MountType
source: str
target: str
read_only: bool
bind_options: MountBindOptions | None = None
def to_dict(self) -> dict[str, Any]:
"""To dictionary representation."""
out: dict[str, Any] = {
"Type": self.type.value,
"Source": self.source,
"Target": self.target,
"ReadOnly": self.read_only,
}
if self.bind_options:
out["BindOptions"] = self.bind_options.to_dict()
return out
@dataclass(slots=True, frozen=True)
class Ulimit:
"""A linux user limit."""
name: str
soft: int
hard: int
def to_dict(self) -> dict[str, str | int]:
"""To dictionary representation."""
return {
"Name": self.name,
"Soft": self.soft,
"Hard": self.hard,
}
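The dataclasses above replace docker-py's Mount/Ulimit types with plain structures whose to_dict() output matches the raw Docker Engine API payload. A short usage sketch, assuming they are importable from supervisor.docker.const and using an illustrative host path:

# Sketch only; assumes the dataclasses above live in supervisor.docker.const.
from supervisor.docker.const import (
    DockerMount,
    MountBindOptions,
    MountType,
    PropagationMode,
)

share_mount = DockerMount(
    type=MountType.BIND,  # assuming MountType.BIND.value == "bind"
    source="/mnt/data/supervisor/share",  # illustrative host path
    target="/share",
    read_only=False,
    bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
)

# Produces an Engine API "Mounts" entry:
# {'Type': 'bind', 'Source': '/mnt/data/supervisor/share', 'Target': '/share',
#  'ReadOnly': False, 'BindOptions': {'Propagation': 'rslave'}}
print(share_mount.to_dict())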
ENV_DUPLICATE_LOG_FILE = "HA_DUPLICATE_LOG_FILE"
ENV_TIME = "TZ"
ENV_TOKEN = "SUPERVISOR_TOKEN"
ENV_TOKEN_OLD = "HASSIO_TOKEN"
LABEL_MANAGED = "supervisor_managed"
MOUNT_DBUS = DockerMount(
type=MountType.BIND, source="/run/dbus", target="/run/dbus", read_only=True
MOUNT_DBUS = Mount(
type=MountType.BIND.value, source="/run/dbus", target="/run/dbus", read_only=True
)
MOUNT_DEV = DockerMount(
type=MountType.BIND,
source="/dev",
target="/dev",
read_only=True,
bind_options=MountBindOptions(read_only_non_recursive=True),
MOUNT_DEV = Mount(
type=MountType.BIND.value, source="/dev", target="/dev", read_only=True
)
MOUNT_DOCKER = DockerMount(
type=MountType.BIND,
MOUNT_DEV.setdefault("BindOptions", {})["ReadOnlyNonRecursive"] = True
MOUNT_DOCKER = Mount(
type=MountType.BIND.value,
source="/run/docker.sock",
target="/run/docker.sock",
read_only=True,
)
MOUNT_MACHINE_ID = DockerMount(
type=MountType.BIND,
MOUNT_MACHINE_ID = Mount(
type=MountType.BIND.value,
source=MACHINE_ID.as_posix(),
target=MACHINE_ID.as_posix(),
read_only=True,
)
MOUNT_UDEV = DockerMount(
type=MountType.BIND, source="/run/udev", target="/run/udev", read_only=True
MOUNT_UDEV = Mount(
type=MountType.BIND.value, source="/run/udev", target="/run/udev", read_only=True
)
PATH_PRIVATE_DATA = PurePath("/data")

View File

@@ -2,11 +2,13 @@
import logging
from docker.types import Mount
from ..coresys import CoreSysAttributes
from ..exceptions import DockerJobError
from ..jobs.const import JobConcurrency
from ..jobs.decorator import Job
from .const import ENV_TIME, MOUNT_DBUS, DockerMount, MountType
from .const import ENV_TIME, MOUNT_DBUS, MountType
from .interface import DockerInterface
_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -45,8 +47,8 @@ class DockerDNS(DockerInterface, CoreSysAttributes):
security_opt=self.security_opt,
environment={ENV_TIME: self.sys_timezone},
mounts=[
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_dns.as_posix(),
target="/config",
read_only=False,

View File

@@ -5,6 +5,7 @@ import logging
import re
from awesomeversion import AwesomeVersion
from docker.types import Mount
from ..const import LABEL_MACHINE
from ..exceptions import DockerJobError
@@ -13,7 +14,6 @@ from ..homeassistant.const import LANDINGPAGE
from ..jobs.const import JobConcurrency
from ..jobs.decorator import Job
from .const import (
ENV_DUPLICATE_LOG_FILE,
ENV_TIME,
ENV_TOKEN,
ENV_TOKEN_OLD,
@@ -25,8 +25,6 @@ from .const import (
PATH_PUBLIC_CONFIG,
PATH_SHARE,
PATH_SSL,
DockerMount,
MountBindOptions,
MountType,
PropagationMode,
)
@@ -92,15 +90,15 @@ class DockerHomeAssistant(DockerInterface):
)
@property
def mounts(self) -> list[DockerMount]:
def mounts(self) -> list[Mount]:
"""Return mounts for container."""
mounts = [
MOUNT_DEV,
MOUNT_DBUS,
MOUNT_UDEV,
# HA config folder
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target=PATH_PUBLIC_CONFIG.as_posix(),
read_only=False,
@@ -112,45 +110,41 @@ class DockerHomeAssistant(DockerInterface):
mounts.extend(
[
# All other folders
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_ssl.as_posix(),
target=PATH_SSL.as_posix(),
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_share.as_posix(),
target=PATH_SHARE.as_posix(),
read_only=False,
bind_options=MountBindOptions(
propagation=PropagationMode.RSLAVE
),
propagation=PropagationMode.RSLAVE.value,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_media.as_posix(),
target=PATH_MEDIA.as_posix(),
read_only=False,
bind_options=MountBindOptions(
propagation=PropagationMode.RSLAVE
),
propagation=PropagationMode.RSLAVE.value,
),
# Configuration audio
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_homeassistant.path_extern_pulse.as_posix(),
target="/etc/pulse/client.conf",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_plugins.audio.path_extern_pulse.as_posix(),
target="/run/audio",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_plugins.audio.path_extern_asound.as_posix(),
target="/etc/asound.conf",
read_only=True,
@@ -180,8 +174,6 @@ class DockerHomeAssistant(DockerInterface):
}
if restore_job_id:
environment[ENV_RESTORE_JOB_ID] = restore_job_id
if self.sys_homeassistant.duplicate_log_file:
environment[ENV_DUPLICATE_LOG_FILE] = "1"
await self._run(
tag=(self.sys_homeassistant.version),
name=self.name,
@@ -221,20 +213,20 @@ class DockerHomeAssistant(DockerInterface):
init=True,
entrypoint=[],
mounts=[
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target="/config",
read_only=False,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_ssl.as_posix(),
target="/ssl",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_share.as_posix(),
target="/share",
read_only=False,

View File

@@ -19,7 +19,6 @@ import docker
from docker.models.containers import Container
import requests
from ..bus import EventListener
from ..const import (
ATTR_PASSWORD,
ATTR_REGISTRY,
@@ -35,25 +34,18 @@ from ..exceptions import (
DockerError,
DockerHubRateLimitExceeded,
DockerJobError,
DockerLogOutOfOrder,
DockerNotFound,
DockerRequestError,
)
from ..jobs import SupervisorJob
from ..jobs.const import JOB_GROUP_DOCKER_INTERFACE, JobConcurrency
from ..jobs.decorator import Job
from ..jobs.job_group import JobGroup
from ..resolution.const import ContextType, IssueType, SuggestionType
from ..utils.sentry import async_capture_exception
from .const import (
DOCKER_HUB,
DOCKER_HUB_LEGACY,
ContainerState,
PullImageLayerStage,
RestartPolicy,
)
from .const import DOCKER_HUB, ContainerState, RestartPolicy
from .manager import CommandReturn, PullLogEntry
from .monitor import DockerContainerStateEvent
from .pull_progress import ImagePullProgress
from .stats import DockerStats
_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -190,8 +182,7 @@ class DockerInterface(JobGroup, ABC):
stored = self.sys_docker.config.registries[registry]
credentials[ATTR_USERNAME] = stored[ATTR_USERNAME]
credentials[ATTR_PASSWORD] = stored[ATTR_PASSWORD]
# Don't include registry for Docker Hub (both official and legacy)
if registry not in (DOCKER_HUB, DOCKER_HUB_LEGACY):
if registry != DOCKER_HUB:
credentials[ATTR_REGISTRY] = registry
_LOGGER.debug(
@@ -202,159 +193,6 @@ class DockerInterface(JobGroup, ABC):
return credentials
def _process_pull_image_log( # noqa: C901
self, install_job_id: str, reference: PullLogEntry
) -> None:
"""Process events fired from a docker while pulling an image, filtered to a given job id."""
if (
reference.job_id != install_job_id
or not reference.id
or not reference.status
or not (stage := PullImageLayerStage.from_status(reference.status))
):
return
# Pulling FS Layer is our marker for a layer that needs to be downloaded and extracted. Otherwise it already exists and we can ignore it
job: SupervisorJob | None = None
if stage == PullImageLayerStage.PULLING_FS_LAYER:
job = self.sys_jobs.new_job(
name="Pulling container image layer",
initial_stage=stage.status,
reference=reference.id,
parent_id=install_job_id,
internal=True,
)
job.done = False
return
# Find our sub job to update details of
for j in self.sys_jobs.jobs:
if j.parent_id == install_job_id and j.reference == reference.id:
job = j
break
# There should no longer be any real risk of logs arriving out of order.
# However, tests with very small images have shown that Docker sometimes
# skips stages in the log, so keep this as a safety check when no job is found
if not job:
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for image id {reference.id} and parent job {install_job_id} but could not find a matching job, skipping",
_LOGGER.debug,
)
# For progress calculation we assume downloading is 70% of time, extracting is 30% and others stages negligible
progress = job.progress
match stage:
case PullImageLayerStage.DOWNLOADING | PullImageLayerStage.EXTRACTING:
if (
reference.progress_detail
and reference.progress_detail.current
and reference.progress_detail.total
):
progress = (
reference.progress_detail.current
/ reference.progress_detail.total
)
if stage == PullImageLayerStage.DOWNLOADING:
progress = 70 * progress
else:
progress = 70 + 30 * progress
case (
PullImageLayerStage.VERIFYING_CHECKSUM
| PullImageLayerStage.DOWNLOAD_COMPLETE
):
progress = 70
case PullImageLayerStage.PULL_COMPLETE:
progress = 100
case PullImageLayerStage.RETRYING_DOWNLOAD:
progress = 0
# No real risk of getting things out of order in current implementation
# but keeping this one in case another change to these trips us up.
if stage != PullImageLayerStage.RETRYING_DOWNLOAD and progress < job.progress:
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for job {job.uuid} that implied progress was {progress} but current progress is {job.progress}, skipping",
_LOGGER.debug,
)
# Our filters have all passed. Time to update the job
# Only downloading and extracting have progress details. Use that to set extra
# We'll leave it around on later stages as the total bytes may be useful after that stage
# Enforce range to prevent float drift error
progress = max(0, min(progress, 100))
if (
stage in {PullImageLayerStage.DOWNLOADING, PullImageLayerStage.EXTRACTING}
and reference.progress_detail
and reference.progress_detail.current is not None
and reference.progress_detail.total is not None
):
job.update(
progress=progress,
stage=stage.status,
extra={
"current": reference.progress_detail.current,
"total": reference.progress_detail.total,
},
)
else:
# If we reach DOWNLOAD_COMPLETE without ever having set extra (small layers that skip
# the downloading phase), set a minimal extra so aggregate progress calculation can proceed
extra = job.extra
if stage == PullImageLayerStage.DOWNLOAD_COMPLETE and not job.extra:
extra = {"current": 1, "total": 1}
job.update(
progress=progress,
stage=stage.status,
done=stage == PullImageLayerStage.PULL_COMPLETE,
extra=None if stage == PullImageLayerStage.RETRYING_DOWNLOAD else extra,
)
# Once we have received a progress update for every child job, start to set status of the main one
install_job = self.sys_jobs.get_job(install_job_id)
layer_jobs = [
job
for job in self.sys_jobs.jobs
if job.parent_id == install_job.uuid
and job.name == "Pulling container image layer"
]
# First set the total bytes to be downloaded/extracted on the main job
if not install_job.extra:
total = 0
for job in layer_jobs:
if not job.extra:
return
total += job.extra["total"]
install_job.extra = {"total": total}
else:
total = install_job.extra["total"]
# Then determine total progress based on progress of each sub-job, factoring in size of each compared to total
progress = 0.0
stage = PullImageLayerStage.PULL_COMPLETE
for job in layer_jobs:
if not job.extra or not job.extra.get("total"):
return
progress += job.progress * (job.extra["total"] / total)
job_stage = PullImageLayerStage.from_status(cast(str, job.stage))
if job_stage < PullImageLayerStage.EXTRACTING:
stage = PullImageLayerStage.DOWNLOADING
elif (
stage == PullImageLayerStage.PULL_COMPLETE
and job_stage < PullImageLayerStage.PULL_COMPLETE
):
stage = PullImageLayerStage.EXTRACTING
# Ensure progress is 100 at this point to prevent float drift
if stage == PullImageLayerStage.PULL_COMPLETE:
progress = 100
# To reduce noise, limit updates to when result has changed by an entire percent or when stage changed
if stage != install_job.stage or progress >= install_job.progress + 1:
install_job.update(stage=stage.status, progress=max(0, min(progress, 100)))
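In isolation, the size-weighted aggregation step of the removed code above works like this: each layer's percentage is scaled by its share of the combined byte total, so one large layer dominates the overall figure. A small worked example with made-up sizes:

# Worked example of the weighted sum in the removed aggregation above.
layers = [
    {"progress": 100.0, "total": 5_000_000},   # small layer, already done
    {"progress": 20.0, "total": 95_000_000},   # large layer, still downloading
]

total = sum(layer["total"] for layer in layers)
overall = sum(layer["progress"] * (layer["total"] / total) for layer in layers)

print(round(overall, 1))  # 24.0 -> the large layer dominates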
@Job(
name="docker_interface_install",
on_condition=DockerJobError,
@@ -374,30 +212,35 @@ class DockerInterface(JobGroup, ABC):
raise ValueError("Cannot pull without an image!")
image_arch = arch or self.sys_arch.supervisor
listener: EventListener | None = None
pull_progress = ImagePullProgress()
current_job = self.sys_jobs.current
async def process_pull_event(event: PullLogEntry) -> None:
"""Process pull event and update job progress."""
if event.job_id != current_job.uuid:
return
# Process event through progress tracker
pull_progress.process_event(event)
# Update job if progress changed significantly (>= 1%)
should_update, progress = pull_progress.should_update_job()
if should_update:
stage = pull_progress.get_stage()
current_job.update(progress=progress, stage=stage)
listener = self.sys_bus.register_event(
BusEvent.DOCKER_IMAGE_PULL_UPDATE, process_pull_event
)
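The new ImagePullProgress tracker lives in pull_progress.py and is not part of this excerpt. As a rough, hedged sketch of the count-based idea, each layer contributes an equal share, with downloading and extraction weighted roughly 70/30 per layer (matching the weighting mentioned in the removed code above); the real implementation may differ in detail:

# Hedged sketch only; not the actual ImagePullProgress implementation.
class CountBasedProgress:
    """Each layer contributes equally; 70% download weight, 30% extraction."""

    def __init__(self) -> None:
        self.layers: dict[str, float] = {}  # layer id -> 0..100

    def update_layer(self, layer_id: str, downloaded: float, extracted: float) -> None:
        """Record per-layer progress, both phases expressed as 0..1."""
        self.layers[layer_id] = 70 * downloaded + 30 * extracted

    @property
    def overall(self) -> float:
        """Average of all layer percentages, held just below 100."""
        if not self.layers:
            return 0.0
        # Leave headroom so the enclosing job can report final completion.
        return min(99.0, sum(self.layers.values()) / len(self.layers))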
_LOGGER.info("Downloading docker image %s with tag %s.", image, version)
try:
# Get credentials for private registries to pass to aiodocker
credentials = self._get_credentials(image) or None
curr_job_id = self.sys_jobs.current.uuid
async def process_pull_image_log(reference: PullLogEntry) -> None:
try:
self._process_pull_image_log(curr_job_id, reference)
except DockerLogOutOfOrder as err:
# Send all these to sentry. Missing a few progress updates
# shouldn't matter to users but matters to us
await async_capture_exception(err)
listener = self.sys_bus.register_event(
BusEvent.DOCKER_IMAGE_PULL_UPDATE, process_pull_image_log
)
# Pull new image, passing credentials to aiodocker
docker_image = await self.sys_docker.pull_image(
self.sys_jobs.current.uuid,
current_job.uuid,
image,
str(version),
platform=MAP_ARCH[image_arch],
@@ -445,8 +288,7 @@ class DockerInterface(JobGroup, ABC):
f"Unknown error with {image}:{version!s} -> {err!s}", _LOGGER.error
) from err
finally:
if listener:
self.sys_bus.remove_listener(listener)
self.sys_bus.remove_listener(listener)
self._meta = docker_image
@@ -457,34 +299,35 @@ class DockerInterface(JobGroup, ABC):
return True
return False
async def _get_container(self) -> Container | None:
"""Get docker container, returns None if not found."""
try:
return await self.sys_run_in_executor(
self.sys_docker.containers_legacy.get, self.name
)
except docker.errors.NotFound:
return None
except docker.errors.DockerException as err:
raise DockerAPIError(
f"Docker API error occurred while getting container information: {err!s}"
) from err
except requests.RequestException as err:
raise DockerRequestError(
f"Error communicating with Docker to get container information: {err!s}"
) from err
async def is_running(self) -> bool:
"""Return True if Docker is running."""
if docker_container := await self._get_container():
return docker_container.status == "running"
return False
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
return False
except docker.errors.DockerException as err:
raise DockerAPIError() from err
except requests.RequestException as err:
raise DockerRequestError() from err
return docker_container.status == "running"
async def current_state(self) -> ContainerState:
"""Return current state of container."""
if docker_container := await self._get_container():
return _container_state_from_model(docker_container)
return ContainerState.UNKNOWN
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
return ContainerState.UNKNOWN
except docker.errors.DockerException as err:
raise DockerAPIError() from err
except requests.RequestException as err:
raise DockerRequestError() from err
return _container_state_from_model(docker_container)
@Job(name="docker_interface_attach", concurrency=JobConcurrency.GROUP_QUEUE)
async def attach(
@@ -493,7 +336,7 @@ class DockerInterface(JobGroup, ABC):
"""Attach to running Docker container."""
with suppress(docker.errors.DockerException, requests.RequestException):
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers_legacy.get, self.name
self.sys_docker.containers.get, self.name
)
self._meta = docker_container.attrs
self.sys_docker.monitor.watch_container(docker_container)
@@ -519,9 +362,7 @@ class DockerInterface(JobGroup, ABC):
# Successful?
if not self._meta:
raise DockerError(
f"Could not get metadata on container or image for {self.name}"
)
raise DockerError()
_LOGGER.info("Attaching to %s with version %s", self.image, self.version)
@Job(
@@ -533,11 +374,8 @@ class DockerInterface(JobGroup, ABC):
"""Run Docker image."""
raise NotImplementedError()
async def _run(self, *, name: str, **kwargs) -> None:
"""Run Docker image with retry if necessary."""
if not (image := self.image):
raise ValueError(f"Cannot determine image to use to run {self.name}!")
async def _run(self, **kwargs) -> None:
"""Run Docker image with retry inf necessary."""
if await self.is_running():
return
@@ -546,14 +384,16 @@ class DockerInterface(JobGroup, ABC):
# Create & Run container
try:
container_metadata = await self.sys_docker.run(image, name=name, **kwargs)
docker_container = await self.sys_run_in_executor(
self.sys_docker.run, self.image, **kwargs
)
except DockerNotFound as err:
# If image is missing, capture the exception as this shouldn't happen
await async_capture_exception(err)
raise
# Store metadata
self._meta = container_metadata
self._meta = docker_container.attrs
@Job(
name="docker_interface_stop",
@@ -725,8 +565,14 @@ class DockerInterface(JobGroup, ABC):
async def is_failed(self) -> bool:
"""Return True if Docker is failing state."""
if not (docker_container := await self._get_container()):
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
return False
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err
# container is not running
if docker_container.status != "exited":

View File

@@ -13,12 +13,10 @@ import logging
import os
from pathlib import Path
import re
from typing import Any, Final, Literal, Self, cast
from typing import Any, Final, Self, cast
import aiodocker
from aiodocker.containers import DockerContainers
from aiodocker.images import DockerImages
from aiodocker.types import JSONObject
from aiohttp import ClientSession, ClientTimeout, UnixConnector
import attr
from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
@@ -51,19 +49,9 @@ from ..exceptions import (
)
from ..utils.common import FileConfiguration
from ..validate import SCHEMA_DOCKER_CONFIG
from .const import (
DOCKER_HUB,
DOCKER_HUB_LEGACY,
LABEL_MANAGED,
Capabilities,
DockerMount,
MountType,
RestartPolicy,
Ulimit,
)
from .const import DOCKER_HUB, IMAGE_WITH_HOST, LABEL_MANAGED
from .monitor import DockerMonitor
from .network import DockerNetwork
from .utils import get_registry_from_image
_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -224,25 +212,19 @@ class DockerConfig(FileConfiguration):
Matches the image against configured registries and returns the registry
name if found, or None if no matching credentials are configured.
Uses Docker's domain detection logic from:
vendor/github.com/distribution/reference/normalize.go
"""
if not self.registries:
return None
# Check if image uses a custom registry (e.g., ghcr.io/org/image)
registry = get_registry_from_image(image)
if registry:
matcher = IMAGE_WITH_HOST.match(image)
if matcher:
registry = matcher.group(1)
if registry in self.registries:
return registry
else:
# No registry prefix means Docker Hub
# Support both docker.io (official) and hub.docker.com (legacy)
if DOCKER_HUB in self.registries:
return DOCKER_HUB
if DOCKER_HUB_LEGACY in self.registries:
return DOCKER_HUB_LEGACY
# If no registry prefix, check for Docker Hub credentials
elif DOCKER_HUB in self.registries:
return DOCKER_HUB
return None
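Taken together, the lookup above resolves an image to the registry whose stored credentials should be used: an explicit host prefix must match a configured registry, and anything else falls back to the Docker Hub entries. A hedged sketch of that decision as a standalone function, assuming get_registry_from_image returns the host prefix (for example "ghcr.io") or None:

# Sketch of the credential lookup above; get_registry_from_image is assumed to
# behave as described (host prefix or None for Docker Hub style names).
from supervisor.docker.utils import get_registry_from_image


def match_registry(image: str, registries: dict[str, dict]) -> str | None:
    registry = get_registry_from_image(image)
    if registry:
        return registry if registry in registries else None
    # No host prefix means Docker Hub; accept both identifiers.
    for hub in ("docker.io", "hub.docker.com"):
        if hub in registries:
            return hub
    return None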
@@ -308,13 +290,8 @@ class DockerAPI(CoreSysAttributes):
return self.docker.images
@property
def containers(self) -> DockerContainers:
def containers(self) -> ContainerCollection:
"""Return API containers."""
return self.docker.containers
@property
def containers_legacy(self) -> ContainerCollection:
"""Return API containers from Dockerpy."""
return self.dockerpy.containers
@property
@@ -347,137 +324,50 @@ class DockerAPI(CoreSysAttributes):
"""Stop docker events monitor."""
await self.monitor.unload()
def _create_container_config(
def run(
self,
image: str,
*,
tag: str = "latest",
dns: bool = True,
init: bool = False,
hostname: str | None = None,
detach: bool = True,
security_opt: list[str] | None = None,
restart_policy: dict[str, RestartPolicy] | None = None,
extra_hosts: dict[str, IPv4Address] | None = None,
environment: dict[str, str | None] | None = None,
mounts: list[DockerMount] | None = None,
ports: dict[str, str | int | None] | None = None,
oom_score_adj: int | None = None,
network_mode: Literal["host"] | None = None,
privileged: bool = False,
device_cgroup_rules: list[str] | None = None,
tmpfs: dict[str, str] | None = None,
entrypoint: list[str] | None = None,
cap_add: list[Capabilities] | None = None,
ulimits: list[Ulimit] | None = None,
cpu_rt_runtime: int | None = None,
stdin_open: bool = False,
pid_mode: str | None = None,
uts_mode: str | None = None,
) -> JSONObject:
"""Map kwargs to create container config.
ipv4: IPv4Address | None = None,
**kwargs: Any,
) -> Container:
"""Create a Docker container and run it.
This only covers the docker options we currently use. It is not intended
to be exhaustive as its dockerpy equivalent was. We'll add to it as we
make use of new features.
Need run inside executor.
"""
# Set up host dependent config for container
host_config: dict[str, Any] = {
"NetworkMode": network_mode if network_mode else "default",
"Init": init,
"Privileged": privileged,
}
if security_opt:
host_config["SecurityOpt"] = security_opt
if restart_policy:
host_config["RestartPolicy"] = restart_policy
if extra_hosts:
host_config["ExtraHosts"] = [f"{k}:{v}" for k, v in extra_hosts.items()]
if mounts:
host_config["Mounts"] = [mount.to_dict() for mount in mounts]
if oom_score_adj is not None:
host_config["OomScoreAdj"] = oom_score_adj
if device_cgroup_rules:
host_config["DeviceCgroupRules"] = device_cgroup_rules
if tmpfs:
host_config["Tmpfs"] = tmpfs
if cap_add:
host_config["CapAdd"] = cap_add
if cpu_rt_runtime is not None:
host_config["CPURealtimeRuntime"] = cpu_rt_runtime
if pid_mode:
host_config["PidMode"] = pid_mode
if uts_mode:
host_config["UtsMode"] = uts_mode
if ulimits:
host_config["Ulimits"] = [limit.to_dict() for limit in ulimits]
name: str | None = kwargs.get("name")
network_mode: str | None = kwargs.get("network_mode")
hostname: str | None = kwargs.get("hostname")
# Full container config
config: dict[str, Any] = {
"Image": f"{image}:{tag}",
"Labels": {LABEL_MANAGED: ""},
"OpenStdin": stdin_open,
"StdinOnce": not detach and stdin_open,
"AttachStdin": not detach and stdin_open,
"AttachStdout": not detach,
"AttachStderr": not detach,
"HostConfig": host_config,
}
if hostname:
config["Hostname"] = hostname
if environment:
config["Env"] = [
env if val is None else f"{env}={val}"
for env, val in environment.items()
]
if entrypoint:
config["Entrypoint"] = entrypoint
if "labels" not in kwargs:
kwargs["labels"] = {}
elif isinstance(kwargs["labels"], list):
kwargs["labels"] = dict.fromkeys(kwargs["labels"], "")
# Set up networking
kwargs["labels"][LABEL_MANAGED] = ""
# Setup DNS
if dns:
host_config["Dns"] = [str(self.network.dns)]
host_config["DnsSearch"] = [DNS_SUFFIX]
kwargs["dns"] = [str(self.network.dns)]
kwargs["dns_search"] = [DNS_SUFFIX]
# CoreDNS forward plug-in fails in ~6s, then fallback triggers.
# However, the default timeout of glibc and musl is 5s. Increase
# default timeout to make sure CoreDNS fallback is working
# on first query.
host_config["DnsOptions"] = ["timeout:10"]
kwargs["dns_opt"] = ["timeout:10"]
if hostname:
config["Domainname"] = DNS_SUFFIX
kwargs["domainname"] = DNS_SUFFIX
# Setup ports
if ports:
port_bindings = {
port if "/" in port else f"{port}/tcp": [
{"HostIp": "", "HostPort": str(host_port) if host_port else ""}
]
for port, host_port in ports.items()
}
config["ExposedPorts"] = {port: {} for port in port_bindings}
host_config["PortBindings"] = port_bindings
return config
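The ports handling at the end of _create_container_config turns the docker-py style mapping into the Engine API's ExposedPorts/PortBindings structures. A small worked example of the exact transformation shown above, with illustrative port numbers:

# Worked example of the ports section of _create_container_config above.
ports = {"8123/tcp": 8123, "53": None}

port_bindings = {
    port if "/" in port else f"{port}/tcp": [
        {"HostIp": "", "HostPort": str(host_port) if host_port else ""}
    ]
    for port, host_port in ports.items()
}

# ExposedPorts: {'8123/tcp': {}, '53/tcp': {}}
print({port: {} for port in port_bindings})
# PortBindings: {'8123/tcp': [{'HostIp': '', 'HostPort': '8123'}],
#                '53/tcp':   [{'HostIp': '', 'HostPort': ''}]}
print(port_bindings)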
async def run(
self,
image: str,
*,
name: str,
tag: str = "latest",
hostname: str | None = None,
mounts: list[DockerMount] | None = None,
network_mode: Literal["host"] | None = None,
ipv4: IPv4Address | None = None,
**kwargs,
) -> dict[str, Any]:
"""Create a Docker container and run it."""
if not image or not name:
raise ValueError("image, name and tag cannot be an empty string!")
# Setup network
if not network_mode:
kwargs["network"] = None
# Setup cidfile and bind mount it
cidfile_path = self.coresys.config.path_cid_files / f"{name}.cid"
cidfile_path = None
if name:
cidfile_path = self.coresys.config.path_cid_files / f"{name}.cid"
def create_cidfile() -> None:
# Remove the file/directory if it exists e.g. as a leftover from unclean shutdown
# Note: Can be a directory if Docker auto-started container with restart policy
# before Supervisor could write the CID file
@@ -491,37 +381,31 @@ class DockerAPI(CoreSysAttributes):
# from creating it as a directory if container auto-starts
cidfile_path.touch()
await self.sys_run_in_executor(create_cidfile)
extern_cidfile_path = (
self.coresys.config.path_extern_cid_files / f"{name}.cid"
)
# Bind mount to /run/cid in container
extern_cidfile_path = self.coresys.config.path_extern_cid_files / f"{name}.cid"
cid_mount = DockerMount(
type=MountType.BIND,
source=extern_cidfile_path.as_posix(),
target="/run/cid",
read_only=True,
)
if mounts is None:
mounts = [cid_mount]
else:
mounts = [*mounts, cid_mount]
# Bind mount to /run/cid in container
if "volumes" not in kwargs:
kwargs["volumes"] = {}
kwargs["volumes"][str(extern_cidfile_path)] = {
"bind": "/run/cid",
"mode": "ro",
}
# Create container
config = self._create_container_config(
image,
tag=tag,
hostname=hostname,
mounts=mounts,
network_mode=network_mode,
**kwargs,
)
try:
container = await self.containers.create(config, name=name)
except aiodocker.DockerError as err:
if err.status == HTTPStatus.NOT_FOUND:
raise DockerNotFound(
f"Image {image}:{tag} does not exist for {name}", _LOGGER.error
) from err
container = self.containers.create(
f"{image}:{tag}", use_config_proxy=False, **kwargs
)
if cidfile_path:
with cidfile_path.open("w", encoding="ascii") as cidfile:
cidfile.write(str(container.id))
except docker_errors.NotFound as err:
raise DockerNotFound(
f"Image {image}:{tag} does not exist for {name}", _LOGGER.error
) from err
except docker_errors.DockerException as err:
raise DockerAPIError(
f"Can't create container from {name}: {err}", _LOGGER.error
) from err
@@ -530,61 +414,43 @@ class DockerAPI(CoreSysAttributes):
f"Dockerd connection issue for {name}: {err}", _LOGGER.error
) from err
# Setup network and store container id in cidfile
def setup_network_and_cidfile() -> None:
# Write cidfile
with cidfile_path.open("w", encoding="ascii") as cidfile:
cidfile.write(str(container.id))
# Attach network
if not network_mode:
alias = [hostname] if hostname else None
try:
self.network.attach_container(
container.id, name, alias=alias, ipv4=ipv4
)
except DockerError:
_LOGGER.warning("Can't attach %s to hassio-network!", name)
else:
with suppress(DockerError):
self.network.detach_default_bridge(container.id, name)
# Attach network
if not network_mode:
alias = [hostname] if hostname else None
try:
self.network.attach_container(container, alias=alias, ipv4=ipv4)
except DockerError:
_LOGGER.warning("Can't attach %s to hassio-network!", name)
else:
host_network: Network = self.dockerpy.networks.get(DOCKER_NETWORK_HOST)
with suppress(DockerError):
self.network.detach_default_bridge(container)
else:
host_network: Network = self.dockerpy.networks.get(DOCKER_NETWORK_HOST)
# Check if container is registered on host
# https://github.com/moby/moby/issues/23302
if name and name in (
val.get("Name")
for val in host_network.attrs.get("Containers", {}).values()
):
with suppress(docker_errors.NotFound):
host_network.disconnect(name, force=True)
await self.sys_run_in_executor(setup_network_and_cidfile)
# Check if container is registered on host
# https://github.com/moby/moby/issues/23302
if name and name in (
val.get("Name")
for val in host_network.attrs.get("Containers", {}).values()
):
with suppress(docker_errors.NotFound):
host_network.disconnect(name, force=True)
# Run container
try:
await container.start()
except aiodocker.DockerError as err:
container.start()
except docker_errors.DockerException as err:
raise DockerAPIError(f"Can't start {name}: {err}", _LOGGER.error) from err
except requests.RequestException as err:
raise DockerRequestError(
f"Dockerd connection issue for {name}: {err}", _LOGGER.error
) from err
# Get container metadata after the container is started
try:
container_attrs = await container.show()
except aiodocker.DockerError as err:
raise DockerAPIError(
f"Can't inspect started container {name}: {err}", _LOGGER.error
) from err
except requests.RequestException as err:
raise DockerRequestError(
f"Dockerd connection issue for {name}: {err}", _LOGGER.error
) from err
# Update metadata
with suppress(docker_errors.DockerException, requests.RequestException):
container.reload()
return container_attrs
return container
async def pull_image(
self,
@@ -601,10 +467,8 @@ class DockerAPI(CoreSysAttributes):
raises only if the get fails afterwards. Additionally it fires progress reports for the pull
on the bus so listeners can use that to update status for users.
"""
# Use timeout=None to disable timeout for pull operations, matching docker-py behavior.
# aiodocker converts None to ClientTimeout(total=None) which disables the timeout.
async for e in self.images.pull(
repository, tag=tag, platform=platform, auth=auth, stream=True, timeout=None
repository, tag=tag, platform=platform, auth=auth, stream=True
):
entry = PullLogEntry.from_pull_log_dict(job_id, e)
if entry.error:
@@ -737,24 +601,16 @@ class DockerAPI(CoreSysAttributes):
) -> bool:
"""Return True if docker container exists in good state and is built from expected image."""
try:
docker_container = await self.sys_run_in_executor(
self.containers_legacy.get, name
)
docker_container = await self.sys_run_in_executor(self.containers.get, name)
docker_image = await self.images.inspect(f"{image}:{version}")
except docker_errors.NotFound:
return False
except aiodocker.DockerError as err:
if err.status == HTTPStatus.NOT_FOUND:
return False
raise DockerError(
f"Could not get container {name} or image {image}:{version} to check state: {err!s}",
_LOGGER.error,
) from err
raise DockerError() from err
except (docker_errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get container {name} or image {image}:{version} to check state: {err!s}",
_LOGGER.error,
) from err
raise DockerError() from err
# Check the image is correct and state is good
return (
@@ -768,15 +624,11 @@ class DockerAPI(CoreSysAttributes):
) -> None:
"""Stop/remove Docker container."""
try:
docker_container: Container = self.containers_legacy.get(name)
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
# Generally suppressed so we don't log this
raise DockerNotFound() from None
except (docker_errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get container {name} for stopping: {err!s}",
_LOGGER.error,
) from err
raise DockerError() from err
if docker_container.status == "running":
_LOGGER.info("Stopping %s application", name)
@@ -795,7 +647,7 @@ class DockerAPI(CoreSysAttributes):
def start_container(self, name: str) -> None:
"""Start Docker container."""
try:
docker_container: Container = self.containers_legacy.get(name)
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"{name} not found for starting up", _LOGGER.error
@@ -814,15 +666,11 @@ class DockerAPI(CoreSysAttributes):
def restart_container(self, name: str, timeout: int) -> None:
"""Restart docker container."""
try:
container: Container = self.containers_legacy.get(name)
container: Container = self.containers.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"Container {name} not found for restarting", _LOGGER.warning
) from None
raise DockerNotFound() from None
except (docker_errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get container {name} for restarting: {err!s}", _LOGGER.error
) from err
raise DockerError() from err
_LOGGER.info("Restarting %s", name)
try:
@@ -833,15 +681,11 @@ class DockerAPI(CoreSysAttributes):
def container_logs(self, name: str, tail: int = 100) -> bytes:
"""Return Docker logs of container."""
try:
docker_container: Container = self.containers_legacy.get(name)
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"Container {name} not found for logs", _LOGGER.warning
) from None
raise DockerNotFound() from None
except (docker_errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get container {name} for logs: {err!s}", _LOGGER.error
) from err
raise DockerError() from err
try:
return docker_container.logs(tail=tail, stdout=True, stderr=True)
@@ -853,15 +697,11 @@ class DockerAPI(CoreSysAttributes):
def container_stats(self, name: str) -> dict[str, Any]:
"""Read and return stats from container."""
try:
docker_container: Container = self.containers_legacy.get(name)
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"Container {name} not found for stats", _LOGGER.warning
) from None
raise DockerNotFound() from None
except (docker_errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not inspect container '{name}': {err!s}", _LOGGER.error
) from err
raise DockerError() from err
# container is not running
if docker_container.status != "running":
@@ -878,23 +718,17 @@ class DockerAPI(CoreSysAttributes):
def container_run_inside(self, name: str, command: str) -> CommandReturn:
"""Execute a command inside Docker container."""
try:
docker_container: Container = self.containers_legacy.get(name)
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"Container {name} not found for running command", _LOGGER.warning
) from None
raise DockerNotFound() from None
except (docker_errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't get container {name} to run command: {err!s}"
) from err
raise DockerError() from err
# Execute
try:
code, output = docker_container.exec_run(command)
except (docker_errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't run command in container {name}: {err!s}"
) from err
raise DockerError() from err
return CommandReturn(code, output)
@@ -927,7 +761,7 @@ class DockerAPI(CoreSysAttributes):
"""Import a tar file as image."""
try:
with tar_file.open("rb") as read_tar:
resp: list[dict[str, Any]] = await self.images.import_image(read_tar)
resp: list[dict[str, Any]] = self.images.import_image(read_tar)
except (aiodocker.DockerError, OSError) as err:
raise DockerError(
f"Can't import image from tar: {err}", _LOGGER.error

View File

@@ -7,6 +7,7 @@ import logging
from typing import Self, cast
import docker
from docker.models.containers import Container
from docker.models.networks import Network
import requests
@@ -219,8 +220,7 @@ class DockerNetwork:
def attach_container(
self,
container_id: str,
name: str,
container: Container,
alias: list[str] | None = None,
ipv4: IPv4Address | None = None,
) -> None:
@@ -233,15 +233,15 @@ class DockerNetwork:
self.network.reload()
# Check stale Network
if name in (
if container.name and container.name in (
val.get("Name") for val in self.network.attrs.get("Containers", {}).values()
):
self.stale_cleanup(name)
self.stale_cleanup(container.name)
# Attach Network
try:
self.network.connect(
container_id, aliases=alias, ipv4_address=str(ipv4) if ipv4 else None
container, aliases=alias, ipv4_address=str(ipv4) if ipv4 else None
)
except (
docker.errors.NotFound,
@@ -250,7 +250,7 @@ class DockerNetwork:
requests.RequestException,
) as err:
raise DockerError(
f"Can't connect {name} to Supervisor network: {err}",
f"Can't connect {container.name} to Supervisor network: {err}",
_LOGGER.error,
) from err
@@ -274,20 +274,17 @@ class DockerNetwork:
) as err:
raise DockerError(f"Can't find {name}: {err}", _LOGGER.error) from err
if not (container_id := container.id):
raise DockerError(f"Received invalid metadata from docker for {name}")
if container.id not in self.containers:
self.attach_container(container, alias, ipv4)
if container_id not in self.containers:
self.attach_container(container_id, name, alias, ipv4)
def detach_default_bridge(self, container_id: str, name: str) -> None:
def detach_default_bridge(self, container: Container) -> None:
"""Detach default Docker bridge.
Need run inside executor.
"""
try:
default_network = self.docker.networks.get(DOCKER_NETWORK_DRIVER)
default_network.disconnect(container_id)
default_network.disconnect(container)
except docker.errors.NotFound:
pass
except (
@@ -296,7 +293,7 @@ class DockerNetwork:
requests.RequestException,
) as err:
raise DockerError(
f"Can't disconnect {name} from default network: {err}",
f"Can't disconnect {container.name} from default network: {err}",
_LOGGER.warning,
) from err

View File

@@ -0,0 +1,316 @@
"""Image pull progress tracking."""
from __future__ import annotations
from contextlib import suppress
from dataclasses import dataclass, field
from enum import Enum
import logging
from typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
from .manager import PullLogEntry
_LOGGER = logging.getLogger(__name__)
# Progress weight distribution: 70% downloading, 30% extraction
DOWNLOAD_WEIGHT = 70.0
EXTRACT_WEIGHT = 30.0
class LayerPullStatus(Enum):
"""Status values for pulling an image layer.
These are a subset of the statuses in a Docker image pull log.
The order field allows comparing which stage is further along.
"""
PULLING_FS_LAYER = 1, "Pulling fs layer"
WAITING = 1, "Waiting"
RETRYING = 2, "Retrying" # Matches "Retrying in N seconds"
DOWNLOADING = 3, "Downloading"
VERIFYING_CHECKSUM = 4, "Verifying Checksum"
DOWNLOAD_COMPLETE = 5, "Download complete"
EXTRACTING = 6, "Extracting"
PULL_COMPLETE = 7, "Pull complete"
ALREADY_EXISTS = 7, "Already exists"
def __init__(self, order: int, status: str) -> None:
"""Set fields from values."""
self.order = order
self.status = status
def __eq__(self, value: object, /) -> bool:
"""Check equality, allow string comparisons on status."""
with suppress(AttributeError):
return self.status == cast(LayerPullStatus, value).status
return self.status == value
def __hash__(self) -> int:
"""Return hash based on status string."""
return hash(self.status)
def __lt__(self, other: object) -> bool:
"""Order instances by stage progression."""
with suppress(AttributeError):
return self.order < cast(LayerPullStatus, other).order
return False
@classmethod
def from_status(cls, status: str) -> LayerPullStatus | None:
"""Get enum from status string, or None if not recognized."""
# Handle "Retrying in N seconds" pattern
if status.startswith("Retrying in "):
return cls.RETRYING
for member in cls:
if member.status == status:
return member
return None
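The comparison helpers above are intended to be used as in this small illustrative sketch (assertions only, not part of the module):

```python
# Illustrative checks against the enum defined above.
assert LayerPullStatus.from_status("Retrying in 5 seconds") is LayerPullStatus.RETRYING
assert LayerPullStatus.DOWNLOADING == "Downloading"                # string comparison on status
assert LayerPullStatus.DOWNLOADING < LayerPullStatus.EXTRACTING    # stage ordering via `order`
assert LayerPullStatus.PULL_COMPLETE.order == LayerPullStatus.ALREADY_EXISTS.order
```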
@dataclass
class LayerProgress:
"""Track progress of a single layer."""
layer_id: str
total_size: int = 0 # Size in bytes (from downloading, reused for extraction)
download_current: int = 0
extract_current: int = 0 # Extraction progress in bytes (overlay2 only)
download_complete: bool = False
extract_complete: bool = False
already_exists: bool = False # Layer was already locally available
def calculate_progress(self) -> float:
"""Calculate layer progress 0-100.
Progress is weighted: 70% download, 30% extraction.
For overlay2, we have byte-based extraction progress.
For containerd, extraction jumps from 70% to 100% on completion.
"""
if self.already_exists or self.extract_complete:
return 100.0
if self.download_complete:
# Check if we have extraction progress (overlay2)
if self.extract_current > 0 and self.total_size > 0:
extract_pct = min(1.0, self.extract_current / self.total_size)
return DOWNLOAD_WEIGHT + (extract_pct * EXTRACT_WEIGHT)
# No extraction progress yet - return 70%
return DOWNLOAD_WEIGHT
if self.total_size > 0:
download_pct = min(1.0, self.download_current / self.total_size)
return download_pct * DOWNLOAD_WEIGHT
return 0.0
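As a rough worked example of the 70/30 weighting above (illustrative only, using the fields defined in this dataclass): a layer halfway through its download reports 35%, a finished download with extraction halfway done reports 85%, and extraction completion brings it to 100%.

```python
# Worked example of the per-layer weighting (illustrative only).
layer = LayerProgress(layer_id="deadbeef", total_size=100)

layer.download_current = 50
assert layer.calculate_progress() == 35.0   # 50% of the 70% download weight

layer.download_complete = True
layer.download_current = 100
layer.extract_current = 50
assert layer.calculate_progress() == 85.0   # 70 + 50% of the 30% extraction weight

layer.extract_complete = True
assert layer.calculate_progress() == 100.0
```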
@dataclass
class ImagePullProgress:
"""Track overall progress of pulling an image.
Uses count-based progress where each layer contributes equally regardless of size.
This avoids progress regression when large layers are discovered late due to
Docker's rate-limiting of concurrent downloads.
Progress is only reported after the first "Downloading" event, since Docker
sends "Already exists" and "Pulling fs layer" events before we know the full
layer count.
"""
layers: dict[str, LayerProgress] = field(default_factory=dict)
_last_reported_progress: float = field(default=0.0, repr=False)
_seen_downloading: bool = field(default=False, repr=False)
def get_or_create_layer(self, layer_id: str) -> LayerProgress:
"""Get existing layer or create new one."""
if layer_id not in self.layers:
self.layers[layer_id] = LayerProgress(layer_id=layer_id)
return self.layers[layer_id]
def process_event(self, entry: PullLogEntry) -> None:
"""Process a pull log event and update layer state."""
# Skip events without layer ID or status
if not entry.id or not entry.status:
return
# Skip metadata events that aren't layer-specific
# "Pulling from X" has id=tag but isn't a layer
if entry.status.startswith("Pulling from "):
return
# Parse status to enum (returns None for unrecognized statuses)
status = LayerPullStatus.from_status(entry.status)
if status is None:
return
layer = self.get_or_create_layer(entry.id)
# Handle "Already exists" - layer is locally available
if status is LayerPullStatus.ALREADY_EXISTS:
layer.already_exists = True
layer.download_complete = True
layer.extract_complete = True
return
# Handle "Pulling fs layer" / "Waiting" - layer is being tracked
if status in (LayerPullStatus.PULLING_FS_LAYER, LayerPullStatus.WAITING):
return
# Handle "Downloading" - update download progress
if status is LayerPullStatus.DOWNLOADING:
# Mark that we've seen downloading - now we know layer count is complete
self._seen_downloading = True
if (
entry.progress_detail
and entry.progress_detail.current is not None
and entry.progress_detail.total is not None
):
layer.download_current = entry.progress_detail.current
# Only set total_size if not already set or if this is larger
# (handles case where total changes during download)
layer.total_size = max(layer.total_size, entry.progress_detail.total)
return
# Handle "Verifying Checksum" - download is essentially complete
if status is LayerPullStatus.VERIFYING_CHECKSUM:
if layer.total_size > 0:
layer.download_current = layer.total_size
return
# Handle "Download complete" - download phase done
if status is LayerPullStatus.DOWNLOAD_COMPLETE:
layer.download_complete = True
if layer.total_size > 0:
layer.download_current = layer.total_size
elif layer.total_size == 0:
# Small layer that skipped downloading phase
# Set minimal size so it doesn't distort weighted average
layer.total_size = 1
layer.download_current = 1
return
# Handle "Extracting" - extraction in progress
if status is LayerPullStatus.EXTRACTING:
# For overlay2: progressDetail has {current, total} in bytes
# For containerd: progressDetail has {current, units: "s"} (time elapsed)
# We can only use byte-based progress (overlay2)
layer.download_complete = True
if layer.total_size > 0:
layer.download_current = layer.total_size
# Check if this is byte-based extraction progress (overlay2)
# Overlay2 has {current, total} in bytes, no units field
# Containerd has {current, units: "s"} which is useless for progress
if (
entry.progress_detail
and entry.progress_detail.current is not None
and entry.progress_detail.units is None
):
# Use layer's total_size from downloading phase (doesn't change)
layer.extract_current = entry.progress_detail.current
_LOGGER.debug(
"Layer %s extracting: %d/%d (%.1f%%)",
layer.layer_id,
layer.extract_current,
layer.total_size,
(layer.extract_current / layer.total_size * 100)
if layer.total_size > 0
else 0,
)
return
# Handle "Pull complete" - layer is fully done
if status is LayerPullStatus.PULL_COMPLETE:
layer.download_complete = True
layer.extract_complete = True
if layer.total_size > 0:
layer.download_current = layer.total_size
return
# Handle "Retrying in N seconds" - reset download progress
if status is LayerPullStatus.RETRYING:
layer.download_current = 0
layer.download_complete = False
return
def calculate_progress(self) -> float:
"""Calculate overall progress 0-100.
Uses count-based progress where each layer that needs pulling contributes
equally. Layers that already exist locally are excluded from the calculation.
Returns 0 until we've seen the first "Downloading" event, since Docker
reports "Already exists" and "Pulling fs layer" events before we know
the complete layer count.
"""
# Don't report progress until we've seen downloading start
# This ensures we know the full layer count before calculating progress
if not self._seen_downloading or not self.layers:
return 0.0
# Only count layers that need pulling (exclude already_exists)
layers_to_pull = [
layer for layer in self.layers.values() if not layer.already_exists
]
if not layers_to_pull:
# All layers already exist, nothing to download
return 100.0
# Each layer contributes equally: sum of layer progresses / total layers
total_progress = sum(layer.calculate_progress() for layer in layers_to_pull)
return total_progress / len(layers_to_pull)
def get_stage(self) -> str | None:
"""Get current stage based on layer states."""
if not self.layers:
return None
# Check if any layer is still downloading
for layer in self.layers.values():
if layer.already_exists:
continue
if not layer.download_complete:
return "Downloading"
# All downloads complete, check if extracting
for layer in self.layers.values():
if layer.already_exists:
continue
if not layer.extract_complete:
return "Extracting"
# All done
return "Pull complete"
def should_update_job(self, threshold: float = 1.0) -> tuple[bool, float]:
"""Check if job should be updated based on progress change.
Returns (should_update, current_progress).
Updates are triggered when progress changes by at least threshold%.
Progress is guaranteed to only increase (monotonic).
"""
current_progress = self.calculate_progress()
# Ensure monotonic progress - never report a decrease
# This can happen when new layers get size info and change the weighted average
if current_progress < self._last_reported_progress:
_LOGGER.debug(
"Progress decreased from %.1f%% to %.1f%%, keeping last reported",
self._last_reported_progress,
current_progress,
)
return False, self._last_reported_progress
if current_progress >= self._last_reported_progress + threshold:
_LOGGER.debug(
"Progress update: %.1f%% -> %.1f%% (delta: %.1f%%)",
self._last_reported_progress,
current_progress,
current_progress - self._last_reported_progress,
)
self._last_reported_progress = current_progress
return True, current_progress
return False, self._last_reported_progress
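A minimal end-to-end sketch of how the tracker above is driven. The event objects here are hypothetical stand-ins for PullLogEntry, modeling only the id, status and progress_detail attributes that process_event() reads:

```python
from dataclasses import dataclass

@dataclass
class FakeDetail:
    """Hypothetical stand-in for the progress detail read by process_event()."""
    current: int | None = None
    total: int | None = None
    units: str | None = None

@dataclass
class FakeEntry:
    """Hypothetical stand-in for PullLogEntry (id/status/progress_detail only)."""
    id: str | None = None
    status: str | None = None
    progress_detail: FakeDetail | None = None

progress = ImagePullProgress()
progress.process_event(FakeEntry(id="aaa", status="Already exists"))
progress.process_event(FakeEntry(id="bbb", status="Pulling fs layer"))
progress.process_event(
    FakeEntry(id="bbb", status="Downloading", progress_detail=FakeDetail(current=50, total=100))
)

# Layer "aaa" is excluded from the calculation; layer "bbb" is halfway through
# its download, i.e. 50% of the 70% download weight -> 35% overall.
should_update, pct = progress.should_update_job()
assert should_update and pct == 35.0

progress.process_event(FakeEntry(id="bbb", status="Pull complete"))
assert progress.calculate_progress() == 100.0
assert progress.get_stage() == "Pull complete"
```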

View File

@@ -54,7 +54,7 @@ class DockerSupervisor(DockerInterface):
"""Attach to running docker container."""
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers_legacy.get, self.name
self.sys_docker.containers.get, self.name
)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err
@@ -74,8 +74,7 @@ class DockerSupervisor(DockerInterface):
_LOGGER.info("Connecting Supervisor to hassio-network")
await self.sys_run_in_executor(
self.sys_docker.network.attach_container,
docker_container.id,
self.name,
docker_container,
alias=["supervisor"],
ipv4=self.sys_docker.network.supervisor,
)
@@ -91,7 +90,7 @@ class DockerSupervisor(DockerInterface):
Need run inside executor.
"""
try:
docker_container = self.sys_docker.containers_legacy.get(self.name)
docker_container = self.sys_docker.containers.get(self.name)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get Supervisor container for retag: {err}", _LOGGER.error
@@ -119,7 +118,7 @@ class DockerSupervisor(DockerInterface):
"""Update start tag to new version."""
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers_legacy.get, self.name
self.sys_docker.containers.get, self.name
)
docker_image = await self.sys_docker.images.inspect(f"{image}:{version!s}")
except (

View File

@@ -1,57 +0,0 @@
"""Docker utilities."""
from __future__ import annotations
import re
# Docker image reference domain regex
# Based on Docker's reference implementation:
# vendor/github.com/distribution/reference/normalize.go
#
# A domain is detected if the part before the first / contains:
# - "localhost" (with optional port)
# - Contains "." (like registry.example.com or 127.0.0.1)
# - Contains ":" (like myregistry:5000)
# - IPv6 addresses in brackets (like [::1]:5000)
#
# Note: Docker also treats uppercase letters as registry indicators since
# namespaces must be lowercase, but this regex handles lowercase matching
# and the get_registry_from_image() function validates the registry rules.
IMAGE_REGISTRY_REGEX = re.compile(
r"^(?P<registry>"
r"localhost(?::[0-9]+)?|" # localhost with optional port
r"(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])" # domain component
r"(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))*" # more components
r"(?::[0-9]+)?|" # optional port
r"\[[a-fA-F0-9:]+\](?::[0-9]+)?" # IPv6 with optional port
r")/" # must be followed by /
)
def get_registry_from_image(image_ref: str) -> str | None:
"""Extract registry from Docker image reference.
Returns the registry if the image reference contains one,
or None if the image uses Docker Hub (docker.io).
Based on Docker's reference implementation:
vendor/github.com/distribution/reference/normalize.go
Examples:
get_registry_from_image("nginx") -> None (docker.io)
get_registry_from_image("library/nginx") -> None (docker.io)
get_registry_from_image("myregistry.com/nginx") -> "myregistry.com"
get_registry_from_image("localhost/myimage") -> "localhost"
get_registry_from_image("localhost:5000/myimage") -> "localhost:5000"
get_registry_from_image("registry.io:5000/org/app:v1") -> "registry.io:5000"
get_registry_from_image("[::1]:5000/myimage") -> "[::1]:5000"
"""
match = IMAGE_REGISTRY_REGEX.match(image_ref)
if match:
registry = match.group("registry")
# Must contain '.' or ':' or be 'localhost' to be a real registry
# This prevents treating "myuser/myimage" as having registry "myuser"
if "." in registry or ":" in registry or registry == "localhost":
return registry
return None # No registry = Docker Hub (docker.io)

View File

@@ -1,25 +1,25 @@
"""Core Exceptions."""
from collections.abc import Callable, Mapping
from collections.abc import Callable
from typing import Any
MESSAGE_CHECK_SUPERVISOR_LOGS = (
"Check supervisor logs for details (check with '{logs_command}')"
)
EXTRA_FIELDS_LOGS_COMMAND = {"logs_command": "ha supervisor logs"}
class HassioError(Exception):
"""Root exception."""
error_key: str | None = None
message_template: str | None = None
extra_fields: dict[str, Any] | None = None
def __init__(
self, message: str | None = None, logger: Callable[..., None] | None = None
self,
message: str | None = None,
logger: Callable[..., None] | None = None,
*,
extra_fields: dict[str, Any] | None = None,
) -> None:
"""Raise & log."""
self.extra_fields = extra_fields or {}
if not message and self.message_template:
message = (
self.message_template.format(**self.extra_fields)
@@ -41,94 +41,6 @@ class HassioNotSupportedError(HassioError):
"""Function is not supported."""
# API
class APIError(HassioError, RuntimeError):
"""API errors."""
status = 400
headers: Mapping[str, str] | None = None
def __init__(
self,
message: str | None = None,
logger: Callable[..., None] | None = None,
*,
headers: Mapping[str, str] | None = None,
job_id: str | None = None,
) -> None:
"""Raise & log, optionally with job."""
super().__init__(message, logger)
self.headers = headers
self.job_id = job_id
class APIUnauthorized(APIError):
"""API unauthorized error."""
status = 401
class APIForbidden(APIError):
"""API forbidden error."""
status = 403
class APINotFound(APIError):
"""API not found error."""
status = 404
class APIGone(APIError):
"""API is no longer available."""
status = 410
class APITooManyRequests(APIError):
"""API too many requests error."""
status = 429
class APIInternalServerError(APIError):
"""API internal server error."""
status = 500
class APIAddonNotInstalled(APIError):
"""Not installed addon requested at addons API."""
class APIDBMigrationInProgress(APIError):
"""Service is unavailable due to an offline DB migration is in progress."""
status = 503
class APIUnknownSupervisorError(APIError):
"""Unknown error occurred within supervisor. Adds supervisor check logs rider to message template."""
status = 500
def __init__(
self,
logger: Callable[..., None] | None = None,
*,
job_id: str | None = None,
) -> None:
"""Initialize exception."""
self.message_template = (
f"{self.message_template}. {MESSAGE_CHECK_SUPERVISOR_LOGS}"
)
self.extra_fields = (self.extra_fields or {}) | EXTRA_FIELDS_LOGS_COMMAND
super().__init__(None, logger, job_id=job_id)
# JobManager
@@ -210,13 +122,6 @@ class SupervisorAppArmorError(SupervisorError):
"""Supervisor AppArmor error."""
class SupervisorUnknownError(SupervisorError, APIUnknownSupervisorError):
"""Raise when an unknown error occurs interacting with Supervisor or its container."""
error_key = "supervisor_unknown_error"
message_template = "An unknown error occurred with Supervisor"
class SupervisorJobError(SupervisorError, JobException):
"""Raise on job errors."""
@@ -345,54 +250,6 @@ class AddonConfigurationError(AddonsError):
"""Error with add-on configuration."""
class AddonConfigurationInvalidError(AddonConfigurationError, APIError):
"""Raise if invalid configuration provided for addon."""
error_key = "addon_configuration_invalid_error"
message_template = "Add-on {addon} has invalid options: {validation_error}"
def __init__(
self,
logger: Callable[..., None] | None = None,
*,
addon: str,
validation_error: str,
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon, "validation_error": validation_error}
super().__init__(None, logger)
class AddonBootConfigCannotChangeError(AddonsError, APIError):
"""Raise if user attempts to change addon boot config when it can't be changed."""
error_key = "addon_boot_config_cannot_change_error"
message_template = (
"Addon {addon} boot option is set to {boot_config} so it cannot be changed"
)
def __init__(
self, logger: Callable[..., None] | None = None, *, addon: str, boot_config: str
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon, "boot_config": boot_config}
super().__init__(None, logger)
class AddonNotRunningError(AddonsError, APIError):
"""Raise when an addon is not running."""
error_key = "addon_not_running_error"
message_template = "Add-on {addon} is not running"
def __init__(
self, logger: Callable[..., None] | None = None, *, addon: str
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon}
super().__init__(None, logger)
class AddonNotSupportedError(HassioNotSupportedError):
"""Addon doesn't support a function."""
@@ -411,8 +268,11 @@ class AddonNotSupportedArchitectureError(AddonNotSupportedError):
architectures: list[str],
) -> None:
"""Initialize exception."""
self.extra_fields = {"slug": slug, "architectures": ", ".join(architectures)}
super().__init__(None, logger)
super().__init__(
None,
logger,
extra_fields={"slug": slug, "architectures": ", ".join(architectures)},
)
class AddonNotSupportedMachineTypeError(AddonNotSupportedError):
@@ -429,8 +289,11 @@ class AddonNotSupportedMachineTypeError(AddonNotSupportedError):
machine_types: list[str],
) -> None:
"""Initialize exception."""
self.extra_fields = {"slug": slug, "machine_types": ", ".join(machine_types)}
super().__init__(None, logger)
super().__init__(
None,
logger,
extra_fields={"slug": slug, "machine_types": ", ".join(machine_types)},
)
class AddonNotSupportedHomeAssistantVersionError(AddonNotSupportedError):
@@ -447,96 +310,11 @@ class AddonNotSupportedHomeAssistantVersionError(AddonNotSupportedError):
version: str,
) -> None:
"""Initialize exception."""
self.extra_fields = {"slug": slug, "version": version}
super().__init__(None, logger)
class AddonNotSupportedWriteStdinError(AddonNotSupportedError, APIError):
"""Addon does not support writing to stdin."""
error_key = "addon_not_supported_write_stdin_error"
message_template = "Add-on {addon} does not support writing to stdin"
def __init__(
self, logger: Callable[..., None] | None = None, *, addon: str
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon}
super().__init__(None, logger)
class AddonBuildDockerfileMissingError(AddonNotSupportedError, APIError):
"""Raise when addon build invalid because dockerfile is missing."""
error_key = "addon_build_dockerfile_missing_error"
message_template = (
"Cannot build addon '{addon}' because dockerfile is missing. A repair "
"using '{repair_command}' will fix this if the cause is data "
"corruption. Otherwise please report this to the addon developer."
)
def __init__(
self, logger: Callable[..., None] | None = None, *, addon: str
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon, "repair_command": "ha supervisor repair"}
super().__init__(None, logger)
class AddonBuildArchitectureNotSupportedError(AddonNotSupportedError, APIError):
"""Raise when addon cannot be built on system because it doesn't support its architecture."""
error_key = "addon_build_architecture_not_supported_error"
message_template = (
"Cannot build addon '{addon}' because its supported architectures "
"({addon_arches}) do not match the system supported architectures ({system_arches})"
)
def __init__(
self,
logger: Callable[..., None] | None = None,
*,
addon: str,
addon_arch_list: list[str],
system_arch_list: list[str],
) -> None:
"""Initialize exception."""
self.extra_fields = {
"addon": addon,
"addon_arches": ", ".join(addon_arch_list),
"system_arches": ", ".join(system_arch_list),
}
super().__init__(None, logger)
class AddonUnknownError(AddonsError, APIUnknownSupervisorError):
"""Raise when unknown error occurs taking an action for an addon."""
error_key = "addon_unknown_error"
message_template = "An unknown error occurred with addon {addon}"
def __init__(
self, logger: Callable[..., None] | None = None, *, addon: str
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon}
super().__init__(logger)
class AddonBuildFailedUnknownError(AddonsError, APIUnknownSupervisorError):
"""Raise when the build failed for an addon due to an unknown error."""
error_key = "addon_build_failed_unknown_error"
message_template = (
"An unknown error occurred while trying to build the image for addon {addon}"
)
def __init__(
self, logger: Callable[..., None] | None = None, *, addon: str
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon}
super().__init__(logger)
super().__init__(
None,
logger,
extra_fields={"slug": slug, "version": version},
)
class AddonsJobError(AddonsError, JobException):
@@ -568,64 +346,13 @@ class AuthError(HassioError):
"""Auth errors."""
class AuthPasswordResetError(AuthError, APIError):
class AuthPasswordResetError(HassioError):
"""Auth error if password reset failed."""
error_key = "auth_password_reset_error"
message_template = "Username '{user}' does not exist. Check list of users using '{auth_list_command}'."
def __init__(
self,
logger: Callable[..., None] | None = None,
*,
user: str,
) -> None:
"""Initialize exception."""
self.extra_fields = {"user": user, "auth_list_command": "ha auth list"}
super().__init__(None, logger)
class AuthListUsersError(AuthError, APIUnknownSupervisorError):
class AuthListUsersError(HassioError):
"""Auth error if listing users failed."""
error_key = "auth_list_users_error"
message_template = "Can't request listing users on Home Assistant"
class AuthListUsersNoneResponseError(AuthError, APIInternalServerError):
"""Auth error if listing users returned invalid None response."""
error_key = "auth_list_users_none_response_error"
message_template = "Home Assistant returned invalid response of `{none}` instead of a list of users. Check Home Assistant logs for details (check with `{logs_command}`)"
extra_fields = {"none": "None", "logs_command": "ha core logs"}
def __init__(self, logger: Callable[..., None] | None = None) -> None:
"""Initialize exception."""
super().__init__(None, logger)
class AuthInvalidNonStringValueError(AuthError, APIUnauthorized):
"""Auth error if something besides a string provided as username or password."""
error_key = "auth_invalid_non_string_value_error"
message_template = "Username and password must be strings"
def __init__(
self,
logger: Callable[..., None] | None = None,
*,
headers: Mapping[str, str] | None = None,
) -> None:
"""Initialize exception."""
super().__init__(None, logger, headers=headers)
class AuthHomeAssistantAPIValidationError(AuthError, APIUnknownSupervisorError):
"""Error encountered trying to validate auth details via Home Assistant API."""
error_key = "auth_home_assistant_api_validation_error"
message_template = "Unable to validate authentication details with Home Assistant"
# Host
@@ -658,6 +385,60 @@ class HostLogError(HostError):
"""Internal error with host log."""
# API
class APIError(HassioError, RuntimeError):
"""API errors."""
status = 400
def __init__(
self,
message: str | None = None,
logger: Callable[..., None] | None = None,
*,
job_id: str | None = None,
error: HassioError | None = None,
) -> None:
"""Raise & log, optionally with job."""
# Allow these to be set from another error here since APIErrors essentially wrap others to add a status
self.error_key = error.error_key if error else None
self.message_template = error.message_template if error else None
super().__init__(
message, logger, extra_fields=error.extra_fields if error else None
)
self.job_id = job_id
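A short hypothetical sketch of the wrapping behavior described in the comment above: the API layer catches a domain error and re-raises it as an APIError, which copies the error_key, message_template and extra_fields from the wrapped error.

```python
# Hypothetical handler sketch (not from the repository): wrap a domain error
# in an APIError so the HTTP layer keeps its key, template and extra fields.
try:
    raise DockerHubRateLimitExceeded()
except HassioError as err:
    api_err = APIError(str(err), error=err)
    assert api_err.error_key == "dockerhub_rate_limit_exceeded"
    assert api_err.extra_fields == err.extra_fields
```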
class APIForbidden(APIError):
"""API forbidden error."""
status = 403
class APINotFound(APIError):
"""API not found error."""
status = 404
class APIGone(APIError):
"""API is no longer available."""
status = 410
class APIAddonNotInstalled(APIError):
"""Not installed addon requested at addons API."""
class APIDBMigrationInProgress(APIError):
"""Service is unavailable due to an offline DB migration is in progress."""
status = 503
# Service / Discovery
@@ -835,10 +616,6 @@ class DockerError(HassioError):
"""Docker API/Transport errors."""
class DockerBuildError(DockerError):
"""Docker error during build."""
class DockerAPIError(DockerError):
"""Docker API error."""
@@ -855,10 +632,6 @@ class DockerNotFound(DockerError):
"""Docker object don't Exists."""
class DockerLogOutOfOrder(DockerError):
"""Raise when log from docker action was out of order."""
class DockerNoSpaceOnDevice(DockerError):
"""Raise if a docker pull fails due to available space."""
@@ -870,7 +643,7 @@ class DockerNoSpaceOnDevice(DockerError):
super().__init__(None, logger=logger)
class DockerHubRateLimitExceeded(DockerError, APITooManyRequests):
class DockerHubRateLimitExceeded(DockerError):
"""Raise for docker hub rate limit exceeded error."""
error_key = "dockerhub_rate_limit_exceeded"
@@ -878,13 +651,16 @@ class DockerHubRateLimitExceeded(DockerError, APITooManyRequests):
"Your IP address has made too many requests to Docker Hub which activated a rate limit. "
"For more details see {dockerhub_rate_limit_url}"
)
extra_fields = {
"dockerhub_rate_limit_url": "https://www.home-assistant.io/more-info/dockerhub-rate-limit"
}
def __init__(self, logger: Callable[..., None] | None = None) -> None:
"""Raise & log."""
super().__init__(None, logger=logger)
super().__init__(
None,
logger=logger,
extra_fields={
"dockerhub_rate_limit_url": "https://www.home-assistant.io/more-info/dockerhub-rate-limit"
},
)
class DockerJobError(DockerError, JobException):
@@ -955,32 +731,6 @@ class StoreNotFound(StoreError):
"""Raise if slug is not known."""
class StoreAddonNotFoundError(StoreError, APINotFound):
"""Raise if a requested addon is not in the store."""
error_key = "store_addon_not_found_error"
message_template = "Addon {addon} does not exist in the store"
def __init__(
self, logger: Callable[..., None] | None = None, *, addon: str
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon}
super().__init__(None, logger)
class StoreRepositoryLocalCannotReset(StoreError, APIError):
"""Raise if user requests a reset on the local addon repository."""
error_key = "store_repository_local_cannot_reset"
message_template = "Can't reset repository {local_repo} as it is not git based!"
extra_fields = {"local_repo": "local"}
def __init__(self, logger: Callable[..., None] | None = None) -> None:
"""Initialize exception."""
super().__init__(None, logger)
class StoreJobError(StoreError, JobException):
"""Raise on job error with git."""
@@ -989,18 +739,6 @@ class StoreInvalidAddonRepo(StoreError):
"""Raise on invalid addon repo."""
class StoreRepositoryUnknownError(StoreError, APIUnknownSupervisorError):
"""Raise when unknown error occurs taking an action for a store repository."""
error_key = "store_repository_unknown_error"
message_template = "An unknown error occurred with addon repository {repo}"
def __init__(self, logger: Callable[..., None] | None = None, *, repo: str) -> None:
"""Initialize exception."""
self.extra_fields = {"repo": repo}
super().__init__(logger)
# Backup
@@ -1028,7 +766,7 @@ class BackupJobError(BackupError, JobException):
"""Raise on Backup job error."""
class BackupFileNotFoundError(BackupError, APINotFound):
class BackupFileNotFoundError(BackupError):
"""Raise if the backup file hasn't been found."""
@@ -1040,55 +778,6 @@ class BackupFileExistError(BackupError):
"""Raise if the backup file already exists."""
class AddonBackupMetadataInvalidError(BackupError, APIError):
"""Raise if invalid metadata file provided for addon in backup."""
error_key = "addon_backup_metadata_invalid_error"
message_template = (
"Metadata file for add-on {addon} in backup is invalid: {validation_error}"
)
def __init__(
self,
logger: Callable[..., None] | None = None,
*,
addon: str,
validation_error: str,
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon, "validation_error": validation_error}
super().__init__(None, logger)
class AddonPrePostBackupCommandReturnedError(BackupError, APIError):
"""Raise when addon's pre/post backup command returns an error."""
error_key = "addon_pre_post_backup_command_returned_error"
message_template = (
"Pre-/Post backup command for add-on {addon} returned error code: "
"{exit_code}. Please report this to the addon developer. Enable debug "
"logging to capture complete command output using {debug_logging_command}"
)
def __init__(
self, logger: Callable[..., None] | None = None, *, addon: str, exit_code: int
) -> None:
"""Initialize exception."""
self.extra_fields = {
"addon": addon,
"exit_code": exit_code,
"debug_logging_command": "ha supervisor options --logging debug",
}
super().__init__(None, logger)
class BackupRestoreUnknownError(BackupError, APIUnknownSupervisorError):
"""Raise when an unknown error occurs during backup or restore."""
error_key = "backup_restore_unknown_error"
message_template = "An unknown error occurred during backup/restore"
# Security

View File

@@ -6,6 +6,8 @@ from pathlib import Path
import shutil
from typing import Any
from supervisor.resolution.const import UnhealthyReason
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import (
DBusError,
@@ -13,7 +15,6 @@ from ..exceptions import (
DBusObjectError,
HardwareNotFound,
)
from ..resolution.const import UnhealthyReason
from .const import UdevSubsystem
from .data import Device

View File

@@ -48,7 +48,7 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
SECONDS_BETWEEN_API_CHECKS: Final[int] = 5
# Core Stage 1 and some wiggle room
STARTUP_API_RESPONSE_TIMEOUT: Final[timedelta] = timedelta(minutes=10)
STARTUP_API_RESPONSE_TIMEOUT: Final[timedelta] = timedelta(minutes=3)
# All stages plus event start timeout and some wiggle room
STARTUP_API_CHECK_RUNNING_TIMEOUT: Final[timedelta] = timedelta(minutes=15)
# While database migration is running, the timeout will be extended

View File

@@ -23,7 +23,6 @@ from ..const import (
ATTR_AUDIO_OUTPUT,
ATTR_BACKUPS_EXCLUDE_DATABASE,
ATTR_BOOT,
ATTR_DUPLICATE_LOG_FILE,
ATTR_IMAGE,
ATTR_MESSAGE,
ATTR_PORT,
@@ -300,16 +299,6 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
"""Set whether backups should exclude database by default."""
self._data[ATTR_BACKUPS_EXCLUDE_DATABASE] = value
@property
def duplicate_log_file(self) -> bool:
"""Return True if Home Assistant should duplicate logs to file."""
return self._data[ATTR_DUPLICATE_LOG_FILE]
@duplicate_log_file.setter
def duplicate_log_file(self, value: bool) -> None:
"""Set whether Home Assistant should duplicate logs to file."""
self._data[ATTR_DUPLICATE_LOG_FILE] = value
async def load(self) -> None:
"""Prepare Home Assistant object."""
await asyncio.wait(
@@ -508,7 +497,7 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
tmp_dir=self.sys_config.path_tmp,
)
else:
remove_folder(self.sys_config.path_homeassistant, content_only=True)
remove_folder(self.sys_config.path_homeassistant)
try:
shutil.copytree(

View File

@@ -10,7 +10,6 @@ from ..const import (
ATTR_AUDIO_OUTPUT,
ATTR_BACKUPS_EXCLUDE_DATABASE,
ATTR_BOOT,
ATTR_DUPLICATE_LOG_FILE,
ATTR_IMAGE,
ATTR_PORT,
ATTR_REFRESH_TOKEN,
@@ -37,7 +36,6 @@ SCHEMA_HASS_CONFIG = vol.Schema(
vol.Optional(ATTR_AUDIO_OUTPUT, default=None): vol.Maybe(str),
vol.Optional(ATTR_AUDIO_INPUT, default=None): vol.Maybe(str),
vol.Optional(ATTR_BACKUPS_EXCLUDE_DATABASE, default=False): vol.Boolean(),
vol.Optional(ATTR_DUPLICATE_LOG_FILE, default=False): vol.Boolean(),
vol.Optional(ATTR_OVERRIDE_IMAGE, default=False): vol.Boolean(),
},
extra=vol.REMOVE_EXTRA,

View File

@@ -6,8 +6,8 @@ import logging
import socket
from ..dbus.const import (
ConnectionState,
ConnectionStateFlags,
ConnectionStateType,
DeviceType,
InterfaceAddrGenMode as NMInterfaceAddrGenMode,
InterfaceIp6Privacy as NMInterfaceIp6Privacy,
@@ -267,47 +267,25 @@ class Interface:
return InterfaceMethod.DISABLED
@staticmethod
def _map_nm_addr_gen_mode(addr_gen_mode: int | None) -> InterfaceAddrGenMode:
"""Map IPv6 interface addr_gen_mode.
NetworkManager omits the addr_gen_mode property when set to DEFAULT, so we
treat None as DEFAULT here.
"""
def _map_nm_addr_gen_mode(addr_gen_mode: int) -> InterfaceAddrGenMode:
"""Map IPv6 interface addr_gen_mode."""
mapping = {
NMInterfaceAddrGenMode.EUI64.value: InterfaceAddrGenMode.EUI64,
NMInterfaceAddrGenMode.STABLE_PRIVACY.value: InterfaceAddrGenMode.STABLE_PRIVACY,
NMInterfaceAddrGenMode.DEFAULT_OR_EUI64.value: InterfaceAddrGenMode.DEFAULT_OR_EUI64,
NMInterfaceAddrGenMode.DEFAULT.value: InterfaceAddrGenMode.DEFAULT,
None: InterfaceAddrGenMode.DEFAULT,
}
if addr_gen_mode not in mapping:
_LOGGER.warning(
"Unknown addr_gen_mode value from NetworkManager: %s", addr_gen_mode
)
return mapping.get(addr_gen_mode, InterfaceAddrGenMode.DEFAULT)
@staticmethod
def _map_nm_ip6_privacy(ip6_privacy: int | None) -> InterfaceIp6Privacy:
"""Map IPv6 interface ip6_privacy.
NetworkManager omits the ip6_privacy property when set to DEFAULT, so we
treat None as DEFAULT here.
"""
def _map_nm_ip6_privacy(ip6_privacy: int) -> InterfaceIp6Privacy:
"""Map IPv6 interface ip6_privacy."""
mapping = {
NMInterfaceIp6Privacy.DISABLED.value: InterfaceIp6Privacy.DISABLED,
NMInterfaceIp6Privacy.ENABLED_PREFER_PUBLIC.value: InterfaceIp6Privacy.ENABLED_PREFER_PUBLIC,
NMInterfaceIp6Privacy.ENABLED.value: InterfaceIp6Privacy.ENABLED,
NMInterfaceIp6Privacy.DEFAULT.value: InterfaceIp6Privacy.DEFAULT,
None: InterfaceIp6Privacy.DEFAULT,
}
if ip6_privacy not in mapping:
_LOGGER.warning(
"Unknown ip6_privacy value from NetworkManager: %s", ip6_privacy
)
return mapping.get(ip6_privacy, InterfaceIp6Privacy.DEFAULT)
@staticmethod
@@ -317,8 +295,8 @@ class Interface:
return False
return connection.state in (
ConnectionState.ACTIVATED,
ConnectionState.ACTIVATING,
ConnectionStateType.ACTIVATED,
ConnectionStateType.ACTIVATING,
)
@staticmethod

View File

@@ -5,6 +5,8 @@ from contextlib import suppress
import logging
from typing import Any
from supervisor.utils.sentry import async_capture_exception
from ..const import ATTR_HOST_INTERNET
from ..coresys import CoreSys, CoreSysAttributes
from ..dbus.const import (
@@ -14,7 +16,7 @@ from ..dbus.const import (
DBUS_IFACE_DNS,
DBUS_IFACE_NM,
DBUS_SIGNAL_NM_CONNECTION_ACTIVE_CHANGED,
ConnectionState,
ConnectionStateType,
ConnectivityState,
DeviceType,
WirelessMethodType,
@@ -32,7 +34,6 @@ from ..exceptions import (
from ..jobs.const import JobCondition
from ..jobs.decorator import Job
from ..resolution.checks.network_interface_ipv4 import CheckNetworkInterfaceIPV4
from ..utils.sentry import async_capture_exception
from .configuration import AccessPoint, Interface
from .const import InterfaceMethod, WifiMode
@@ -337,16 +338,16 @@ class NetworkManager(CoreSysAttributes):
# the state change before this point. Get the state currently to
# avoid any race condition.
await con.update()
state: ConnectionState = con.state
state: ConnectionStateType = con.state
while state != ConnectionState.ACTIVATED:
if state == ConnectionState.DEACTIVATED:
while state != ConnectionStateType.ACTIVATED:
if state == ConnectionStateType.DEACTIVATED:
raise HostNetworkError(
"Activating connection failed, check connection settings."
)
msg = await signal.wait_for_signal()
state = ConnectionState(msg[0])
state = msg[0]
_LOGGER.debug("Active connection state changed to %s", state)
# update_only means not done by user so don't force a check afterwards

View File

@@ -102,17 +102,13 @@ class SupervisorJobError:
"Unknown error, see Supervisor logs (check with 'ha supervisor logs')"
)
stage: str | None = None
error_key: str | None = None
extra_fields: dict[str, Any] | None = None
def as_dict(self) -> dict[str, Any]:
def as_dict(self) -> dict[str, str | None]:
"""Return dictionary representation."""
return {
"type": self.type_.__name__,
"message": self.message,
"stage": self.stage,
"error_key": self.error_key,
"extra_fields": self.extra_fields,
}
@@ -162,9 +158,7 @@ class SupervisorJob:
def capture_error(self, err: HassioError | None = None) -> None:
"""Capture an error or record that an unknown error has occurred."""
if err:
new_error = SupervisorJobError(
type(err), str(err), self.stage, err.error_key, err.extra_fields
)
new_error = SupervisorJobError(type(err), str(err), self.stage)
else:
new_error = SupervisorJobError(stage=self.stage)
self.errors += [new_error]

View File

@@ -2,7 +2,7 @@
from ...const import CoreState
from ...coresys import CoreSys
from ...dbus.const import ConnectionState, ConnectionStateFlags
from ...dbus.const import ConnectionStateFlags, ConnectionStateType
from ...dbus.network.interface import NetworkInterface
from ...exceptions import NetworkInterfaceNotFound
from ..const import ContextType, IssueType
@@ -47,7 +47,7 @@ class CheckNetworkInterfaceIPV4(CheckBase):
return not (
interface.connection.state
in [ConnectionState.ACTIVATED, ConnectionState.ACTIVATING]
in [ConnectionStateType.ACTIVATED, ConnectionStateType.ACTIVATING]
and ConnectionStateFlags.IP4_READY in interface.connection.state_flags
)

View File

@@ -5,9 +5,10 @@ import logging
from docker.errors import DockerException
from requests import RequestException
from supervisor.docker.const import ADDON_BUILDER_IMAGE
from ...const import CoreState
from ...coresys import CoreSys
from ...docker.const import ADDON_BUILDER_IMAGE
from ..const import (
ContextType,
IssueType,
@@ -73,9 +74,7 @@ class EvaluateContainer(EvaluateBase):
self._images.clear()
try:
containers = await self.sys_run_in_executor(
self.sys_docker.containers_legacy.list
)
containers = await self.sys_run_in_executor(self.sys_docker.containers.list)
except (DockerException, RequestException) as err:
_LOGGER.error("Corrupt docker overlayfs detect: %s", err)
self.sys_resolution.create_issue(

View File

@@ -1,9 +1,10 @@
"""Evaluation class for restart policy."""
from supervisor.docker.const import RestartPolicy
from supervisor.docker.interface import DockerInterface
from ...const import CoreState
from ...coresys import CoreSys
from ...docker.const import RestartPolicy
from ...docker.interface import DockerInterface
from ..const import UnsupportedReason
from .base import EvaluateBase

View File

@@ -2,9 +2,10 @@
import logging
from supervisor.exceptions import BackupFileNotFoundError
from ...backups.const import BackupType
from ...coresys import CoreSys
from ...exceptions import BackupFileNotFoundError
from ..const import MINIMUM_FULL_BACKUPS, ContextType, IssueType, SuggestionType
from .base import FixupBase

View File

@@ -183,22 +183,19 @@ class GitRepo(CoreSysAttributes):
raise StoreGitError() from err
try:
repo = self.repo
branch = self.repo.active_branch.name
def _fetch_and_check() -> tuple[str, bool]:
"""Fetch from origin and check if changed."""
# This property access is I/O bound
branch = repo.active_branch.name
repo.remotes.origin.fetch(
**{"update-shallow": True, "depth": 1} # type: ignore[arg-type]
# Download data
await self.sys_run_in_executor(
ft.partial(
self.repo.remotes.origin.fetch,
**{"update-shallow": True, "depth": 1}, # type: ignore
)
changed = repo.commit(branch) != repo.commit(f"origin/{branch}")
return branch, changed
)
# Download data and check for changes
branch, changed = await self.sys_run_in_executor(_fetch_and_check)
if changed:
if changed := self.repo.commit(branch) != self.repo.commit(
f"origin/{branch}"
):
# Jump on top of that
await self.sys_run_in_executor(
ft.partial(self.repo.git.reset, f"origin/{branch}", hard=True)
@@ -227,7 +224,6 @@ class GitRepo(CoreSysAttributes):
git.CommandError,
ValueError,
AssertionError,
AttributeError,
UnicodeDecodeError,
) as err:
_LOGGER.error("Can't update %s repo: %s.", self.url, err)

View File

@@ -8,6 +8,8 @@ from pathlib import Path
import voluptuous as vol
from supervisor.utils import get_latest_mtime
from ..const import (
ATTR_MAINTAINER,
ATTR_NAME,
@@ -17,14 +19,7 @@ from ..const import (
REPOSITORY_LOCAL,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import (
ConfigurationFileError,
StoreError,
StoreGitError,
StoreRepositoryLocalCannotReset,
StoreRepositoryUnknownError,
)
from ..utils import get_latest_mtime
from ..exceptions import ConfigurationFileError, StoreError
from ..utils.common import read_json_or_yaml_file
from .const import BuiltinRepository
from .git import GitRepo
@@ -203,12 +198,8 @@ class RepositoryGit(Repository, ABC):
async def reset(self) -> None:
"""Reset add-on repository to fix corruption issue with files."""
try:
await self._git.reset()
await self.load()
except StoreGitError as err:
_LOGGER.error("Can't reset repository %s: %s", self.slug, err)
raise StoreRepositoryUnknownError(repo=self.slug) from err
await self._git.reset()
await self.load()
class RepositoryLocal(RepositoryBuiltin):
@@ -247,7 +238,9 @@ class RepositoryLocal(RepositoryBuiltin):
async def reset(self) -> None:
"""Raise. Not supported for local repository."""
raise StoreRepositoryLocalCannotReset(_LOGGER.error)
raise StoreError(
"Can't reset local repository as it is not git based!", _LOGGER.error
)
class RepositoryGitBuiltin(RepositoryBuiltin, RepositoryGit):

View File

@@ -13,6 +13,8 @@ import aiohttp
from aiohttp.client_exceptions import ClientError
from awesomeversion import AwesomeVersion, AwesomeVersionException
from supervisor.jobs import ChildJobSyncFilter
from .const import (
ATTR_SUPERVISOR_INTERNET,
SUPERVISOR_VERSION,
@@ -26,11 +28,10 @@ from .exceptions import (
DockerError,
HostAppArmorError,
SupervisorAppArmorError,
SupervisorError,
SupervisorJobError,
SupervisorUnknownError,
SupervisorUpdateError,
)
from .jobs import ChildJobSyncFilter
from .jobs.const import JobCondition, JobThrottle
from .jobs.decorator import Job
from .resolution.const import ContextType, IssueType, UnhealthyReason
@@ -260,7 +261,7 @@ class Supervisor(CoreSysAttributes):
try:
return await self.instance.stats()
except DockerError as err:
raise SupervisorUnknownError() from err
raise SupervisorError() from err
async def repair(self):
"""Repair local Supervisor data."""

View File

@@ -8,6 +8,8 @@ import logging
import aiohttp
from awesomeversion import AwesomeVersion
from supervisor.jobs.const import JobConcurrency, JobThrottle
from .bus import EventListener
from .const import (
ATTR_AUDIO,
@@ -30,7 +32,6 @@ from .const import (
)
from .coresys import CoreSys, CoreSysAttributes
from .exceptions import UpdaterError, UpdaterJobError
from .jobs.const import JobConcurrency, JobThrottle
from .jobs.decorator import Job, JobCondition
from .utils.common import FileConfiguration
from .validate import SCHEMA_UPDATER_CONFIG

View File

@@ -9,8 +9,8 @@ import re
from aiohttp import ClientResponse
from ..exceptions import MalformedBinaryEntryError
from ..host.const import LogFormatter
from supervisor.exceptions import MalformedBinaryEntryError
from supervisor.host.const import LogFormatter
_RE_ANSI_CSI_COLORS_PATTERN = re.compile(r"\x1B\[[0-9;]*m")

View File

@@ -5,7 +5,6 @@ from datetime import timedelta
import errno
from http import HTTPStatus
from pathlib import Path
from typing import Any
from unittest.mock import MagicMock, PropertyMock, call, patch
import aiodocker
@@ -24,13 +23,7 @@ from supervisor.docker.addon import DockerAddon
from supervisor.docker.const import ContainerState
from supervisor.docker.manager import CommandReturn, DockerAPI
from supervisor.docker.monitor import DockerContainerStateEvent
from supervisor.exceptions import (
AddonPrePostBackupCommandReturnedError,
AddonsJobError,
AddonUnknownError,
AudioUpdateError,
HassioError,
)
from supervisor.exceptions import AddonsError, AddonsJobError, AudioUpdateError
from supervisor.hardware.helper import HwHelper
from supervisor.ingress import Ingress
from supervisor.store.repository import Repository
@@ -227,7 +220,7 @@ async def test_listener_attached_on_install(
container_collection.get.side_effect = DockerException()
with (
patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
"supervisor.docker.manager.DockerAPI.containers",
new=PropertyMock(return_value=container_collection),
),
patch("pathlib.Path.is_dir", return_value=True),
@@ -509,26 +502,31 @@ async def test_backup_with_pre_post_command(
@pytest.mark.parametrize(
("container_get_side_effect", "exec_run_side_effect", "exc_type_raised"),
"get_error,exception_on_exec",
[
(NotFound("missing"), [(1, None)], AddonUnknownError),
(DockerException(), [(1, None)], AddonUnknownError),
(None, DockerException(), AddonUnknownError),
(None, [(1, None)], AddonPrePostBackupCommandReturnedError),
(NotFound("missing"), False),
(DockerException(), False),
(None, True),
(None, False),
],
)
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_backup_with_pre_command_error(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
container_get_side_effect: DockerException | None,
exec_run_side_effect: DockerException | list[tuple[int, Any]],
exc_type_raised: type[HassioError],
get_error: DockerException | None,
exception_on_exec: bool,
tmp_supervisor_data,
path_extern,
) -> None:
"""Test backing up an addon with error running pre command."""
coresys.docker.containers_legacy.get.side_effect = container_get_side_effect
container.exec_run.side_effect = exec_run_side_effect
if get_error:
coresys.docker.containers.get.side_effect = get_error
if exception_on_exec:
container.exec_run.side_effect = DockerException()
else:
container.exec_run.return_value = (1, None)
install_addon_ssh.path_data.mkdir()
await install_addon_ssh.load()
@@ -537,7 +535,7 @@ async def test_backup_with_pre_command_error(
with (
patch.object(DockerAddon, "is_running", return_value=True),
patch.object(Addon, "backup_pre", new=PropertyMock(return_value="backup_pre")),
pytest.raises(exc_type_raised),
pytest.raises(AddonsError),
):
assert await install_addon_ssh.backup(tarfile) is None
@@ -949,7 +947,7 @@ async def test_addon_load_succeeds_with_docker_errors(
)
caplog.clear()
await install_addon_ssh.load()
assert "Cannot build addon 'local_ssh' because dockerfile is missing" in caplog.text
assert "Invalid build environment" in caplog.text
# Image build failure
caplog.clear()

View File

@@ -6,13 +6,11 @@ from pathlib import Path
from unittest.mock import PropertyMock, patch
from awesomeversion import AwesomeVersion
import pytest
from supervisor.addons.addon import Addon
from supervisor.addons.build import AddonBuild
from supervisor.coresys import CoreSys
from supervisor.docker.const import DOCKER_HUB
from supervisor.exceptions import AddonBuildDockerfileMissingError
from tests.common import is_in_list
@@ -108,11 +106,11 @@ async def test_build_valid(coresys: CoreSys, install_addon_ssh: Addon):
type(coresys.arch), "default", new=PropertyMock(return_value="aarch64")
),
):
assert (await build.is_valid()) is None
assert await build.is_valid()
async def test_build_invalid(coresys: CoreSys, install_addon_ssh: Addon):
"""Test build not supported because Dockerfile missing for specified architecture."""
"""Test platform set in docker args."""
build = await AddonBuild(coresys, install_addon_ssh).load_config()
with (
patch.object(
@@ -121,9 +119,8 @@ async def test_build_invalid(coresys: CoreSys, install_addon_ssh: Addon):
patch.object(
type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
),
pytest.raises(AddonBuildDockerfileMissingError),
):
await build.is_valid()
assert not await build.is_valid()
async def test_docker_config_no_registries(coresys: CoreSys, install_addon_ssh: Addon):

View File

@@ -5,7 +5,6 @@ from unittest.mock import MagicMock, PropertyMock, patch
from aiohttp import ClientResponse
from aiohttp.test_utils import TestClient
from docker.errors import DockerException
import pytest
from supervisor.addons.addon import Addon
@@ -478,11 +477,6 @@ async def test_addon_options_boot_mode_manual_only_invalid(
body["message"]
== "Addon local_example boot option is set to manual_only so it cannot be changed"
)
assert body["error_key"] == "addon_boot_config_cannot_change_error"
assert body["extra_fields"] == {
"addon": "local_example",
"boot_config": "manual_only",
}
async def get_message(resp: ClientResponse, json_expected: bool) -> str:
@@ -551,154 +545,3 @@ async def test_addon_not_installed(
resp = await api_client.request(method, url)
assert resp.status == 400
assert await get_message(resp, json_expected) == "Addon is not installed"
async def test_addon_set_options(api_client: TestClient, install_addon_example: Addon):
"""Test setting options for an addon."""
resp = await api_client.post(
"/addons/local_example/options", json={"options": {"message": "test"}}
)
assert resp.status == 200
assert install_addon_example.options == {"message": "test"}
async def test_addon_reset_options(
api_client: TestClient, install_addon_example: Addon
):
"""Test resetting options for an addon to defaults.
Fixes SUPERVISOR-171F.
"""
# First set some custom options
install_addon_example.options = {"message": "custom"}
assert install_addon_example.persist["options"] == {"message": "custom"}
# Reset to defaults by sending null
resp = await api_client.post(
"/addons/local_example/options", json={"options": None}
)
assert resp.status == 200
# Persisted options should be empty (meaning defaults will be used)
assert install_addon_example.persist["options"] == {}
async def test_addon_set_options_error(
api_client: TestClient, install_addon_example: Addon
):
"""Test setting options for an addon."""
resp = await api_client.post(
"/addons/local_example/options", json={"options": {"message": True}}
)
assert resp.status == 400
body = await resp.json()
assert (
body["message"]
== "Add-on local_example has invalid options: not a valid value. Got {'message': True}"
)
assert body["error_key"] == "addon_configuration_invalid_error"
assert body["extra_fields"] == {
"addon": "local_example",
"validation_error": "not a valid value. Got {'message': True}",
}
async def test_addon_start_options_error(
api_client: TestClient,
install_addon_example: Addon,
caplog: pytest.LogCaptureFixture,
):
"""Test error writing options when trying to start addon."""
install_addon_example.options = {"message": "hello"}
# Simulate OS error trying to write the file
with patch("supervisor.utils.json.atomic_write", side_effect=OSError("fail")):
resp = await api_client.post("/addons/local_example/start")
assert resp.status == 500
body = await resp.json()
assert (
body["message"]
== "An unknown error occurred with addon local_example. Check supervisor logs for details (check with 'ha supervisor logs')"
)
assert body["error_key"] == "addon_unknown_error"
assert body["extra_fields"] == {
"addon": "local_example",
"logs_command": "ha supervisor logs",
}
assert "Add-on local_example can't write options" in caplog.text
# Simulate an update with a breaking change for options schema creating failure on start
caplog.clear()
install_addon_example.data["schema"] = {"message": "bool"}
resp = await api_client.post("/addons/local_example/start")
assert resp.status == 400
body = await resp.json()
assert (
body["message"]
== "Add-on local_example has invalid options: expected boolean. Got {'message': 'hello'}"
)
assert body["error_key"] == "addon_configuration_invalid_error"
assert body["extra_fields"] == {
"addon": "local_example",
"validation_error": "expected boolean. Got {'message': 'hello'}",
}
assert (
"Add-on local_example has invalid options: expected boolean. Got {'message': 'hello'}"
in caplog.text
)
@pytest.mark.parametrize(("method", "action"), [("get", "stats"), ("post", "stdin")])
@pytest.mark.usefixtures("install_addon_example")
async def test_addon_not_running_error(
api_client: TestClient, method: str, action: str
):
"""Test addon not running error for endpoints that require that."""
with patch.object(Addon, "with_stdin", new=PropertyMock(return_value=True)):
resp = await api_client.request(method, f"/addons/local_example/{action}")
assert resp.status == 400
body = await resp.json()
assert body["message"] == "Add-on local_example is not running"
assert body["error_key"] == "addon_not_running_error"
assert body["extra_fields"] == {"addon": "local_example"}
@pytest.mark.usefixtures("install_addon_example")
async def test_addon_write_stdin_not_supported_error(api_client: TestClient):
"""Test error when trying to write stdin to addon that does not support it."""
resp = await api_client.post("/addons/local_example/stdin")
assert resp.status == 400
body = await resp.json()
assert body["message"] == "Add-on local_example does not support writing to stdin"
assert body["error_key"] == "addon_not_supported_write_stdin_error"
assert body["extra_fields"] == {"addon": "local_example"}
@pytest.mark.usefixtures("install_addon_ssh")
async def test_addon_rebuild_fails_error(api_client: TestClient, coresys: CoreSys):
"""Test error when build fails during rebuild for addon."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
coresys.docker.containers_legacy.run.side_effect = DockerException("fail")
with (
patch.object(
CpuArchManager, "supported", new=PropertyMock(return_value=["aarch64"])
),
patch.object(
CpuArchManager, "default", new=PropertyMock(return_value="aarch64")
),
patch.object(AddonBuild, "get_docker_args", return_value={}),
):
resp = await api_client.post("/addons/local_ssh/rebuild")
assert resp.status == 500
body = await resp.json()
assert (
body["message"]
== "An unknown error occurred while trying to build the image for addon local_ssh. Check supervisor logs for details (check with 'ha supervisor logs')"
)
assert body["error_key"] == "addon_build_failed_unknown_error"
assert body["extra_fields"] == {
"addon": "local_ssh",
"logs_command": "ha supervisor logs",
}

View File

@@ -1,7 +1,6 @@
"""Test auth API."""
from datetime import UTC, datetime, timedelta
from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch
from aiohttp.hdrs import WWW_AUTHENTICATE
@@ -10,8 +9,6 @@ import pytest
from supervisor.addons.addon import Addon
from supervisor.coresys import CoreSys
from supervisor.exceptions import HomeAssistantAPIError, HomeAssistantWSError
from supervisor.homeassistant.api import HomeAssistantAPI
from tests.common import MockResponse
from tests.const import TEST_ADDON_SLUG
@@ -103,52 +100,6 @@ async def test_password_reset(
assert "Successful password reset for 'john'" in caplog.text
@pytest.mark.parametrize(
("post_mock", "expected_log"),
[
(
MagicMock(return_value=MockResponse(status=400)),
"The user 'john' is not registered",
),
(
MagicMock(side_effect=HomeAssistantAPIError("fail")),
"Can't request password reset on Home Assistant: fail",
),
],
)
async def test_failed_password_reset(
api_client: TestClient,
coresys: CoreSys,
caplog: pytest.LogCaptureFixture,
websession: MagicMock,
post_mock: MagicMock,
expected_log: str,
):
"""Test failed password reset."""
coresys.homeassistant.api.access_token = "abc123"
# pylint: disable-next=protected-access
coresys.homeassistant.api._access_token_expires = datetime.now(tz=UTC) + timedelta(
days=1
)
websession.post = post_mock
resp = await api_client.post(
"/auth/reset", json={"username": "john", "password": "doe"}
)
assert resp.status == 400
body = await resp.json()
assert (
body["message"]
== "Username 'john' does not exist. Check list of users using 'ha auth list'."
)
assert body["error_key"] == "auth_password_reset_error"
assert body["extra_fields"] == {
"user": "john",
"auth_list_command": "ha auth list",
}
assert expected_log in caplog.text
async def test_list_users(
api_client: TestClient, coresys: CoreSys, ha_ws_client: AsyncMock
):
@@ -169,48 +120,6 @@ async def test_list_users(
]
@pytest.mark.parametrize(
("send_command_mock", "error_response", "expected_log"),
[
(
AsyncMock(return_value=None),
{
"result": "error",
"message": "Home Assistant returned invalid response of `None` instead of a list of users. Check Home Assistant logs for details (check with `ha core logs`)",
"error_key": "auth_list_users_none_response_error",
"extra_fields": {"none": "None", "logs_command": "ha core logs"},
},
"Home Assistant returned invalid response of `None` instead of a list of users. Check Home Assistant logs for details (check with `ha core logs`)",
),
(
AsyncMock(side_effect=HomeAssistantWSError("fail")),
{
"result": "error",
"message": "Can't request listing users on Home Assistant. Check supervisor logs for details (check with 'ha supervisor logs')",
"error_key": "auth_list_users_error",
"extra_fields": {"logs_command": "ha supervisor logs"},
},
"Can't request listing users on Home Assistant: fail",
),
],
)
async def test_list_users_failure(
api_client: TestClient,
ha_ws_client: AsyncMock,
caplog: pytest.LogCaptureFixture,
send_command_mock: AsyncMock,
error_response: dict[str, Any],
expected_log: str,
):
"""Test failure listing users via API."""
ha_ws_client.async_send_command = send_command_mock
resp = await api_client.get("/auth/list")
assert resp.status == 500
result = await resp.json()
assert result == error_response
assert expected_log in caplog.text
@pytest.mark.parametrize(
("field", "api_client"),
[("username", TEST_ADDON_SLUG), ("user", TEST_ADDON_SLUG)],
@@ -247,13 +156,6 @@ async def test_auth_json_failure_none(
mock_check_login.return_value = True
resp = await api_client.post("/auth", json={"username": user, "password": password})
assert resp.status == 401
assert (
resp.headers["WWW-Authenticate"]
== 'Basic realm="Home Assistant Authentication"'
)
body = await resp.json()
assert body["message"] == "Username and password must be strings"
assert body["error_key"] == "auth_invalid_non_string_value_error"
@pytest.mark.parametrize("api_client", [TEST_ADDON_SLUG], indirect=True)
@@ -365,26 +267,3 @@ async def test_non_addon_token_no_auth_access(api_client: TestClient):
"""Test auth where add-on is not allowed to access auth API."""
resp = await api_client.post("/auth", json={"username": "test", "password": "pass"})
assert resp.status == 403
@pytest.mark.parametrize("api_client", [TEST_ADDON_SLUG], indirect=True)
@pytest.mark.usefixtures("install_addon_ssh")
async def test_auth_backend_login_failure(api_client: TestClient):
"""Test backend login failure on auth."""
with (
patch.object(HomeAssistantAPI, "check_api_state", return_value=True),
patch.object(
HomeAssistantAPI, "make_request", side_effect=HomeAssistantAPIError("fail")
),
):
resp = await api_client.post(
"/auth", json={"username": "test", "password": "pass"}
)
assert resp.status == 500
body = await resp.json()
assert (
body["message"]
== "Unable to validate authentication details with Home Assistant. Check supervisor logs for details (check with 'ha supervisor logs')"
)
assert body["error_key"] == "auth_home_assistant_api_validation_error"
assert body["extra_fields"] == {"logs_command": "ha supervisor logs"}

View File

@@ -17,7 +17,6 @@ from supervisor.const import CoreState
from supervisor.coresys import CoreSys
from supervisor.docker.manager import DockerAPI
from supervisor.exceptions import (
AddonPrePostBackupCommandReturnedError,
AddonsError,
BackupInvalidError,
HomeAssistantBackupError,
@@ -25,7 +24,6 @@ from supervisor.exceptions import (
from supervisor.homeassistant.core import HomeAssistantCore
from supervisor.homeassistant.module import HomeAssistant
from supervisor.homeassistant.websocket import HomeAssistantWebSocket
from supervisor.jobs import SupervisorJob
from supervisor.mounts.mount import Mount
from supervisor.supervisor import Supervisor
@@ -403,8 +401,6 @@ async def test_api_backup_errors(
"type": "BackupError",
"message": str(err),
"stage": None,
"error_key": None,
"extra_fields": None,
}
]
assert job["child_jobs"][2]["name"] == "backup_store_folders"
@@ -441,8 +437,6 @@ async def test_api_backup_errors(
"type": "HomeAssistantBackupError",
"message": "Backup error",
"stage": "home_assistant",
"error_key": None,
"extra_fields": None,
}
]
assert job["child_jobs"][0]["name"] == "backup_store_homeassistant"
@@ -451,8 +445,6 @@ async def test_api_backup_errors(
"type": "HomeAssistantBackupError",
"message": "Backup error",
"stage": None,
"error_key": None,
"extra_fields": None,
}
]
assert len(job["child_jobs"]) == 1
@@ -757,8 +749,6 @@ async def test_backup_to_multiple_locations_error_on_copy(
"type": "BackupError",
"message": "Could not copy backup to .cloud_backup due to: ",
"stage": None,
"error_key": None,
"extra_fields": None,
}
]
@@ -1201,8 +1191,10 @@ async def test_restore_homeassistant_adds_env(
assert docker.containers.create.call_args.kwargs["name"] == "homeassistant"
assert (
f"SUPERVISOR_RESTORE_JOB_ID={job.uuid}"
in docker.containers.create.call_args.args[0]["Env"]
docker.containers.create.call_args.kwargs["environment"][
"SUPERVISOR_RESTORE_JOB_ID"
]
== job.uuid
)
@@ -1491,44 +1483,3 @@ async def test_immediate_list_after_missing_file_restore(
result = await resp.json()
assert len(result["data"]["backups"]) == 1
assert result["data"]["backups"][0]["slug"] == "93b462f8"
@pytest.mark.parametrize("command", ["backup_pre", "backup_post"])
@pytest.mark.usefixtures("install_addon_example", "tmp_supervisor_data")
async def test_pre_post_backup_command_error(
api_client: TestClient, coresys: CoreSys, container: MagicMock, command: str
):
"""Test pre/post backup command error."""
await coresys.core.set_state(CoreState.RUNNING)
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
container.status = "running"
container.exec_run.return_value = (1, b"")
with patch.object(Addon, command, new=PropertyMock(return_value="test")):
resp = await api_client.post(
"/backups/new/partial", json={"addons": ["local_example"]}
)
assert resp.status == 200
body = await resp.json()
job_id = body["data"]["job_id"]
job: SupervisorJob | None = None
for j in coresys.jobs.jobs:
if j.name == "backup_store_addons" and j.parent_id == job_id:
job = j
break
assert job
assert job.done is True
assert job.errors[0].type_ == AddonPrePostBackupCommandReturnedError
assert job.errors[0].message == (
"Pre-/Post backup command for add-on local_example returned error code: "
"1. Please report this to the addon developer. Enable debug "
"logging to capture complete command output using ha supervisor options --logging debug"
)
assert job.errors[0].error_key == "addon_pre_post_backup_command_returned_error"
assert job.errors[0].extra_fields == {
"addon": "local_example",
"exit_code": 1,
"debug_logging_command": "ha supervisor options --logging debug",
}

View File

@@ -118,6 +118,15 @@ async def test_api_migrate_docker_storage_driver(
in coresys.resolution.suggestions
)
# Test migration back to overlay2 (graph driver)
system_service.MigrateDockerStorageDriver.calls.clear()
resp = await api_client.post(
"/docker/migrate-storage-driver",
json={"storage_driver": "overlay2"},
)
assert resp.status == 200
assert system_service.MigrateDockerStorageDriver.calls == [("overlay2",)]
@pytest.mark.parametrize("os_available", ["17.0.rc1"], indirect=True)
async def test_api_migrate_docker_storage_driver_invalid_backend(

View File

@@ -35,9 +35,9 @@ async def test_api_core_logs(
async def test_api_stats(api_client: TestClient, coresys: CoreSys):
"""Test stats."""
coresys.docker.containers_legacy.get.return_value.status = "running"
coresys.docker.containers_legacy.get.return_value.stats.return_value = (
load_json_fixture("container_stats.json")
coresys.docker.containers.get.return_value.status = "running"
coresys.docker.containers.get.return_value.stats.return_value = load_json_fixture(
"container_stats.json"
)
resp = await api_client.get("/homeassistant/stats")
@@ -138,14 +138,14 @@ async def test_api_rebuild(
await api_client.post("/homeassistant/rebuild")
assert container.remove.call_count == 2
coresys.docker.containers.create.return_value.start.assert_called_once()
container.start.assert_called_once()
assert not safe_mode_marker.exists()
with patch.object(HomeAssistantCore, "_block_till_run"):
await api_client.post("/homeassistant/rebuild", json={"safe_mode": True})
assert container.remove.call_count == 4
assert coresys.docker.containers.create.return_value.start.call_count == 2
assert container.start.call_count == 2
assert safe_mode_marker.exists()
@@ -305,6 +305,8 @@ async def test_api_progress_updates_home_assistant_update(
and evt.args[0]["data"]["event"] == WSEvent.JOB
and evt.args[0]["data"]["data"]["name"] == "home_assistant_core_update"
]
# Count-based progress: 2 layers need pulling (each worth 50%)
# Layers that already exist are excluded from progress calculation
assert events[:5] == [
{
"stage": None,
@@ -318,36 +320,36 @@ async def test_api_progress_updates_home_assistant_update(
},
{
"stage": None,
"progress": 0.1,
"progress": 9.2,
"done": False,
},
{
"stage": None,
"progress": 1.7,
"progress": 25.6,
"done": False,
},
{
"stage": None,
"progress": 4.0,
"progress": 35.4,
"done": False,
},
]
assert events[-5:] == [
{
"stage": None,
"progress": 95.5,
"done": False,
},
{
"stage": None,
"progress": 96.9,
"done": False,
},
{
"stage": None,
"progress": 98.2,
"done": False,
},
{
"stage": None,
"progress": 98.3,
"done": False,
},
{
"stage": None,
"progress": 99.3,
"done": False,
},
{
"stage": None,
"progress": 100,

View File

@@ -374,8 +374,6 @@ async def test_job_with_error(
"type": "SupervisorError",
"message": "bad",
"stage": "test",
"error_key": None,
"extra_fields": None,
}
],
"child_jobs": [
@@ -393,8 +391,6 @@ async def test_job_with_error(
"type": "SupervisorError",
"message": "bad",
"stage": None,
"error_key": None,
"extra_fields": None,
}
],
"child_jobs": [],

View File

@@ -4,6 +4,7 @@ import asyncio
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
from aiohttp import ClientResponse
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
import pytest
@@ -18,11 +19,8 @@ from supervisor.docker.addon import DockerAddon
from supervisor.docker.const import ContainerState
from supervisor.docker.interface import DockerInterface
from supervisor.docker.monitor import DockerContainerStateEvent
from supervisor.exceptions import StoreGitError
from supervisor.homeassistant.const import WSEvent
from supervisor.homeassistant.module import HomeAssistant
from supervisor.resolution.const import ContextType, IssueType, SuggestionType
from supervisor.resolution.data import Issue, Suggestion
from supervisor.store.addon import AddonStore
from supervisor.store.repository import Repository
@@ -127,81 +125,6 @@ async def test_api_store_remove_repository(
assert test_repository.slug not in coresys.store.repositories
@pytest.mark.parametrize("repo", ["core", "a474bbd1"])
@pytest.mark.usefixtures("test_repository")
async def test_api_store_repair_repository(api_client: TestClient, repo: str):
"""Test POST /store/repositories/{repository}/repair REST API."""
with patch("supervisor.store.repository.RepositoryGit.reset") as mock_reset:
response = await api_client.post(f"/store/repositories/{repo}/repair")
assert response.status == 200
mock_reset.assert_called_once()
@pytest.mark.parametrize(
"issue_type", [IssueType.CORRUPT_REPOSITORY, IssueType.FATAL_ERROR]
)
@pytest.mark.usefixtures("test_repository")
async def test_api_store_repair_repository_removes_suggestion(
api_client: TestClient,
coresys: CoreSys,
test_repository: Repository,
issue_type: IssueType,
):
"""Test POST /store/repositories/core/repair REST API removes EXECUTE_RESET suggestions."""
issue = Issue(issue_type, ContextType.STORE, reference=test_repository.slug)
suggestion = Suggestion(
SuggestionType.EXECUTE_RESET, ContextType.STORE, reference=test_repository.slug
)
coresys.resolution.add_issue(issue, suggestions=[SuggestionType.EXECUTE_RESET])
with patch("supervisor.store.repository.RepositoryGit.reset") as mock_reset:
response = await api_client.post(
f"/store/repositories/{test_repository.slug}/repair"
)
assert response.status == 200
mock_reset.assert_called_once()
assert issue not in coresys.resolution.issues
assert suggestion not in coresys.resolution.suggestions
@pytest.mark.usefixtures("test_repository")
async def test_api_store_repair_repository_local_fail(api_client: TestClient):
"""Test POST /store/repositories/local/repair REST API fails."""
response = await api_client.post("/store/repositories/local/repair")
assert response.status == 400
result = await response.json()
assert result["error_key"] == "store_repository_local_cannot_reset"
assert result["extra_fields"] == {"local_repo": "local"}
assert result["message"] == "Can't reset repository local as it is not git based!"
async def test_api_store_repair_repository_git_error(
api_client: TestClient, test_repository: Repository
):
"""Test POST /store/repositories/{repository}/repair REST API git error."""
with patch(
"supervisor.store.git.GitRepo.reset",
side_effect=StoreGitError("Git error"),
):
response = await api_client.post(
f"/store/repositories/{test_repository.slug}/repair"
)
assert response.status == 500
result = await response.json()
assert result["error_key"] == "store_repository_unknown_error"
assert result["extra_fields"] == {
"repo": test_repository.slug,
"logs_command": "ha supervisor logs",
}
assert (
result["message"]
== f"An unknown error occurred with addon repository {test_repository.slug}. Check supervisor logs for details (check with 'ha supervisor logs')"
)
async def test_api_store_update_healthcheck(
api_client: TestClient,
coresys: CoreSys,
@@ -369,6 +292,14 @@ async def test_api_detached_addon_documentation(
assert result == "Addon local_ssh does not exist in the store"
async def get_message(resp: ClientResponse, json_expected: bool) -> str:
"""Get message from response based on response type."""
if json_expected:
body = await resp.json()
return body["message"]
return await resp.text()
@pytest.mark.parametrize(
("method", "url", "json_expected"),
[
@@ -394,13 +325,7 @@ async def test_store_addon_not_found(
"""Test store addon not found error."""
resp = await api_client.request(method, url)
assert resp.status == 404
if json_expected:
body = await resp.json()
assert body["message"] == "Addon bad does not exist in the store"
assert body["error_key"] == "store_addon_not_found_error"
assert body["extra_fields"] == {"addon": "bad"}
else:
assert await resp.text() == "Addon bad does not exist in the store"
assert await get_message(resp, json_expected) == "Addon bad does not exist"
@pytest.mark.parametrize(
@@ -839,6 +764,8 @@ async def test_api_progress_updates_addon_install_update(
and evt.args[0]["data"]["data"]["name"] == job_name
and evt.args[0]["data"]["data"]["reference"] == addon_slug
]
# Count-based progress: 2 layers need pulling (each worth 50%)
# Layers that already exist are excluded from progress calculation
assert events[:4] == [
{
"stage": None,
@@ -847,36 +774,36 @@ async def test_api_progress_updates_addon_install_update(
},
{
"stage": None,
"progress": 0.1,
"progress": 9.2,
"done": False,
},
{
"stage": None,
"progress": 1.7,
"progress": 25.6,
"done": False,
},
{
"stage": None,
"progress": 4.0,
"progress": 35.4,
"done": False,
},
]
assert events[-5:] == [
{
"stage": None,
"progress": 95.5,
"done": False,
},
{
"stage": None,
"progress": 96.9,
"done": False,
},
{
"stage": None,
"progress": 98.2,
"done": False,
},
{
"stage": None,
"progress": 98.3,
"done": False,
},
{
"stage": None,
"progress": 99.3,
"done": False,
},
{
"stage": None,
"progress": 100,

View File

@@ -7,7 +7,6 @@ from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
from blockbuster import BlockingError
from docker.errors import DockerException
import pytest
from supervisor.const import CoreState
@@ -359,6 +358,8 @@ async def test_api_progress_updates_supervisor_update(
and evt.args[0]["data"]["event"] == WSEvent.JOB
and evt.args[0]["data"]["data"]["name"] == "supervisor_update"
]
# Count-based progress: 2 layers need pulling (each worth 50%)
# Layers that already exist are excluded from progress calculation
assert events[:4] == [
{
"stage": None,
@@ -367,36 +368,36 @@ async def test_api_progress_updates_supervisor_update(
},
{
"stage": None,
"progress": 0.1,
"progress": 9.2,
"done": False,
},
{
"stage": None,
"progress": 1.7,
"progress": 25.6,
"done": False,
},
{
"stage": None,
"progress": 4.0,
"progress": 35.4,
"done": False,
},
]
assert events[-5:] == [
{
"stage": None,
"progress": 95.5,
"done": False,
},
{
"stage": None,
"progress": 96.9,
"done": False,
},
{
"stage": None,
"progress": 98.2,
"done": False,
},
{
"stage": None,
"progress": 98.3,
"done": False,
},
{
"stage": None,
"progress": 99.3,
"done": False,
},
{
"stage": None,
"progress": 100,
@@ -408,37 +409,3 @@ async def test_api_progress_updates_supervisor_update(
"done": True,
},
]
async def test_api_supervisor_stats(api_client: TestClient, coresys: CoreSys):
"""Test supervisor stats."""
coresys.docker.containers_legacy.get.return_value.status = "running"
coresys.docker.containers_legacy.get.return_value.stats.return_value = (
load_json_fixture("container_stats.json")
)
resp = await api_client.get("/supervisor/stats")
assert resp.status == 200
result = await resp.json()
assert result["data"]["cpu_percent"] == 90.0
assert result["data"]["memory_usage"] == 59700000
assert result["data"]["memory_limit"] == 4000000000
assert result["data"]["memory_percent"] == 1.49
async def test_supervisor_api_stats_failure(
api_client: TestClient, coresys: CoreSys, caplog: pytest.LogCaptureFixture
):
"""Test supervisor stats failure."""
coresys.docker.containers_legacy.get.side_effect = DockerException("fail")
resp = await api_client.get("/supervisor/stats")
assert resp.status == 500
body = await resp.json()
assert (
body["message"]
== "An unknown error occurred with Supervisor. Check supervisor logs for details (check with 'ha supervisor logs')"
)
assert body["error_key"] == "supervisor_unknown_error"
assert body["extra_fields"] == {"logs_command": "ha supervisor logs"}
assert "Could not inspect container 'hassio_supervisor': fail" in caplog.text

View File

@@ -9,7 +9,6 @@ import subprocess
from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch
from uuid import uuid4
from aiodocker.containers import DockerContainer, DockerContainers
from aiodocker.docker import DockerImages
from aiohttp import ClientSession, web
from aiohttp.test_utils import TestClient
@@ -121,13 +120,11 @@ async def docker() -> DockerAPI:
"Id": "test123",
"RepoTags": ["ghcr.io/home-assistant/amd64-hassio-supervisor:latest"],
}
container_inspect = image_inspect | {"State": {"ExitCode": 0}}
with (
patch("supervisor.docker.manager.DockerClient", return_value=MagicMock()),
patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
return_value=MagicMock(),
"supervisor.docker.manager.DockerAPI.containers", return_value=MagicMock()
),
patch("supervisor.docker.manager.DockerAPI.api", return_value=MagicMock()),
patch("supervisor.docker.manager.DockerAPI.info", return_value=MagicMock()),
@@ -139,12 +136,6 @@ async def docker() -> DockerAPI:
return_value=(docker_images := MagicMock(spec=DockerImages))
),
),
patch(
"supervisor.docker.manager.DockerAPI.containers",
new=PropertyMock(
return_value=(docker_containers := MagicMock(spec=DockerContainers))
),
),
):
docker_obj = await DockerAPI(MagicMock()).post_init()
docker_obj.config._data = {"registries": {}}
@@ -153,17 +144,11 @@ async def docker() -> DockerAPI:
docker_images.inspect.return_value = image_inspect
docker_images.list.return_value = [image_inspect]
docker_images.import_image = AsyncMock(
return_value=[{"stream": "Loaded image: test:latest\n"}]
)
docker_images.pull.return_value = AsyncIterator([{}])
docker_images.import_image.return_value = [
{"stream": "Loaded image: test:latest\n"}
]
docker_containers.get.return_value = docker_container = MagicMock(
spec=DockerContainer
)
docker_containers.list.return_value = [docker_container]
docker_containers.create.return_value = docker_container
docker_container.show.return_value = container_inspect
docker_images.pull.return_value = AsyncIterator([{}])
docker_obj.info.logging = "journald"
docker_obj.info.storage = "overlay2"
@@ -805,7 +790,7 @@ async def docker_logs(docker: DockerAPI, supervisor_name) -> MagicMock:
"""Mock log output for a container from docker."""
container_mock = MagicMock()
container_mock.logs.return_value = load_binary_fixture("logs_docker_container.txt")
docker.containers_legacy.get.return_value = container_mock
docker.containers.get.return_value = container_mock
yield container_mock.logs
@@ -839,7 +824,7 @@ async def os_available(request: pytest.FixtureRequest) -> None:
@pytest.fixture
async def mount_propagation(docker: DockerAPI, coresys: CoreSys) -> None:
"""Mock supervisor connected to container with propagation set."""
docker.containers_legacy.get.return_value = supervisor = MagicMock()
docker.containers.get.return_value = supervisor = MagicMock()
supervisor.attrs = {
"Mounts": [
{
@@ -859,11 +844,10 @@ async def mount_propagation(docker: DockerAPI, coresys: CoreSys) -> None:
@pytest.fixture
async def container(docker: DockerAPI) -> MagicMock:
"""Mock attrs and status for container on attach."""
attrs = {"State": {"ExitCode": 0}}
docker.containers_legacy.get.return_value = addon = MagicMock(
status="stopped", attrs=attrs
)
docker.containers.create.return_value.show.return_value = attrs
docker.containers.get.return_value = addon = MagicMock()
docker.containers.create.return_value = addon
addon.status = "stopped"
addon.attrs = {"State": {"ExitCode": 0}}
yield addon
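The docker fixture above feeds the pull call an `AsyncIterator([{}])` so the code under test can consume the stream with `async for`. A minimal sketch of such a wrapper, assuming it matches the helper in tests.common (the real helper may differ), is:

```python
# Hypothetical sketch of the AsyncIterator test helper used by the fixture above.
class AsyncIterator:
    """Wrap a plain iterable so it can be consumed with `async for`."""

    def __init__(self, seq):
        self._iter = iter(seq)

    def __aiter__(self):
        return self

    async def __anext__(self):
        try:
            return next(self._iter)
        except StopIteration:
            raise StopAsyncIteration from None
```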

View File

@@ -6,7 +6,7 @@ from unittest.mock import Mock, PropertyMock, patch
from dbus_fast.aio.message_bus import MessageBus
import pytest
from supervisor.dbus.const import ConnectionState
from supervisor.dbus.const import ConnectionStateType
from supervisor.dbus.network import NetworkManager
from supervisor.dbus.network.interface import NetworkInterface
from supervisor.exceptions import (
@@ -93,7 +93,7 @@ async def test_activate_connection(
"/org/freedesktop/NetworkManager/Settings/1",
"/org/freedesktop/NetworkManager/Devices/1",
)
assert connection.state == ConnectionState.ACTIVATED
assert connection.state == ConnectionStateType.ACTIVATED
assert (
connection.settings.object_path == "/org/freedesktop/NetworkManager/Settings/1"
)
@@ -117,7 +117,7 @@ async def test_add_and_activate_connection(
)
assert settings.connection.uuid == "0c23631e-2118-355c-bbb0-8943229cb0d6"
assert settings.ipv4.method == "auto"
assert connection.state == ConnectionState.ACTIVATED
assert connection.state == ConnectionStateType.ACTIVATED
assert (
connection.settings.object_path == "/org/freedesktop/NetworkManager/Settings/1"
)

View File

@@ -35,8 +35,8 @@ class System(DBusServiceMock):
"""Migrate Docker storage driver."""
if isinstance(self.response_migrate_docker_storage_driver, DBusError):
raise self.response_migrate_docker_storage_driver # pylint: disable=raising-bad-type
if backend != "overlayfs":
if backend not in ("overlayfs", "overlay2"):
raise DBusError(
ErrorType.FAILED,
f"unsupported driver: {backend} (only 'overlayfs' is currently supported)",
f"unsupported driver: {backend} (only 'overlayfs' and 'overlay2' are supported)",
)

View File

@@ -1,12 +1,7 @@
"""Docker tests."""
from supervisor.docker.const import DockerMount, MountBindOptions, MountType
from docker.types import Mount
# dev mount with equivalent of bind-recursive=writable specified via dict value
DEV_MOUNT = DockerMount(
type=MountType.BIND,
source="/dev",
target="/dev",
read_only=True,
bind_options=MountBindOptions(read_only_non_recursive=True),
)
DEV_MOUNT = Mount(type="bind", source="/dev", target="/dev", read_only=True)
DEV_MOUNT["BindOptions"] = {"ReadOnlyNonRecursive": True}

View File

@@ -1,13 +1,13 @@
"""Test docker addon setup."""
import asyncio
from http import HTTPStatus
from ipaddress import IPv4Address
from pathlib import Path
from typing import Any
from unittest.mock import MagicMock, Mock, PropertyMock, patch
import aiodocker
from docker.errors import NotFound
from docker.types import Mount
import pytest
from supervisor.addons import validate as vd
@@ -18,12 +18,6 @@ from supervisor.const import BusEvent
from supervisor.coresys import CoreSys
from supervisor.dbus.agent.cgroup import CGroup
from supervisor.docker.addon import DockerAddon
from supervisor.docker.const import (
DockerMount,
MountBindOptions,
MountType,
PropagationMode,
)
from supervisor.docker.manager import DockerAPI
from supervisor.exceptions import CoreDNSError, DockerNotFound
from supervisor.hardware.data import Device
@@ -86,8 +80,8 @@ def test_base_volumes_included(
# Data added as rw
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=docker_addon.addon.path_extern_data.as_posix(),
target="/data",
read_only=False,
@@ -105,8 +99,8 @@ def test_addon_map_folder_defaults(
)
# Config added and is marked rw
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_homeassistant.as_posix(),
target="/config",
read_only=False,
@@ -116,8 +110,8 @@ def test_addon_map_folder_defaults(
# SSL added and defaults to ro
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_ssl.as_posix(),
target="/ssl",
read_only=True,
@@ -127,30 +121,30 @@ def test_addon_map_folder_defaults(
# Media added and propagation set
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_media.as_posix(),
target="/media",
read_only=True,
bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
propagation="rslave",
)
in docker_addon.mounts
)
# Share added and propagation set
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_share.as_posix(),
target="/share",
read_only=True,
bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
propagation="rslave",
)
in docker_addon.mounts
)
# Backup not added
assert "/backup" not in [mount.target for mount in docker_addon.mounts]
assert "/backup" not in [mount["Target"] for mount in docker_addon.mounts]
def test_addon_map_homeassistant_folder(
@@ -163,8 +157,8 @@ def test_addon_map_homeassistant_folder(
# Home Assistant config folder mounted to /homeassistant, not /config
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_homeassistant.as_posix(),
target="/homeassistant",
read_only=True,
@@ -183,8 +177,8 @@ def test_addon_map_addon_configs_folder(
# Addon configs folder included
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_addon_configs.as_posix(),
target="/addon_configs",
read_only=True,
@@ -203,8 +197,8 @@ def test_addon_map_addon_config_folder(
# Addon config folder included
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=docker_addon.addon.path_extern_config.as_posix(),
target="/config",
read_only=True,
@@ -226,8 +220,8 @@ def test_addon_map_addon_config_folder_with_custom_target(
# Addon config folder included
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=docker_addon.addon.path_extern_config.as_posix(),
target="/custom/target/path",
read_only=False,
@@ -246,8 +240,8 @@ def test_addon_map_data_folder_with_custom_target(
# Addon config folder included
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=docker_addon.addon.path_extern_data.as_posix(),
target="/custom/data/path",
read_only=False,
@@ -266,8 +260,8 @@ def test_addon_ignore_on_config_map(
# Config added and is marked rw
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_homeassistant.as_posix(),
target="/config",
read_only=False,
@@ -277,10 +271,11 @@ def test_addon_ignore_on_config_map(
# Mount for addon's specific config folder omitted since config in map field
assert (
len([mount for mount in docker_addon.mounts if mount.target == "/config"]) == 1
len([mount for mount in docker_addon.mounts if mount["Target"] == "/config"])
== 1
)
# Home Assistant mount omitted since config in map field
assert "/homeassistant" not in [mount.target for mount in docker_addon.mounts]
assert "/homeassistant" not in [mount["Target"] for mount in docker_addon.mounts]
def test_journald_addon(
@@ -292,8 +287,8 @@ def test_journald_addon(
)
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source="/var/log/journal",
target="/var/log/journal",
read_only=True,
@@ -301,8 +296,8 @@ def test_journald_addon(
in docker_addon.mounts
)
assert (
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source="/run/log/journal",
target="/run/log/journal",
read_only=True,
@@ -319,7 +314,7 @@ def test_not_journald_addon(
coresys, addonsdata_system, "basic-addon-config.json"
)
assert "/var/log/journal" not in [mount.target for mount in docker_addon.mounts]
assert "/var/log/journal" not in [mount["Target"] for mount in docker_addon.mounts]
async def test_addon_run_docker_error(
@@ -330,9 +325,7 @@ async def test_addon_run_docker_error(
):
"""Test docker error when addon is run."""
await coresys.dbus.timedate.connect(coresys.dbus.bus)
coresys.docker.containers.create.side_effect = aiodocker.DockerError(
HTTPStatus.NOT_FOUND, {"message": "missing"}
)
coresys.docker.containers.create.side_effect = NotFound("Missing")
docker_addon = get_docker_addon(
coresys, addonsdata_system, "basic-addon-config.json"
)

View File

@@ -2,24 +2,22 @@
from ipaddress import IPv4Address
from pathlib import Path
from unittest.mock import MagicMock, patch
from unittest.mock import patch
import pytest
from docker.types import Mount
from supervisor.coresys import CoreSys
from supervisor.docker.const import DockerMount, MountType, Ulimit
from supervisor.docker.manager import DockerAPI
from . import DEV_MOUNT
@pytest.mark.usefixtures("path_extern")
async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, container: MagicMock):
async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, path_extern):
"""Test starting audio plugin."""
config_file = tmp_supervisor_data / "audio" / "pulse_audio.json"
assert not config_file.exists()
with patch.object(DockerAPI, "run", return_value=container.attrs) as run:
with patch.object(DockerAPI, "run") as run:
await coresys.plugins.audio.start()
run.assert_called_once()
@@ -28,31 +26,21 @@ async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, container: Mag
assert run.call_args.kwargs["hostname"] == "hassio-audio"
assert run.call_args.kwargs["cap_add"] == ["SYS_NICE", "SYS_RESOURCE"]
assert run.call_args.kwargs["ulimits"] == [
Ulimit(name="rtprio", soft=10, hard=10)
{"Name": "rtprio", "Soft": 10, "Hard": 10}
]
assert run.call_args.kwargs["mounts"] == [
DEV_MOUNT,
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_audio.as_posix(),
target="/data",
read_only=False,
),
DockerMount(
type=MountType.BIND,
source="/run/dbus",
target="/run/dbus",
read_only=True,
),
DockerMount(
type=MountType.BIND,
source="/run/udev",
target="/run/udev",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(type="bind", source="/run/dbus", target="/run/dbus", read_only=True),
Mount(type="bind", source="/run/udev", target="/run/udev", read_only=True),
Mount(
type="bind",
source="/etc/machine-id",
target="/etc/machine-id",
read_only=True,

View File

@@ -1,49 +1,9 @@
"""Test docker login."""
import pytest
# pylint: disable=protected-access
from supervisor.coresys import CoreSys
from supervisor.docker.const import DOCKER_HUB, DOCKER_HUB_LEGACY
from supervisor.docker.const import DOCKER_HUB
from supervisor.docker.interface import DockerInterface
from supervisor.docker.utils import get_registry_from_image
@pytest.mark.parametrize(
("image_ref", "expected_registry"),
[
# No registry - Docker Hub images
("nginx", None),
("nginx:latest", None),
("library/nginx", None),
("library/nginx:latest", None),
("homeassistant/amd64-supervisor", None),
("homeassistant/amd64-supervisor:1.2.3", None),
# Registry with dot
("ghcr.io/homeassistant/amd64-supervisor", "ghcr.io"),
("ghcr.io/homeassistant/amd64-supervisor:latest", "ghcr.io"),
("myregistry.com/nginx", "myregistry.com"),
("registry.example.com/org/image:v1", "registry.example.com"),
("127.0.0.1/myimage", "127.0.0.1"),
# Registry with port
("myregistry:5000/myimage", "myregistry:5000"),
("localhost:5000/myimage", "localhost:5000"),
("registry.io:5000/org/app:v1", "registry.io:5000"),
# localhost special case
("localhost/myimage", "localhost"),
("localhost/myimage:tag", "localhost"),
# IPv6
("[::1]:5000/myimage", "[::1]:5000"),
("[2001:db8::1]:5000/myimage:tag", "[2001:db8::1]:5000"),
],
)
def test_get_registry_from_image(image_ref: str, expected_registry: str | None):
"""Test get_registry_from_image extracts registry from image reference.
Based on Docker's reference implementation:
vendor/github.com/distribution/reference/normalize.go
"""
assert get_registry_from_image(image_ref) == expected_registry
def test_no_credentials(coresys: CoreSys, test_docker_interface: DockerInterface):
@@ -87,36 +47,3 @@ def test_matching_credentials(coresys: CoreSys, test_docker_interface: DockerInt
)
assert credentials["username"] == "Spongebob Squarepants"
assert "registry" not in credentials
def test_legacy_docker_hub_credentials(
coresys: CoreSys, test_docker_interface: DockerInterface
):
"""Test legacy hub.docker.com credentials are used for Docker Hub images."""
coresys.docker.config._data["registries"] = {
DOCKER_HUB_LEGACY: {"username": "LegacyUser", "password": "Password1!"},
}
credentials = test_docker_interface._get_credentials(
"homeassistant/amd64-supervisor"
)
assert credentials["username"] == "LegacyUser"
# No registry should be included for Docker Hub
assert "registry" not in credentials
def test_docker_hub_preferred_over_legacy(
coresys: CoreSys, test_docker_interface: DockerInterface
):
"""Test docker.io is preferred over legacy hub.docker.com when both exist."""
coresys.docker.config._data["registries"] = {
DOCKER_HUB: {"username": "NewUser", "password": "Password1!"},
DOCKER_HUB_LEGACY: {"username": "LegacyUser", "password": "Password2!"},
}
credentials = test_docker_interface._get_credentials(
"homeassistant/amd64-supervisor"
)
# docker.io should be preferred
assert credentials["username"] == "NewUser"
assert "registry" not in credentials

View File

@@ -2,22 +2,20 @@
from ipaddress import IPv4Address
from pathlib import Path
from unittest.mock import MagicMock, patch
from unittest.mock import patch
import pytest
from docker.types import Mount
from supervisor.coresys import CoreSys
from supervisor.docker.const import DockerMount, MountType
from supervisor.docker.manager import DockerAPI
@pytest.mark.usefixtures("path_extern")
async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, container: MagicMock):
async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, path_extern):
"""Test starting dns plugin."""
config_file = tmp_supervisor_data / "dns" / "coredns.json"
assert not config_file.exists()
with patch.object(DockerAPI, "run", return_value=container.attrs) as run:
with patch.object(DockerAPI, "run") as run:
await coresys.plugins.dns.start()
run.assert_called_once()
@@ -27,18 +25,13 @@ async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, container: Mag
assert run.call_args.kwargs["dns"] is False
assert run.call_args.kwargs["oom_score_adj"] == -300
assert run.call_args.kwargs["mounts"] == [
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_dns.as_posix(),
target="/config",
read_only=False,
),
DockerMount(
type=MountType.BIND,
source="/run/dbus",
target="/run/dbus",
read_only=True,
),
Mount(type="bind", source="/run/dbus", target="/run/dbus", read_only=True),
]
assert "volumes" not in run.call_args.kwargs

View File

@@ -1,18 +1,13 @@
"""Test Home Assistant container."""
from ipaddress import IPv4Address
from pathlib import Path
from unittest.mock import ANY, MagicMock, patch
from awesomeversion import AwesomeVersion
import pytest
from docker.types import Mount
from supervisor.coresys import CoreSys
from supervisor.docker.const import (
DockerMount,
MountBindOptions,
MountType,
PropagationMode,
)
from supervisor.docker.homeassistant import DockerHomeAssistant
from supervisor.docker.manager import DockerAPI
from supervisor.homeassistant.const import LANDINGPAGE
@@ -20,13 +15,14 @@ from supervisor.homeassistant.const import LANDINGPAGE
from . import DEV_MOUNT
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_homeassistant_start(coresys: CoreSys, container: MagicMock):
async def test_homeassistant_start(
coresys: CoreSys, tmp_supervisor_data: Path, path_extern
):
"""Test starting homeassistant."""
coresys.homeassistant.version = AwesomeVersion("2023.8.1")
with (
patch.object(DockerAPI, "run", return_value=container.attrs) as run,
patch.object(DockerAPI, "run") as run,
patch.object(
DockerHomeAssistant, "is_running", side_effect=[False, False, True]
),
@@ -50,68 +46,57 @@ async def test_homeassistant_start(coresys: CoreSys, container: MagicMock):
"TZ": ANY,
"SUPERVISOR_TOKEN": ANY,
"HASSIO_TOKEN": ANY,
# no "HA_DUPLICATE_LOG_FILE"
}
assert run.call_args.kwargs["mounts"] == [
DEV_MOUNT,
DockerMount(
type=MountType.BIND,
source="/run/dbus",
target="/run/dbus",
read_only=True,
),
DockerMount(
type=MountType.BIND,
source="/run/udev",
target="/run/udev",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(type="bind", source="/run/dbus", target="/run/dbus", read_only=True),
Mount(type="bind", source="/run/udev", target="/run/udev", read_only=True),
Mount(
type="bind",
source=coresys.config.path_extern_homeassistant.as_posix(),
target="/config",
read_only=False,
),
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_ssl.as_posix(),
target="/ssl",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_share.as_posix(),
target="/share",
read_only=False,
bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
propagation="rslave",
),
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.config.path_extern_media.as_posix(),
target="/media",
read_only=False,
bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
propagation="rslave",
),
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.homeassistant.path_extern_pulse.as_posix(),
target="/etc/pulse/client.conf",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.plugins.audio.path_extern_pulse.as_posix(),
target="/run/audio",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source=coresys.plugins.audio.path_extern_asound.as_posix(),
target="/etc/asound.conf",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source="/etc/machine-id",
target="/etc/machine-id",
read_only=True,
@@ -120,36 +105,14 @@ async def test_homeassistant_start(coresys: CoreSys, container: MagicMock):
assert "volumes" not in run.call_args.kwargs
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_homeassistant_start_with_duplicate_log_file(
coresys: CoreSys, container: MagicMock
async def test_landingpage_start(
coresys: CoreSys, tmp_supervisor_data: Path, path_extern
):
"""Test starting homeassistant with duplicate_log_file enabled."""
coresys.homeassistant.version = AwesomeVersion("2025.12.0")
coresys.homeassistant.duplicate_log_file = True
with (
patch.object(DockerAPI, "run", return_value=container.attrs) as run,
patch.object(
DockerHomeAssistant, "is_running", side_effect=[False, False, True]
),
patch("supervisor.homeassistant.core.asyncio.sleep"),
):
await coresys.homeassistant.core.start()
run.assert_called_once()
env = run.call_args.kwargs["environment"]
assert "HA_DUPLICATE_LOG_FILE" in env
assert env["HA_DUPLICATE_LOG_FILE"] == "1"
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_landingpage_start(coresys: CoreSys, container: MagicMock):
"""Test starting landingpage."""
coresys.homeassistant.version = LANDINGPAGE
with (
patch.object(DockerAPI, "run", return_value=container.attrs) as run,
patch.object(DockerAPI, "run") as run,
patch.object(DockerHomeAssistant, "is_running", return_value=False),
):
await coresys.homeassistant.core.start()
@@ -170,30 +133,19 @@ async def test_landingpage_start(coresys: CoreSys, container: MagicMock):
"TZ": ANY,
"SUPERVISOR_TOKEN": ANY,
"HASSIO_TOKEN": ANY,
# no "HA_DUPLICATE_LOG_FILE"
}
assert run.call_args.kwargs["mounts"] == [
DEV_MOUNT,
DockerMount(
type=MountType.BIND,
source="/run/dbus",
target="/run/dbus",
read_only=True,
),
DockerMount(
type=MountType.BIND,
source="/run/udev",
target="/run/udev",
read_only=True,
),
DockerMount(
type=MountType.BIND,
Mount(type="bind", source="/run/dbus", target="/run/dbus", read_only=True),
Mount(type="bind", source="/run/udev", target="/run/udev", read_only=True),
Mount(
type="bind",
source=coresys.config.path_extern_homeassistant.as_posix(),
target="/config",
read_only=False,
),
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source="/etc/machine-id",
target="/etc/machine-id",
read_only=True,

View File

@@ -1,7 +1,6 @@
"""Test Docker interface."""
import asyncio
from http import HTTPStatus
from pathlib import Path
from typing import Any
from unittest.mock import ANY, AsyncMock, MagicMock, Mock, PropertyMock, call, patch
@@ -55,7 +54,7 @@ async def test_docker_image_platform(
coresys.docker.images.inspect.return_value = {"Id": "test:1.2.3"}
await test_docker_interface.install(AwesomeVersion("1.2.3"), "test", arch=cpu_arch)
coresys.docker.images.pull.assert_called_once_with(
"test", tag="1.2.3", platform=platform, auth=None, stream=True, timeout=None
"test", tag="1.2.3", platform=platform, auth=None, stream=True
)
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
@@ -72,12 +71,7 @@ async def test_docker_image_default_platform(
):
await test_docker_interface.install(AwesomeVersion("1.2.3"), "test")
coresys.docker.images.pull.assert_called_once_with(
"test",
tag="1.2.3",
platform="linux/386",
auth=None,
stream=True,
timeout=None,
"test", tag="1.2.3", platform="linux/386", auth=None, stream=True
)
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
@@ -117,12 +111,7 @@ async def test_private_registry_credentials_passed_to_pull(
expected_auth["registry"] = registry_key
coresys.docker.images.pull.assert_called_once_with(
image,
tag="1.2.3",
platform="linux/amd64",
auth=expected_auth,
stream=True,
timeout=None,
image, tag="1.2.3", platform="linux/amd64", auth=expected_auth, stream=True
)
@@ -149,7 +138,7 @@ async def test_current_state(
container_collection = MagicMock()
container_collection.get.return_value = Container(attrs)
with patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
"supervisor.docker.manager.DockerAPI.containers",
new=PropertyMock(return_value=container_collection),
):
assert await coresys.homeassistant.core.instance.current_state() == expected
@@ -159,7 +148,7 @@ async def test_current_state_failures(coresys: CoreSys):
"""Test failure states for current state."""
container_collection = MagicMock()
with patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
"supervisor.docker.manager.DockerAPI.containers",
new=PropertyMock(return_value=container_collection),
):
container_collection.get.side_effect = NotFound("dne")
@@ -212,7 +201,7 @@ async def test_attach_existing_container(
container_collection.get.return_value = Container(attrs)
with (
patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
"supervisor.docker.manager.DockerAPI.containers",
new=PropertyMock(return_value=container_collection),
),
patch.object(type(coresys.bus), "fire_event") as fire_event,
@@ -254,7 +243,7 @@ async def test_attach_existing_container(
async def test_attach_container_failure(coresys: CoreSys):
"""Test attach fails to find container but finds image."""
coresys.docker.containers_legacy.get.side_effect = DockerException()
coresys.docker.containers.get.side_effect = DockerException()
coresys.docker.images.inspect.return_value.setdefault("Config", {})["Image"] = (
"sha256:abc123"
)
@@ -272,7 +261,7 @@ async def test_attach_container_failure(coresys: CoreSys):
async def test_attach_total_failure(coresys: CoreSys):
"""Test attach fails to find container or image."""
coresys.docker.containers_legacy.get.side_effect = DockerException
coresys.docker.containers.get.side_effect = DockerException
coresys.docker.images.inspect.side_effect = aiodocker.DockerError(
400, {"message": ""}
)
@@ -305,10 +294,8 @@ async def test_run_missing_image(
tmp_supervisor_data: Path,
):
"""Test run captures the exception when image is missing."""
coresys.docker.containers.create.side_effect = [
aiodocker.DockerError(HTTPStatus.NOT_FOUND, {"message": "missing"}),
MagicMock(),
]
coresys.docker.containers.create.side_effect = [NotFound("missing"), MagicMock()]
container.status = "stopped"
install_addon_ssh.data["image"] = "test_image"
with pytest.raises(DockerNotFound):
@@ -373,12 +360,7 @@ async def test_install_fires_progress_events(
):
await test_docker_interface.install(AwesomeVersion("1.2.3"), "test")
coresys.docker.images.pull.assert_called_once_with(
"test",
tag="1.2.3",
platform="linux/386",
auth=None,
stream=True,
timeout=None,
"test", tag="1.2.3", platform="linux/386", auth=None, stream=True
)
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
@@ -727,11 +709,18 @@ async def test_install_progress_handles_layers_skipping_download(
await install_task
await event.wait()
# First update from layer download should have rather low progress ((260937/25445459) / 2 ~ 0.5%)
assert install_job_snapshots[0]["progress"] < 1
# With the new progress calculation approach:
# - Progress is weighted by layer size
# - Small layers that skip downloading get minimal size (1 byte)
# - Progress should increase monotonically
assert len(install_job_snapshots) > 0
# Total 8 events should lead to a progress update on the install job
assert len(install_job_snapshots) == 8
# Verify progress is monotonically increasing (or stable)
for i in range(1, len(install_job_snapshots)):
assert (
install_job_snapshots[i]["progress"]
>= install_job_snapshots[i - 1]["progress"]
)
# Job should complete successfully
assert job.done is True
@@ -835,12 +824,7 @@ async def test_install_progress_containerd_snapshot(
with patch.object(Supervisor, "arch", PropertyMock(return_value="i386")):
await test_docker_interface.mock_install()
coresys.docker.images.pull.assert_called_once_with(
"test",
tag="1.2.3",
platform="linux/386",
auth=None,
stream=True,
timeout=None,
"test", tag="1.2.3", platform="linux/386", auth=None, stream=True
)
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
@@ -867,24 +851,24 @@ async def test_install_progress_containerd_snapshot(
}
assert [c.args[0] for c in ha_ws_client.async_send_command.call_args_list] == [
# During downloading we get continuous progress updates from download status
# Count-based progress: 2 layers, each = 50%. Download = 0-35%, Extract = 35-50%
job_event(0),
job_event(1.7),
job_event(3.4),
job_event(8.5),
job_event(8.4),
job_event(10.2),
job_event(15.3),
job_event(18.8),
job_event(29.0),
job_event(35.8),
job_event(42.6),
job_event(49.5),
job_event(56.0),
job_event(62.8),
# Downloading phase is considered 70% of total. After we only get one update
# per image downloaded when extraction is finished. It uses the total size
# received during downloading to determine percent complete then.
job_event(15.2),
job_event(18.7),
job_event(28.8),
job_event(35.7),
job_event(42.4),
job_event(49.3),
job_event(55.8),
job_event(62.7),
# Downloading phase is considered 70% of layer's progress.
# After download complete, extraction takes remaining 30% per layer.
job_event(70.0),
job_event(84.8),
job_event(85.0),
job_event(100),
job_event(100, True),
]
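# Worked example of the per-layer 70/30 split behind the percentages above
# (assumed reading of the expected values): two layers, each worth 50% of the
# job; both downloads finished, then the first layer fully extracted while the
# second still waits -- matching job_event(70.0) and job_event(85.0).
assert (70 + 70) / 2 == 70.0
assert (100 + 70) / 2 == 85.0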

View File

@@ -2,9 +2,8 @@
import asyncio
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch
from unittest.mock import MagicMock, patch
from aiodocker.containers import DockerContainer
from docker.errors import APIError, DockerException, NotFound
import pytest
from requests import RequestException
@@ -140,38 +139,40 @@ async def test_run_command_custom_stdout_stderr(docker: DockerAPI):
assert result.output == b"output"
@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
async def test_run_container_with_cidfile(coresys: CoreSys, docker: DockerAPI):
async def test_run_container_with_cidfile(
coresys: CoreSys, docker: DockerAPI, path_extern, tmp_supervisor_data
):
"""Test container creation with cidfile and bind mount."""
# Mock container
mock_container = MagicMock(spec=DockerContainer, id="test_container_id_12345")
mock_container.show.return_value = mock_metadata = {"Id": mock_container.id}
mock_container = MagicMock()
mock_container.id = "test_container_id_12345"
container_name = "test_container"
cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"
extern_cidfile_path = coresys.config.path_extern_cid_files / f"{container_name}.cid"
docker.containers.create.return_value = mock_container
docker.dockerpy.containers.run.return_value = mock_container
# Mock container creation
with patch.object(
docker.containers, "create", return_value=mock_container
) as create_mock:
# Execute run with a container name
result = await docker.run("test_image", tag="latest", name=container_name)
loop = asyncio.get_event_loop()
result = await loop.run_in_executor(
None,
lambda kwargs: docker.run(**kwargs),
{"image": "test_image", "tag": "latest", "name": container_name},
)
# Check the container creation parameters
create_mock.assert_called_once()
create_config = create_mock.call_args.args[0]
kwargs = create_mock.call_args[1]
assert "HostConfig" in create_config
assert "Mounts" in create_config["HostConfig"]
assert {
"Type": "bind",
"Source": str(extern_cidfile_path),
"Target": "/run/cid",
"ReadOnly": True,
} in create_config["HostConfig"]["Mounts"]
assert "volumes" in kwargs
assert str(extern_cidfile_path) in kwargs["volumes"]
assert kwargs["volumes"][str(extern_cidfile_path)]["bind"] == "/run/cid"
assert kwargs["volumes"][str(extern_cidfile_path)]["mode"] == "ro"
# Verify container start was called
mock_container.start.assert_called_once()
@@ -180,15 +181,16 @@ async def test_run_container_with_cidfile(coresys: CoreSys, docker: DockerAPI):
assert cidfile_path.exists()
assert cidfile_path.read_text() == mock_container.id
assert result == mock_metadata
assert result == mock_container
@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
async def test_run_container_with_leftover_cidfile(coresys: CoreSys, docker: DockerAPI):
async def test_run_container_with_leftover_cidfile(
coresys: CoreSys, docker: DockerAPI, path_extern, tmp_supervisor_data
):
"""Test container creation removes leftover cidfile before creating new one."""
# Mock container
mock_container = MagicMock(spec=DockerContainer, id="test_container_id_new")
mock_container.show.return_value = mock_metadata = {"Id": mock_container.id}
mock_container = MagicMock()
mock_container.id = "test_container_id_new"
container_name = "test_container"
cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"
@@ -201,7 +203,12 @@ async def test_run_container_with_leftover_cidfile(coresys: CoreSys, docker: Doc
docker.containers, "create", return_value=mock_container
) as create_mock:
# Execute run with a container name
result = await docker.run("test_image", tag="latest", name=container_name)
loop = asyncio.get_event_loop()
result = await loop.run_in_executor(
None,
lambda kwargs: docker.run(**kwargs),
{"image": "test_image", "tag": "latest", "name": container_name},
)
# Verify container was created
create_mock.assert_called_once()
@@ -210,7 +217,7 @@ async def test_run_container_with_leftover_cidfile(coresys: CoreSys, docker: Doc
assert cidfile_path.exists()
assert cidfile_path.read_text() == mock_container.id
assert result == mock_metadata
assert result == mock_container
async def test_stop_container_with_cidfile_cleanup(
@@ -229,7 +236,7 @@ async def test_stop_container_with_cidfile_cleanup(
# Mock the containers.get method and cidfile cleanup
with (
patch.object(docker.containers_legacy, "get", return_value=mock_container),
patch.object(docker.containers, "get", return_value=mock_container),
):
# Call stop_container with remove_container=True
loop = asyncio.get_event_loop()
@@ -256,7 +263,7 @@ async def test_stop_container_without_removal_no_cidfile_cleanup(docker: DockerA
# Mock the containers.get method and cidfile cleanup
with (
patch.object(docker.containers_legacy, "get", return_value=mock_container),
patch.object(docker.containers, "get", return_value=mock_container),
patch("pathlib.Path.unlink") as mock_unlink,
):
# Call stop_container with remove_container=False
@@ -270,8 +277,9 @@ async def test_stop_container_without_removal_no_cidfile_cleanup(docker: DockerA
mock_unlink.assert_not_called()
@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
async def test_cidfile_cleanup_handles_oserror(coresys: CoreSys, docker: DockerAPI):
async def test_cidfile_cleanup_handles_oserror(
coresys: CoreSys, docker: DockerAPI, path_extern, tmp_supervisor_data
):
"""Test that cidfile cleanup handles OSError gracefully."""
# Mock container
mock_container = MagicMock()
@@ -285,7 +293,7 @@ async def test_cidfile_cleanup_handles_oserror(coresys: CoreSys, docker: DockerA
# Mock the containers.get method and cidfile cleanup to raise OSError
with (
patch.object(docker.containers_legacy, "get", return_value=mock_container),
patch.object(docker.containers, "get", return_value=mock_container),
patch("pathlib.Path.is_dir", return_value=False),
patch("pathlib.Path.is_file", return_value=True),
patch(
@@ -303,9 +311,8 @@ async def test_cidfile_cleanup_handles_oserror(coresys: CoreSys, docker: DockerA
mock_unlink.assert_called_once_with(missing_ok=True)
@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
async def test_run_container_with_leftover_cidfile_directory(
coresys: CoreSys, docker: DockerAPI
coresys: CoreSys, docker: DockerAPI, path_extern, tmp_supervisor_data
):
"""Test container creation removes leftover cidfile directory before creating new one.
@@ -314,8 +321,8 @@ async def test_run_container_with_leftover_cidfile_directory(
the bind mount source as a directory.
"""
# Mock container
mock_container = MagicMock(spec=DockerContainer, id="test_container_id_new")
mock_container.show.return_value = mock_metadata = {"Id": mock_container.id}
mock_container = MagicMock()
mock_container.id = "test_container_id_new"
container_name = "test_container"
cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"
@@ -329,7 +336,12 @@ async def test_run_container_with_leftover_cidfile_directory(
docker.containers, "create", return_value=mock_container
) as create_mock:
# Execute run with a container name
result = await docker.run("test_image", tag="latest", name=container_name)
loop = asyncio.get_event_loop()
result = await loop.run_in_executor(
None,
lambda kwargs: docker.run(**kwargs),
{"image": "test_image", "tag": "latest", "name": container_name},
)
# Verify container was created
create_mock.assert_called_once()
@@ -339,7 +351,7 @@ async def test_run_container_with_leftover_cidfile_directory(
assert cidfile_path.is_file()
assert cidfile_path.read_text() == mock_container.id
assert result == mock_metadata
assert result == mock_container
async def test_repair(coresys: CoreSys, caplog: pytest.LogCaptureFixture):
@@ -400,9 +412,9 @@ async def test_repair_failures(coresys: CoreSys, caplog: pytest.LogCaptureFixtur
async def test_import_image(coresys: CoreSys, tmp_path: Path, log_starter: str):
"""Test importing an image into docker."""
(test_tar := tmp_path / "test.tar").touch()
coresys.docker.images.import_image = AsyncMock(
return_value=[{"stream": f"{log_starter}: imported"}]
)
coresys.docker.images.import_image.return_value = [
{"stream": f"{log_starter}: imported"}
]
coresys.docker.images.inspect.return_value = {"Id": "imported"}
image = await coresys.docker.import_image(test_tar)
@@ -414,9 +426,9 @@ async def test_import_image(coresys: CoreSys, tmp_path: Path, log_starter: str):
async def test_import_image_error(coresys: CoreSys, tmp_path: Path):
"""Test failure importing an image into docker."""
(test_tar := tmp_path / "test.tar").touch()
coresys.docker.images.import_image = AsyncMock(
return_value=[{"errorDetail": {"message": "fail"}}]
)
coresys.docker.images.import_image.return_value = [
{"errorDetail": {"message": "fail"}}
]
with pytest.raises(DockerError, match="Can't import image from tar: fail"):
await coresys.docker.import_image(test_tar)
@@ -429,12 +441,10 @@ async def test_import_multiple_images_in_tar(
):
"""Test importing an image into docker."""
(test_tar := tmp_path / "test.tar").touch()
coresys.docker.images.import_image = AsyncMock(
return_value=[
{"stream": "Loaded image: imported-1"},
{"stream": "Loaded image: imported-2"},
]
)
coresys.docker.images.import_image.return_value = [
{"stream": "Loaded image: imported-1"},
{"stream": "Loaded image: imported-2"},
]
assert await coresys.docker.import_image(test_tar) is None

View File

@@ -120,7 +120,7 @@ async def test_unlabeled_container(coresys: CoreSys):
}
)
with patch(
"supervisor.docker.manager.DockerAPI.containers_legacy",
"supervisor.docker.manager.DockerAPI.containers",
new=PropertyMock(return_value=container_collection),
):
await coresys.homeassistant.core.instance.attach(AwesomeVersion("2022.7.3"))

View File

@@ -1,16 +1,17 @@
"""Test Observer plugin container."""
from ipaddress import IPv4Address, ip_network
from unittest.mock import MagicMock, patch
from unittest.mock import patch
from docker.types import Mount
from supervisor.coresys import CoreSys
from supervisor.docker.const import DockerMount, MountType
from supervisor.docker.manager import DockerAPI
async def test_start(coresys: CoreSys, container: MagicMock):
async def test_start(coresys: CoreSys):
"""Test starting observer plugin."""
with patch.object(DockerAPI, "run", return_value=container.attrs) as run:
with patch.object(DockerAPI, "run") as run:
await coresys.plugins.observer.start()
run.assert_called_once()
@@ -27,8 +28,8 @@ async def test_start(coresys: CoreSys, container: MagicMock):
)
assert run.call_args.kwargs["ports"] == {"80/tcp": 4357}
assert run.call_args.kwargs["mounts"] == [
DockerMount(
type=MountType.BIND,
Mount(
type="bind",
source="/run/docker.sock",
target="/run/docker.sock",
read_only=True,

View File

@@ -0,0 +1,786 @@
"""Tests for image pull progress tracking."""
import pytest
from supervisor.docker.manager import PullLogEntry, PullProgressDetail
from supervisor.docker.pull_progress import (
DOWNLOAD_WEIGHT,
EXTRACT_WEIGHT,
ImagePullProgress,
LayerProgress,
)
class TestLayerProgress:
"""Tests for LayerProgress class."""
def test_already_exists_layer(self):
"""Test that already existing layer returns 100%."""
layer = LayerProgress(layer_id="abc123", already_exists=True)
assert layer.calculate_progress() == 100.0
def test_extract_complete_layer(self):
"""Test that extracted layer returns 100%."""
layer = LayerProgress(
layer_id="abc123",
total_size=1000,
download_current=1000,
download_complete=True,
extract_complete=True,
)
assert layer.calculate_progress() == 100.0
def test_download_complete_not_extracted(self):
"""Test layer that finished downloading but not extracting."""
layer = LayerProgress(
layer_id="abc123",
total_size=1000,
download_current=1000,
download_complete=True,
extract_complete=False,
)
assert layer.calculate_progress() == DOWNLOAD_WEIGHT # 70%
def test_extraction_progress_overlay2(self):
"""Test layer with byte-based extraction progress (overlay2)."""
layer = LayerProgress(
layer_id="abc123",
total_size=1000,
download_current=1000,
extract_current=500, # 50% extracted
download_complete=True,
extract_complete=False,
)
# 70% + (50% of 30%) = 70% + 15% = 85%
assert layer.calculate_progress() == DOWNLOAD_WEIGHT + (0.5 * EXTRACT_WEIGHT)
def test_downloading_progress(self):
"""Test layer during download phase."""
layer = LayerProgress(
layer_id="abc123",
total_size=1000,
download_current=500, # 50% downloaded
download_complete=False,
)
# 50% of 70% = 35%
assert layer.calculate_progress() == 35.0
def test_no_size_info_yet(self):
"""Test layer with no size information."""
layer = LayerProgress(layer_id="abc123")
assert layer.calculate_progress() == 0.0
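# Condensed sketch of the per-layer math exercised above (illustrative only;
# the real logic lives in supervisor/docker/pull_progress.py). It assumes
# DOWNLOAD_WEIGHT == 70 and EXTRACT_WEIGHT == 30, as the assertions imply.
def _layer_percent(
    total: int = 0,
    downloaded: int = 0,
    extracted: int = 0,
    download_done: bool = False,
    extract_done: bool = False,
    already_exists: bool = False,
) -> float:
    if already_exists or extract_done:
        return 100.0
    if download_done:
        extract_share = extracted / total if total else 0.0
        return DOWNLOAD_WEIGHT + extract_share * EXTRACT_WEIGHT
    if not total:
        return 0.0
    return downloaded / total * DOWNLOAD_WEIGHT
assert _layer_percent(total=1000, downloaded=500) == 35.0
assert _layer_percent(total=1000, downloaded=1000, extracted=500, download_done=True) == 85.0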
class TestImagePullProgress:
"""Tests for ImagePullProgress class."""
def test_empty_progress(self):
"""Test progress with no layers."""
progress = ImagePullProgress()
assert progress.calculate_progress() == 0.0
def test_all_layers_already_exist(self):
"""Test when all layers already exist locally.
When an image is fully cached, there are no "Downloading" events.
Progress stays at 0 until the job completes and sets 100%.
"""
progress = ImagePullProgress()
# Simulate "Already exists" events
entry1 = PullLogEntry(
job_id="test",
id="layer1",
status="Already exists",
progress_detail=PullProgressDetail(),
)
entry2 = PullLogEntry(
job_id="test",
id="layer2",
status="Already exists",
progress_detail=PullProgressDetail(),
)
progress.process_event(entry1)
progress.process_event(entry2)
# No downloading events = no progress reported (job completion sets 100%)
assert progress.calculate_progress() == 0.0
def test_single_layer_download(self):
"""Test progress tracking for single layer download."""
progress = ImagePullProgress()
# Pull fs layer
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Pulling fs layer",
progress_detail=PullProgressDetail(),
)
)
# Start downloading
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Downloading",
progress_detail=PullProgressDetail(current=500, total=1000),
)
)
# 50% of download phase = 35%
assert progress.calculate_progress() == pytest.approx(35.0)
# Download complete
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Download complete",
progress_detail=PullProgressDetail(),
)
)
assert progress.calculate_progress() == 70.0
# Pull complete
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Pull complete",
progress_detail=PullProgressDetail(),
)
)
assert progress.calculate_progress() == 100.0
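# Rough summary of how each pull status advances a layer, inferred from the
# events used in these tests rather than copied from pull_progress.py (the
# wording of the effects is illustrative):
_STATUS_EFFECT = {
    "Already exists": "layer is cached and excluded from the calculation",
    "Pulling fs layer": "layer registered, size still unknown",
    "Waiting": "layer queued by Docker's concurrent-download limit",
    "Downloading": "current/total bytes recorded for the download phase",
    "Verifying Checksum": "download treated as fully received",
    "Download complete": "download phase done (size defaults to 1 if unknown)",
    "Extracting": "byte-based detail advances the extract phase",
    "Pull complete": "layer counts as 100%",
}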
def test_multiple_layers_equal_weight_progress(self):
"""Test count-based progress where each layer contributes equally."""
progress = ImagePullProgress()
# Two layers: sizes don't matter for weight, each layer = 50%
# Pulling fs layer for both
progress.process_event(
PullLogEntry(
job_id="test",
id="large",
status="Pulling fs layer",
progress_detail=PullProgressDetail(),
)
)
progress.process_event(
PullLogEntry(
job_id="test",
id="small",
status="Pulling fs layer",
progress_detail=PullProgressDetail(),
)
)
# Large layer: 50% downloaded = 35% layer progress (50% of 70%)
progress.process_event(
PullLogEntry(
job_id="test",
id="large",
status="Downloading",
progress_detail=PullProgressDetail(current=500, total=1000),
)
)
# Small layer: 100% downloaded, waiting for extraction = 70% layer progress
progress.process_event(
PullLogEntry(
job_id="test",
id="small",
status="Download complete",
progress_detail=PullProgressDetail(),
)
)
progress.process_event(
PullLogEntry(
job_id="test",
id="small",
status="Downloading",
progress_detail=PullProgressDetail(current=100, total=100),
)
)
# Progress calculation (count-based, equal weight per layer):
# Large layer: 35% (50% of 70% download weight)
# Small layer: 70% (download complete)
# Each layer = 50% weight
# Total: (35 + 70) / 2 = 52.5%
assert progress.calculate_progress() == pytest.approx(52.5)
def test_download_retry(self):
"""Test that download retry resets progress."""
progress = ImagePullProgress()
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Pulling fs layer",
progress_detail=PullProgressDetail(),
)
)
# Download 50%
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Downloading",
progress_detail=PullProgressDetail(current=500, total=1000),
)
)
assert progress.calculate_progress() == pytest.approx(35.0)
# Retry
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Retrying in 5 seconds",
)
)
assert progress.calculate_progress() == 0.0
def test_layer_skips_download(self):
"""Test small layer that goes straight to Download complete."""
progress = ImagePullProgress()
progress.process_event(
PullLogEntry(
job_id="test",
id="small",
status="Pulling fs layer",
progress_detail=PullProgressDetail(),
)
)
# Goes directly to Download complete (skipping Downloading events)
progress.process_event(
PullLogEntry(
job_id="test",
id="small",
status="Download complete",
progress_detail=PullProgressDetail(),
)
)
# Should still work - sets minimal size
layer = progress.layers["small"]
assert layer.total_size == 1
assert layer.download_complete is True
def test_containerd_extract_progress(self):
"""Test extraction progress with containerd snapshotter (time-based)."""
progress = ImagePullProgress()
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Pulling fs layer",
progress_detail=PullProgressDetail(),
)
)
# Download complete
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Downloading",
progress_detail=PullProgressDetail(current=1000, total=1000),
)
)
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Download complete",
progress_detail=PullProgressDetail(),
)
)
# Containerd extraction progress (time-based, not byte-based)
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Extracting",
progress_detail=PullProgressDetail(current=5, units="s"),
)
)
# Should be at 70% (download complete, time-based extraction not tracked)
assert progress.calculate_progress() == 70.0
# Pull complete
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Pull complete",
progress_detail=PullProgressDetail(),
)
)
assert progress.calculate_progress() == 100.0
def test_overlay2_extract_progress(self):
"""Test extraction progress with overlay2 (byte-based)."""
progress = ImagePullProgress()
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Pulling fs layer",
progress_detail=PullProgressDetail(),
)
)
# Download complete
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Downloading",
progress_detail=PullProgressDetail(current=1000, total=1000),
)
)
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Download complete",
progress_detail=PullProgressDetail(),
)
)
# At download complete, progress should be 70%
assert progress.calculate_progress() == 70.0
# Overlay2 extraction progress (byte-based, 50% extracted)
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Extracting",
progress_detail=PullProgressDetail(current=500, total=1000),
)
)
# Should be at 70% + (50% of 30%) = 85%
assert progress.calculate_progress() == pytest.approx(85.0)
# Extraction continues to 80%
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Extracting",
progress_detail=PullProgressDetail(current=800, total=1000),
)
)
# Should be at 70% + (80% of 30%) = 94%
assert progress.calculate_progress() == pytest.approx(94.0)
# Pull complete
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Pull complete",
progress_detail=PullProgressDetail(),
)
)
assert progress.calculate_progress() == 100.0
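# Sketch of the extraction handling the two tests above imply (assumed logic):
# containerd reports time-based progress (units == "s"), which cannot be turned
# into a percentage, so only byte-based "Extracting" details move the bar.
def _extract_share(current: int | None, total: int | None, units: str | None) -> float:
    if units == "s" or not total:
        return 0.0  # time-based or unknown total: wait for "Pull complete"
    return (current or 0) / total * EXTRACT_WEIGHT
assert _extract_share(5, None, "s") == 0.0
assert _extract_share(500, 1000, None) == 15.0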
def test_get_stage(self):
"""Test stage detection."""
progress = ImagePullProgress()
assert progress.get_stage() is None
# Add a layer that needs downloading
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Pulling fs layer",
progress_detail=PullProgressDetail(),
)
)
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Downloading",
progress_detail=PullProgressDetail(current=500, total=1000),
)
)
assert progress.get_stage() == "Downloading"
# Download complete
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Download complete",
progress_detail=PullProgressDetail(),
)
)
assert progress.get_stage() == "Extracting"
# Pull complete
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Pull complete",
progress_detail=PullProgressDetail(),
)
)
assert progress.get_stage() == "Pull complete"
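# Plausible reading of the stage logic checked above (illustrative, not the
# actual implementation): the earliest phase any still-pulled layer is in wins.
def _stage(layers: list[LayerProgress]) -> str | None:
    pulling = [layer for layer in layers if not layer.already_exists]
    if not pulling:
        return None
    if any(not layer.download_complete for layer in pulling):
        return "Downloading"
    if any(not layer.extract_complete for layer in pulling):
        return "Extracting"
    return "Pull complete"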
def test_should_update_job(self):
"""Test update threshold logic."""
progress = ImagePullProgress()
# Initial state - no updates
should_update, _ = progress.should_update_job()
assert not should_update
# Add a layer and start downloading
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Pulling fs layer",
progress_detail=PullProgressDetail(),
)
)
# Small progress - 1%
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Downloading",
progress_detail=PullProgressDetail(current=20, total=1000),
)
)
# 2% of download = 1.4% total
should_update, current = progress.should_update_job()
assert should_update
assert current == pytest.approx(1.4)
# Tiny increment - shouldn't trigger update
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Downloading",
progress_detail=PullProgressDetail(current=25, total=1000),
)
)
should_update, _ = progress.should_update_job()
assert not should_update
# Larger increment - should trigger
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Downloading",
progress_detail=PullProgressDetail(current=100, total=1000),
)
)
should_update, _ = progress.should_update_job()
assert should_update
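# One way to read the throttling above (the 1% threshold is an assumption
# consistent with these assertions; the actual value lives in pull_progress.py):
# only report once progress has moved at least a full percentage point since
# the value last handed to the job.
def _should_report(last_reported: float, current: float, threshold: float = 1.0) -> bool:
    return current - last_reported >= threshold
assert _should_report(0.0, 1.4)       # 1.4% jump -> update
assert not _should_report(1.4, 1.75)  # 0.35% increment -> skip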
def test_verifying_checksum(self):
"""Test that Verifying Checksum marks download as nearly complete."""
progress = ImagePullProgress()
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Pulling fs layer",
progress_detail=PullProgressDetail(),
)
)
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Downloading",
progress_detail=PullProgressDetail(current=800, total=1000),
)
)
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Verifying Checksum",
progress_detail=PullProgressDetail(),
)
)
layer = progress.layers["layer1"]
assert layer.download_current == 1000 # Should be set to total
def test_events_without_status_ignored(self):
"""Test that events without status are ignored."""
progress = ImagePullProgress()
# Event without status (just id field)
progress.process_event(
PullLogEntry(
job_id="test",
id="abc123",
)
)
# Event without id
progress.process_event(
PullLogEntry(
job_id="test",
status="Digest: sha256:abc123",
)
)
# They shouldn't create layers or cause errors
assert len(progress.layers) == 0
def test_mixed_already_exists_and_pull(self):
"""Test combination of cached and pulled layers."""
progress = ImagePullProgress()
# Layer 1 already exists
progress.process_event(
PullLogEntry(
job_id="test",
id="cached",
status="Already exists",
progress_detail=PullProgressDetail(),
)
)
# Layer 2 needs to be pulled
progress.process_event(
PullLogEntry(
job_id="test",
id="pulled",
status="Pulling fs layer",
progress_detail=PullProgressDetail(),
)
)
progress.process_event(
PullLogEntry(
job_id="test",
id="pulled",
status="Downloading",
progress_detail=PullProgressDetail(current=500, total=1000),
)
)
# Only 1 layer needs pulling (cached layer excluded)
# pulled: 35% (50% of 70% download weight)
assert progress.calculate_progress() == pytest.approx(35.0)
# Complete the pulled layer
progress.process_event(
PullLogEntry(
job_id="test",
id="pulled",
status="Download complete",
progress_detail=PullProgressDetail(),
)
)
progress.process_event(
PullLogEntry(
job_id="test",
id="pulled",
status="Pull complete",
progress_detail=PullProgressDetail(),
)
)
assert progress.calculate_progress() == 100.0
def test_pending_layers_prevent_premature_100(self):
"""Test that layers without size info scale down progress."""
progress = ImagePullProgress()
# First batch of layers - they complete
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Pulling fs layer",
progress_detail=PullProgressDetail(),
)
)
progress.process_event(
PullLogEntry(
job_id="test",
id="layer2",
status="Pulling fs layer",
progress_detail=PullProgressDetail(),
)
)
# Layer1 downloads and completes
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Downloading",
progress_detail=PullProgressDetail(current=1000, total=1000),
)
)
progress.process_event(
PullLogEntry(
job_id="test",
id="layer1",
status="Pull complete",
progress_detail=PullProgressDetail(),
)
)
# Layer2 is still pending (no size info yet) - simulating Docker rate limiting
# Progress should NOT be 100% because layer2 hasn't started
# Layer1 is 100% complete, layer2 is 0%
# With scaling: 1 known layer at 100%, 1 pending layer
# Scale factor = 1/(1+1) = 0.5, so progress = 100 * 0.5 = 50%
assert progress.calculate_progress() == pytest.approx(50.0)
# Now layer2 starts downloading
progress.process_event(
PullLogEntry(
job_id="test",
id="layer2",
status="Downloading",
progress_detail=PullProgressDetail(current=500, total=1000),
)
)
# Now both layers have size info, no scaling needed
# Layer1: 100%, Layer2: 35% (50% of 70%)
# Weighted by equal size: (100 + 35) / 2 = 67.5%
assert progress.calculate_progress() == pytest.approx(67.5)
# Complete layer2
progress.process_event(
PullLogEntry(
job_id="test",
id="layer2",
status="Pull complete",
progress_detail=PullProgressDetail(),
)
)
assert progress.calculate_progress() == 100.0
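# Sketch of the scaling described above (assumed formula): layers announced
# without size information ("pending") dilute the average, so a single finished
# layer cannot push the job to 100% while others are still rate-limited.
def _scaled_progress(known_layer_percents: list[float], pending_layers: int) -> float:
    if not known_layer_percents:
        return 0.0
    raw = sum(known_layer_percents) / len(known_layer_percents)
    return raw * len(known_layer_percents) / (len(known_layer_percents) + pending_layers)
assert _scaled_progress([100.0], pending_layers=1) == 50.0
assert _scaled_progress([100.0, 35.0], pending_layers=0) == 67.5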
def test_large_layers_appearing_late_dont_cause_regression(self):
"""Test that large layers discovered late don't cause progress to drop.
This simulates Docker's rate-limiting behavior where small layers complete
first, then large layers start downloading later.
"""
progress = ImagePullProgress()
# All layers announced upfront (Docker does this)
for layer_id in ["small1", "small2", "big1", "big2"]:
progress.process_event(
PullLogEntry(
job_id="test",
id=layer_id,
status="Pulling fs layer",
progress_detail=PullProgressDetail(),
)
)
# Big layers are "Waiting" (rate limited)
for layer_id in ["big1", "big2"]:
progress.process_event(
PullLogEntry(
job_id="test",
id=layer_id,
status="Waiting",
progress_detail=PullProgressDetail(),
)
)
# Small layers download quickly (1KB each)
for layer_id in ["small1", "small2"]:
progress.process_event(
PullLogEntry(
job_id="test",
id=layer_id,
status="Downloading",
progress_detail=PullProgressDetail(current=1000, total=1000),
)
)
progress.process_event(
PullLogEntry(
job_id="test",
id=layer_id,
status="Pull complete",
progress_detail=PullProgressDetail(),
)
)
# At this point, 2 small layers are complete, 2 big layers are unknown size
progress_before_big = progress.calculate_progress()
# Now big layers start downloading - they're 100MB each!
progress.process_event(
PullLogEntry(
job_id="test",
id="big1",
status="Downloading",
progress_detail=PullProgressDetail(current=1000000, total=100000000),
)
)
progress_after_big1 = progress.calculate_progress()
# Progress should NOT drop significantly when big layer appears
# The monotonic tracking in should_update_job will help, but the
# raw calculation should also not regress too badly
assert progress_after_big1 >= progress_before_big * 0.5, (
f"Progress dropped too much: {progress_before_big} -> {progress_after_big1}"
)
# Second big layer appears
progress.process_event(
PullLogEntry(
job_id="test",
id="big2",
status="Downloading",
progress_detail=PullProgressDetail(current=1000000, total=100000000),
)
)
# Should still make forward progress overall
# Complete all layers
for layer_id in ["big1", "big2"]:
progress.process_event(
PullLogEntry(
job_id="test",
id=layer_id,
status="Pull complete",
progress_detail=PullProgressDetail(),
)
)
assert progress.calculate_progress() == 100.0

View File

@@ -238,7 +238,6 @@ async def test_install_other_error(
@pytest.mark.usefixtures("path_extern")
async def test_start(
coresys: CoreSys,
container: MagicMock,
container_exc: DockerException | None,
image_exc: aiodocker.DockerError | None,
remove_calls: list[call],
@@ -246,8 +245,8 @@ async def test_start(
"""Test starting Home Assistant."""
coresys.docker.images.inspect.return_value = {"Id": "123"}
coresys.docker.images.inspect.side_effect = image_exc
coresys.docker.containers_legacy.get.return_value.id = "123"
coresys.docker.containers_legacy.get.side_effect = container_exc
coresys.docker.containers.get.return_value.id = "123"
coresys.docker.containers.get.side_effect = container_exc
with (
patch.object(
@@ -255,7 +254,7 @@ async def test_start(
"version",
new=PropertyMock(return_value=AwesomeVersion("2023.7.0")),
),
patch.object(DockerAPI, "run", return_value=container.attrs) as run,
patch.object(DockerAPI, "run") as run,
patch.object(HomeAssistantCore, "_block_till_run") as block_till_run,
):
await coresys.homeassistant.core.start()
@@ -269,18 +268,17 @@ async def test_start(
assert run.call_args.kwargs["name"] == "homeassistant"
assert run.call_args.kwargs["hostname"] == "homeassistant"
coresys.docker.containers_legacy.get.return_value.stop.assert_not_called()
coresys.docker.containers.get.return_value.stop.assert_not_called()
assert (
coresys.docker.containers_legacy.get.return_value.remove.call_args_list
== remove_calls
coresys.docker.containers.get.return_value.remove.call_args_list == remove_calls
)
async def test_start_existing_container(coresys: CoreSys, path_extern):
"""Test starting Home Assistant when container exists and is viable."""
coresys.docker.images.inspect.return_value = {"Id": "123"}
coresys.docker.containers_legacy.get.return_value.image.id = "123"
coresys.docker.containers_legacy.get.return_value.status = "exited"
coresys.docker.containers.get.return_value.image.id = "123"
coresys.docker.containers.get.return_value.status = "exited"
with (
patch.object(
@@ -293,29 +291,29 @@ async def test_start_existing_container(coresys: CoreSys, path_extern):
await coresys.homeassistant.core.start()
block_till_run.assert_called_once()
coresys.docker.containers_legacy.get.return_value.start.assert_called_once()
coresys.docker.containers_legacy.get.return_value.stop.assert_not_called()
coresys.docker.containers_legacy.get.return_value.remove.assert_not_called()
coresys.docker.containers_legacy.get.return_value.run.assert_not_called()
coresys.docker.containers.get.return_value.start.assert_called_once()
coresys.docker.containers.get.return_value.stop.assert_not_called()
coresys.docker.containers.get.return_value.remove.assert_not_called()
coresys.docker.containers.get.return_value.run.assert_not_called()
@pytest.mark.parametrize("exists", [True, False])
async def test_stop(coresys: CoreSys, exists: bool):
"""Test stoppping Home Assistant."""
if exists:
coresys.docker.containers_legacy.get.return_value.status = "running"
coresys.docker.containers.get.return_value.status = "running"
else:
coresys.docker.containers_legacy.get.side_effect = NotFound("missing")
coresys.docker.containers.get.side_effect = NotFound("missing")
await coresys.homeassistant.core.stop()
coresys.docker.containers_legacy.get.return_value.remove.assert_not_called()
coresys.docker.containers.get.return_value.remove.assert_not_called()
if exists:
coresys.docker.containers_legacy.get.return_value.stop.assert_called_once_with(
coresys.docker.containers.get.return_value.stop.assert_called_once_with(
timeout=260
)
else:
coresys.docker.containers_legacy.get.return_value.stop.assert_not_called()
coresys.docker.containers.get.return_value.stop.assert_not_called()
async def test_restart(coresys: CoreSys):
@@ -324,20 +322,18 @@ async def test_restart(coresys: CoreSys):
await coresys.homeassistant.core.restart()
block_till_run.assert_called_once()
coresys.docker.containers_legacy.get.return_value.restart.assert_called_once_with(
coresys.docker.containers.get.return_value.restart.assert_called_once_with(
timeout=260
)
coresys.docker.containers_legacy.get.return_value.stop.assert_not_called()
coresys.docker.containers.get.return_value.stop.assert_not_called()
@pytest.mark.parametrize("get_error", [NotFound("missing"), DockerException(), None])
async def test_restart_failures(coresys: CoreSys, get_error: DockerException | None):
"""Test restart fails when container missing or can't be restarted."""
coresys.docker.containers_legacy.get.return_value.restart.side_effect = (
DockerException()
)
coresys.docker.containers.get.return_value.restart.side_effect = DockerException()
if get_error:
coresys.docker.containers_legacy.get.side_effect = get_error
coresys.docker.containers.get.side_effect = get_error
with pytest.raises(HomeAssistantError):
await coresys.homeassistant.core.restart()
@@ -356,12 +352,10 @@ async def test_stats_failures(
coresys: CoreSys, get_error: DockerException | None, status: str
):
"""Test errors when getting stats."""
coresys.docker.containers_legacy.get.return_value.status = status
coresys.docker.containers_legacy.get.return_value.stats.side_effect = (
DockerException()
)
coresys.docker.containers.get.return_value.status = status
coresys.docker.containers.get.return_value.stats.side_effect = DockerException()
if get_error:
coresys.docker.containers_legacy.get.side_effect = get_error
coresys.docker.containers.get.side_effect = get_error
with pytest.raises(HomeAssistantError):
await coresys.homeassistant.core.stats()
@@ -393,7 +387,7 @@ async def test_api_check_timeout(
):
await coresys.homeassistant.core.start()
assert coresys.homeassistant.api.get_api_state.call_count == 10
assert coresys.homeassistant.api.get_api_state.call_count == 3
assert (
"No Home Assistant Core response, assuming a fatal startup error" in caplog.text
)

View File

@@ -200,8 +200,6 @@ async def test_notify_on_change(coresys: CoreSys, ha_ws_client: AsyncMock):
"type": "HassioError",
"message": "Unknown error, see Supervisor logs (check with 'ha supervisor logs')",
"stage": "test",
"error_key": None,
"extra_fields": None,
}
],
"created": ANY,
@@ -230,8 +228,6 @@ async def test_notify_on_change(coresys: CoreSys, ha_ws_client: AsyncMock):
"type": "HassioError",
"message": "Unknown error, see Supervisor logs (check with 'ha supervisor logs')",
"stage": "test",
"error_key": None,
"extra_fields": None,
}
],
"created": ANY,

View File

@@ -1,6 +1,7 @@
"""Test base plugin functionality."""
import asyncio
from pathlib import Path
from unittest.mock import ANY, MagicMock, Mock, PropertyMock, call, patch
from awesomeversion import AwesomeVersion
@@ -158,13 +159,15 @@ async def test_plugin_watchdog(coresys: CoreSys, plugin: PluginBase) -> None:
],
indirect=["plugin"],
)
@pytest.mark.usefixtures("coresys", "tmp_supervisor_data", "path_extern")
async def test_plugin_watchdog_max_failed_attempts(
coresys: CoreSys,
capture_exception: Mock,
plugin: PluginBase,
error: PluginError,
container: MagicMock,
caplog: pytest.LogCaptureFixture,
tmp_supervisor_data: Path,
path_extern,
) -> None:
"""Test plugin watchdog gives up after max failed attempts."""
with patch.object(type(plugin.instance), "attach"):

View File

@@ -76,7 +76,7 @@ async def test_check(
docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon, folder: str
):
"""Test check reports issue when containers have incorrect config."""
docker.containers_legacy.get = _make_mock_container_get(
docker.containers.get = _make_mock_container_get(
["homeassistant", "hassio_audio", "addon_local_ssh"], folder
)
# Use state used in setup()
@@ -132,7 +132,7 @@ async def test_check(
assert await docker_config.approve_check()
# If the config issue is resolved, all issues are removed except the main one, which will be removed if the check isn't approved
docker.containers_legacy.get = _make_mock_container_get([])
docker.containers.get = _make_mock_container_get([])
with patch.object(DockerInterface, "is_running", return_value=True):
await coresys.plugins.load()
await coresys.homeassistant.load()
@@ -159,7 +159,7 @@ async def test_addon_volume_mount_not_flagged(
] # No media/share
# Mock container that has VOLUME mount to media/share with wrong propagation
docker.containers_legacy.get = _make_mock_container_get_with_volume_mount(
docker.containers.get = _make_mock_container_get_with_volume_mount(
["addon_local_ssh"], folder
)
@@ -221,7 +221,7 @@ async def test_addon_configured_mount_still_flagged(
out.attrs["Mounts"].append(mount)
return out
docker.containers_legacy.get = mock_container_get
docker.containers.get = mock_container_get
await coresys.core.set_state(CoreState.SETUP)
with patch.object(DockerInterface, "is_running", return_value=True):
@@ -275,7 +275,7 @@ async def test_addon_custom_target_path_flagged(
out.attrs["Mounts"].append(mount)
return out
docker.containers_legacy.get = mock_container_get
docker.containers.get = mock_container_get
await coresys.core.set_state(CoreState.SETUP)
with patch.object(DockerInterface, "is_running", return_value=True):

View File

@@ -30,7 +30,7 @@ async def test_evaluation(coresys: CoreSys):
assert container.reason not in coresys.resolution.unsupported
assert UnhealthyReason.DOCKER not in coresys.resolution.unhealthy
coresys.docker.containers_legacy.list.return_value = [
coresys.docker.containers.list.return_value = [
_make_image_attr("armhfbuild/watchtower:latest"),
_make_image_attr("concerco/watchtowerv6:10.0.2"),
_make_image_attr("containrrr/watchtower:1.1"),
@@ -47,7 +47,7 @@ async def test_evaluation(coresys: CoreSys):
"pyouroboros/ouroboros:1.4.3",
}
coresys.docker.containers_legacy.list.return_value = []
coresys.docker.containers.list.return_value = []
await container()
assert container.reason not in coresys.resolution.unsupported
@@ -62,7 +62,7 @@ async def test_corrupt_docker(coresys: CoreSys):
corrupt_docker = Issue(IssueType.CORRUPT_DOCKER, ContextType.SYSTEM)
assert corrupt_docker not in coresys.resolution.issues
coresys.docker.containers_legacy.list.side_effect = DockerException
coresys.docker.containers.list.side_effect = DockerException
await container()
assert corrupt_docker in coresys.resolution.issues

View File

@@ -33,7 +33,7 @@ async def test_evaluation(coresys: CoreSys, install_addon_ssh: Addon):
meta.attrs = observer_attrs if name == "hassio_observer" else addon_attrs
return meta
coresys.docker.containers_legacy.get = get_container
coresys.docker.containers.get = get_container
await coresys.plugins.observer.instance.attach(TEST_VERSION)
await install_addon_ssh.instance.attach(TEST_VERSION)

View File

@@ -31,7 +31,7 @@ async def _mock_wait_for_container() -> None:
async def test_fixup(docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon):
"""Test fixup rebuilds addon's container."""
docker.containers_legacy.get = make_mock_container_get("running")
docker.containers.get = make_mock_container_get("running")
addon_execute_rebuild = FixupAddonExecuteRebuild(coresys)
@@ -61,7 +61,7 @@ async def test_fixup_stopped_core(
):
"""Test fixup just removes addon's container when it is stopped."""
caplog.clear()
docker.containers_legacy.get = make_mock_container_get("stopped")
docker.containers.get = make_mock_container_get("stopped")
addon_execute_rebuild = FixupAddonExecuteRebuild(coresys)
coresys.resolution.create_issue(
@@ -76,7 +76,7 @@ async def test_fixup_stopped_core(
assert not coresys.resolution.issues
assert not coresys.resolution.suggestions
docker.containers_legacy.get("addon_local_ssh").remove.assert_called_once_with(
docker.containers.get("addon_local_ssh").remove.assert_called_once_with(
force=True, v=True
)
assert "Addon local_ssh is stopped" in caplog.text
@@ -90,7 +90,7 @@ async def test_fixup_unknown_core(
):
"""Test fixup does nothing if addon's container has already been removed."""
caplog.clear()
docker.containers_legacy.get.side_effect = NotFound("")
docker.containers.get.side_effect = NotFound("")
addon_execute_rebuild = FixupAddonExecuteRebuild(coresys)
coresys.resolution.create_issue(

Some files were not shown because too many files have changed in this diff