Compare commits


3 Commits

Author         SHA1        Message                                                   Date
Stefan Agner   66a3766b5a  Merge branch 'main' into remove-deprecated-info-fields    2025-10-08 15:28:32 +02:00
Mike Degatano  7031a58083  Keep addons until core can be refactored                  2025-10-02 17:48:40 +00:00
Mike Degatano  3c0e62f6ba  Remove deprecated fields and options from Supervisor API  2025-10-02 17:48:38 +00:00
156 changed files with 2956 additions and 6497 deletions

View File

@@ -1,7 +1,6 @@
# General files
.git
.github
.gitkeep
.devcontainer
.vscode

View File

@@ -34,9 +34,6 @@ on:
env:
DEFAULT_PYTHON: "3.13"
COSIGN_VERSION: "v2.5.3"
CRANE_VERSION: "v0.20.7"
CRANE_SHA256: "8ef3564d264e6b5ca93f7b7f5652704c4dd29d33935aff6947dd5adefd05953e"
BUILD_NAME: supervisor
BUILD_TYPE: supervisor
@@ -53,10 +50,10 @@ jobs:
version: ${{ steps.version.outputs.version }}
channel: ${{ steps.version.outputs.channel }}
publish: ${{ steps.version.outputs.publish }}
build_wheels: ${{ steps.requirements.outputs.build_wheels }}
requirements: ${{ steps.requirements.outputs.changed }}
steps:
- name: Checkout the repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
@@ -72,25 +69,20 @@ jobs:
- name: Get changed files
id: changed_files
if: github.event_name != 'release'
if: steps.version.outputs.publish == 'false'
uses: masesgroup/retrieve-changed-files@491e80760c0e28d36ca6240a27b1ccb8e1402c13 # v3.0.0
- name: Check if requirements files changed
id: requirements
run: |
# No wheels build necessary for releases
if [[ "${{ github.event_name }}" == "release" ]]; then
echo "build_wheels=false" >> "$GITHUB_OUTPUT"
elif [[ "${{ steps.changed_files.outputs.all }}" =~ (requirements\.txt|build\.yaml|\.github/workflows/builder\.yml) ]]; then
echo "build_wheels=true" >> "$GITHUB_OUTPUT"
else
echo "build_wheels=false" >> "$GITHUB_OUTPUT"
if [[ "${{ steps.changed_files.outputs.all }}" =~ (requirements.txt|build.yaml) ]]; then
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
build:
name: Build ${{ matrix.arch }} supervisor
needs: init
runs-on: ${{ matrix.runs-on }}
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
@@ -98,66 +90,34 @@ jobs:
strategy:
matrix:
arch: ${{ fromJson(needs.init.outputs.architectures) }}
include:
- runs-on: ubuntu-24.04
- runs-on: ubuntu-24.04-arm
arch: aarch64
env:
WHEELS_ABI: cp313
WHEELS_TAG: musllinux_1_2
WHEELS_APK_DEPS: "libffi-dev;openssl-dev;yaml-dev"
WHEELS_SKIP_BINARY: aiohttp
steps:
- name: Checkout the repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
- name: Write env-file for wheels build
if: needs.init.outputs.build_wheels == 'true'
- name: Write env-file
if: needs.init.outputs.requirements == 'true'
run: |
(
# Fix out of memory issues with rust
echo "CARGO_NET_GIT_FETCH_WITH_CLI=true"
) > .env_file
- name: Build and publish wheels
if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'true'
uses: home-assistant/wheels@e5742a69d69f0e274e2689c998900c7d19652c21 # 2025.12.0
# home-assistant/wheels doesn't support sha pinning
- name: Build wheels
if: needs.init.outputs.requirements == 'true'
uses: home-assistant/wheels@2025.09.1
with:
abi: cp313
tag: musllinux_1_2
arch: ${{ matrix.arch }}
wheels-key: ${{ secrets.WHEELS_KEY }}
abi: ${{ env.WHEELS_ABI }}
tag: ${{ env.WHEELS_TAG }}
arch: ${{ matrix.arch }}
apk: ${{ env.WHEELS_APK_DEPS }}
skip-binary: ${{ env.WHEELS_SKIP_BINARY }}
apk: "libffi-dev;openssl-dev;yaml-dev"
skip-binary: aiohttp
env-file: true
requirements: "requirements.txt"
- name: Build local wheels
if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'false'
uses: home-assistant/wheels@e5742a69d69f0e274e2689c998900c7d19652c21 # 2025.12.0
with:
wheels-host: ""
wheels-user: ""
wheels-key: ""
local-wheels-repo-path: "wheels/"
abi: ${{ env.WHEELS_ABI }}
tag: ${{ env.WHEELS_TAG }}
arch: ${{ matrix.arch }}
apk: ${{ env.WHEELS_APK_DEPS }}
skip-binary: ${{ env.WHEELS_SKIP_BINARY }}
env-file: true
requirements: "requirements.txt"
- name: Upload local wheels artifact
if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'false'
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: wheels-${{ matrix.arch }}
path: wheels
retention-days: 1
- name: Set version
if: needs.init.outputs.publish == 'true'
uses: home-assistant/actions/helpers/version@master
@@ -166,15 +126,15 @@ jobs:
- name: Set up Python ${{ env.DEFAULT_PYTHON }}
if: needs.init.outputs.publish == 'true'
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: ${{ env.DEFAULT_PYTHON }}
- name: Install Cosign
if: needs.init.outputs.publish == 'true'
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # v3.10.0
with:
cosign-release: ${{ env.COSIGN_VERSION }}
cosign-release: "v2.5.3"
- name: Install dirhash and calc hash
if: needs.init.outputs.publish == 'true'
@@ -202,24 +162,25 @@ jobs:
# home-assistant/builder doesn't support sha pinning
- name: Build supervisor
uses: home-assistant/builder@2025.11.0
uses: home-assistant/builder@2025.09.0
with:
image: ${{ matrix.arch }}
args: |
$BUILD_ARGS \
--${{ matrix.arch }} \
--target /data \
--cosign \
--generic ${{ needs.init.outputs.version }}
env:
CAS_API_KEY: ${{ secrets.CAS_TOKEN }}
version:
name: Update version
needs: ["init", "run_supervisor", "retag_deprecated"]
needs: ["init", "run_supervisor"]
runs-on: ubuntu-latest
steps:
- name: Checkout the repository
if: needs.init.outputs.publish == 'true'
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Initialize git
if: needs.init.outputs.publish == 'true'
@@ -244,19 +205,12 @@ jobs:
timeout-minutes: 60
steps:
- name: Checkout the repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Download local wheels artifact
if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'false'
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
with:
name: wheels-amd64
path: wheels
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
# home-assistant/builder doesn't support sha pinning
- name: Build the Supervisor
if: needs.init.outputs.publish != 'true'
uses: home-assistant/builder@2025.11.0
uses: home-assistant/builder@2025.09.0
with:
args: |
--test \
@@ -339,6 +293,33 @@ jobs:
exit 1
fi
- name: Check the Supervisor code sign
if: needs.init.outputs.publish == 'true'
run: |
echo "Enable Content-Trust"
test=$(docker exec hassio_cli ha security options --content-trust=true --no-progress --raw-json | jq -r '.result')
if [ "$test" != "ok" ]; then
exit 1
fi
echo "Run supervisor health check"
test=$(docker exec hassio_cli ha resolution healthcheck --no-progress --raw-json | jq -r '.result')
if [ "$test" != "ok" ]; then
exit 1
fi
echo "Check supervisor unhealthy"
test=$(docker exec hassio_cli ha resolution info --no-progress --raw-json | jq -r '.data.unhealthy[]')
if [ "$test" != "" ]; then
exit 1
fi
echo "Check supervisor supported"
test=$(docker exec hassio_cli ha resolution info --no-progress --raw-json | jq -r '.data.unsupported[]')
if [[ "$test" =~ source_mods ]]; then
exit 1
fi
- name: Create full backup
id: backup
run: |
@@ -400,50 +381,3 @@ jobs:
- name: Get supervisor logs on failure
if: ${{ cancelled() || failure() }}
run: docker logs hassio_supervisor
retag_deprecated:
needs: ["build", "init"]
name: Re-tag deprecated ${{ matrix.arch }} images
if: needs.init.outputs.publish == 'true'
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
packages: write
strategy:
matrix:
arch: ["armhf", "armv7", "i386"]
env:
# Last available release for deprecated architectures
FROZEN_VERSION: "2025.11.5"
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Install Cosign
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
with:
cosign-release: ${{ env.COSIGN_VERSION }}
- name: Install crane
run: |
curl -sLO https://github.com/google/go-containerregistry/releases/download/${{ env.CRANE_VERSION }}/go-containerregistry_Linux_x86_64.tar.gz
echo "${{ env.CRANE_SHA256 }} go-containerregistry_Linux_x86_64.tar.gz" | sha256sum -c -
tar xzf go-containerregistry_Linux_x86_64.tar.gz crane
sudo mv crane /usr/local/bin/
- name: Re-tag deprecated image with updated version label
run: |
crane auth login ghcr.io -u ${{ github.repository_owner }} -p ${{ secrets.GITHUB_TOKEN }}
crane mutate \
--label io.hass.version=${{ needs.init.outputs.version }} \
--tag ghcr.io/home-assistant/${{ matrix.arch }}-hassio-supervisor:${{ needs.init.outputs.version }} \
ghcr.io/home-assistant/${{ matrix.arch }}-hassio-supervisor:${{ env.FROZEN_VERSION }}
- name: Sign image with Cosign
run: |
cosign sign --yes ghcr.io/home-assistant/${{ matrix.arch }}-hassio-supervisor:${{ needs.init.outputs.version }}
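Note: the two requirements-check regexes earlier in this file differ in dot escaping; in the unescaped form (requirements.txt|build.yaml) the "." matches any character, so the escaped variant is the stricter check. A quick Python illustration:

    import re

    # Unescaped dot acts as a wildcard and also matches "requirements_txt"
    print(bool(re.search(r"(requirements.txt|build.yaml)", "requirements_txt")))    # True
    # Escaped dot only matches a literal "."
    print(bool(re.search(r"(requirements\.txt|build\.yaml)", "requirements_txt")))  # False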

View File

@@ -26,10 +26,10 @@ jobs:
name: Prepare Python dependencies
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python
id: python
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: ${{ env.DEFAULT_PYTHON }}
- name: Restore Python virtual environment
@@ -68,9 +68,9 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -111,9 +111,9 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -154,7 +154,7 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Register hadolint problem matcher
run: |
echo "::add-matcher::.github/workflows/matchers/hadolint.json"
@@ -169,9 +169,9 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -213,9 +213,9 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -257,9 +257,9 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -293,9 +293,9 @@ jobs:
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -339,14 +339,14 @@ jobs:
name: Run tests Python ${{ needs.prepare.outputs.python-version }}
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Install Cosign
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # v3.10.0
with:
cosign-release: "v2.5.3"
- name: Restore Python virtual environment
@@ -386,7 +386,7 @@ jobs:
-o console_output_style=count \
tests
- name: Upload coverage artifact
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: coverage
path: .coverage
@@ -398,9 +398,9 @@ jobs:
needs: ["pytest", "prepare"]
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
@@ -417,7 +417,7 @@ jobs:
echo "Failed to restore Python virtual environment from cache"
exit 1
- name: Download all coverage artifacts
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
with:
name: coverage
path: coverage/

View File

@@ -11,7 +11,7 @@ jobs:
name: Release Drafter
steps:
- name: Checkout the repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0

View File

@@ -10,9 +10,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out code from GitHub
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Sentry Release
uses: getsentry/action-release@128c5058bbbe93c8e02147fe0a9c713f166259a6 # v3.4.0
uses: getsentry/action-release@4f502acc1df792390abe36f2dcb03612ef144818 # v3.3.0
env:
SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
SENTRY_ORG: ${{ secrets.SENTRY_ORG }}

View File

@@ -9,14 +9,13 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
- uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-stale: 30
days-before-close: 7
stale-issue-label: "stale"
exempt-issue-labels: "no-stale,Help%20wanted,help-wanted,pinned,rfc,security"
only-issue-types: "bug"
stale-issue-message: >
There hasn't been any activity on this issue recently. Due to the
high number of incoming GitHub notifications, we have to clean some

View File

@@ -14,7 +14,7 @@ jobs:
latest_version: ${{ steps.latest_frontend_version.outputs.latest_tag }}
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Get latest frontend release
id: latest_frontend_version
uses: abatilo/release-info-action@32cb932219f1cee3fc4f4a298fd65ead5d35b661 # v1.3.3
@@ -49,7 +49,7 @@ jobs:
if: needs.check-version.outputs.skip != 'true'
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Clear www folder
run: |
rm -rf supervisor/api/panel/*
@@ -68,7 +68,7 @@ jobs:
run: |
rm -f supervisor/api/panel/home_assistant_frontend_supervisor-*.tar.gz
- name: Create PR
uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412 # v7.0.9
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
commit-message: "Update frontend to version ${{ needs.check-version.outputs.latest_version }}"
branch: autoupdate-frontend

.gitignore
View File

@@ -24,9 +24,6 @@ var/
.installed.cfg
*.egg
# Local wheels
wheels/**/*.whl
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
@@ -105,4 +102,4 @@ ENV/
/.dmypy.json
# Mac
.DS_Store
.DS_Store

View File

@@ -1,6 +1,6 @@
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.14.3
rev: v0.11.10
hooks:
- id: ruff
args:

View File

@@ -8,7 +8,9 @@ ENV \
UV_SYSTEM_PYTHON=true
ARG \
COSIGN_VERSION
COSIGN_VERSION \
BUILD_ARCH \
QEMU_CPU
# Install base
WORKDIR /usr/src
@@ -30,19 +32,15 @@ RUN \
&& pip3 install uv==0.8.9
# Install requirements
COPY requirements.txt .
RUN \
--mount=type=bind,source=./requirements.txt,target=/usr/src/requirements.txt \
--mount=type=bind,source=./wheels,target=/usr/src/wheels \
if ls /usr/src/wheels/musllinux/* >/dev/null 2>&1; then \
LOCAL_WHEELS=/usr/src/wheels/musllinux; \
echo "Using local wheels from: $LOCAL_WHEELS"; \
if [ "${BUILD_ARCH}" = "i386" ]; then \
setarch="linux32"; \
else \
LOCAL_WHEELS=; \
echo "No local wheels found"; \
fi && \
uv pip install --compile-bytecode --no-cache --no-build \
-r requirements.txt \
${LOCAL_WHEELS:+--find-links $LOCAL_WHEELS}
setarch=""; \
fi \
&& ${setarch} uv pip install --compile-bytecode --no-cache --no-build -r requirements.txt \
&& rm -f requirements.txt
# Install Home Assistant Supervisor
COPY . supervisor

View File

@@ -1,7 +1,13 @@
image: ghcr.io/home-assistant/{arch}-hassio-supervisor
build_from:
aarch64: ghcr.io/home-assistant/aarch64-base-python:3.13-alpine3.22-2025.11.1
amd64: ghcr.io/home-assistant/amd64-base-python:3.13-alpine3.22-2025.11.1
aarch64: ghcr.io/home-assistant/aarch64-base-python:3.13-alpine3.22
armhf: ghcr.io/home-assistant/armhf-base-python:3.13-alpine3.22
armv7: ghcr.io/home-assistant/armv7-base-python:3.13-alpine3.22
amd64: ghcr.io/home-assistant/amd64-base-python:3.13-alpine3.22
i386: ghcr.io/home-assistant/i386-base-python:3.13-alpine3.22
codenotary:
signer: notary@home-assistant.io
base_image: notary@home-assistant.io
cosign:
base_identity: https://github.com/home-assistant/docker-base/.*
identity: https://github.com/home-assistant/supervisor/.*

View File

@@ -321,6 +321,8 @@ lint.ignore = [
"PLW2901", # Outer {outer_kind} variable {name} overwritten by inner {inner_kind} target
"UP006", # keep type annotation style as is
"UP007", # keep type annotation style as is
# Ignored due to performance: https://github.com/charliermarsh/ruff/issues/2923
"UP038", # Use `X | Y` in `isinstance` call instead of `(X, Y)`
# May conflict with the formatter, https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules
"W191",

View File

@@ -1,16 +1,14 @@
aiodns==3.5.0
aiodocker==0.24.0
aiohttp==3.13.2
aiohttp==3.13.0
atomicwrites-homeassistant==1.4.1
attrs==25.4.0
awesomeversion==25.8.0
backports.zstd==1.1.0
blockbuster==1.5.26
brotli==1.2.0
blockbuster==1.5.25
brotli==1.1.0
ciso8601==2.3.3
colorlog==6.10.1
colorlog==6.9.0
cpe==1.3.1
cryptography==46.0.3
cryptography==46.0.2
debugpy==1.8.17
deepmerge==2.0
dirhash==0.5.0
@@ -19,14 +17,14 @@ faust-cchardet==2.1.19
gitpython==3.1.45
jinja2==3.1.6
log-rate-limit==1.4.2
orjson==3.11.4
orjson==3.11.3
pulsectl==24.12.0
pyudev==0.24.4
pyudev==0.24.3
PyYAML==6.0.3
requests==2.32.5
securetar==2025.12.0
sentry-sdk==2.47.0
securetar==2025.2.1
sentry-sdk==2.40.0
setuptools==80.9.0
voluptuous==0.15.2
dbus-fast==3.1.2
dbus-fast==2.44.5
zlib-fast==0.2.1

View File

@@ -1,16 +1,16 @@
astroid==4.0.2
coverage==7.12.0
mypy==1.19.0
pre-commit==4.5.0
pylint==4.0.4
astroid==3.3.11
coverage==7.10.7
mypy==1.18.2
pre-commit==4.3.0
pylint==3.3.9
pytest-aiohttp==1.1.0
pytest-asyncio==1.3.0
pytest-asyncio==0.25.2
pytest-cov==7.0.0
pytest-timeout==2.4.0
pytest==9.0.1
ruff==0.14.8
time-machine==3.1.0
types-docker==7.1.0.20251202
pytest==8.4.2
ruff==0.14.0
time-machine==2.19.0
types-docker==7.1.0.20250916
types-pyyaml==6.0.12.20250915
types-requests==2.32.4.20250913
urllib3==2.6.0
urllib3==2.5.0

View File

@@ -66,22 +66,13 @@ from ..docker.const import ContainerState
from ..docker.monitor import DockerContainerStateEvent
from ..docker.stats import DockerStats
from ..exceptions import (
AddonBackupMetadataInvalidError,
AddonBuildFailedUnknownError,
AddonConfigurationInvalidError,
AddonNotRunningError,
AddonConfigurationError,
AddonNotSupportedError,
AddonNotSupportedWriteStdinError,
AddonPrePostBackupCommandReturnedError,
AddonsError,
AddonsJobError,
AddonUnknownError,
BackupRestoreUnknownError,
ConfigurationFileError,
DockerBuildError,
DockerError,
HostAppArmorError,
StoreAddonNotFoundError,
)
from ..hardware.data import Device
from ..homeassistant.const import WSEvent
@@ -244,7 +235,7 @@ class Addon(AddonModel):
await self.instance.check_image(self.version, default_image, self.arch)
except DockerError:
_LOGGER.info("No %s addon Docker image %s found", self.slug, self.image)
with suppress(DockerError, AddonNotSupportedError):
with suppress(DockerError):
await self.instance.install(self.version, default_image, arch=self.arch)
self.persist[ATTR_IMAGE] = default_image
@@ -727,16 +718,18 @@ class Addon(AddonModel):
options = self.schema.validate(self.options)
await self.sys_run_in_executor(write_json_file, self.path_options, options)
except vol.Invalid as ex:
raise AddonConfigurationInvalidError(
_LOGGER.error,
addon=self.slug,
validation_error=humanize_error(self.options, ex),
) from None
except ConfigurationFileError as err:
_LOGGER.error(
"Add-on %s has invalid options: %s",
self.slug,
humanize_error(self.options, ex),
)
except ConfigurationFileError:
_LOGGER.error("Add-on %s can't write options", self.slug)
raise AddonUnknownError(addon=self.slug) from err
else:
_LOGGER.debug("Add-on %s write options: %s", self.slug, options)
return
_LOGGER.debug("Add-on %s write options: %s", self.slug, options)
raise AddonConfigurationError()
@Job(
name="addon_unload",
@@ -779,7 +772,7 @@ class Addon(AddonModel):
async def install(self) -> None:
"""Install and setup this addon."""
if not self.addon_store:
raise StoreAddonNotFoundError(addon=self.slug)
raise AddonsError("Missing from store, cannot install!")
await self.sys_addons.data.install(self.addon_store)
@@ -800,17 +793,9 @@ class Addon(AddonModel):
await self.instance.install(
self.latest_version, self.addon_store.image, arch=self.arch
)
except AddonsError:
await self.sys_addons.data.uninstall(self)
raise
except DockerBuildError as err:
_LOGGER.error("Could not build image for addon %s: %s", self.slug, err)
await self.sys_addons.data.uninstall(self)
raise AddonBuildFailedUnknownError(addon=self.slug) from err
except DockerError as err:
_LOGGER.error("Could not pull image to update addon %s: %s", self.slug, err)
await self.sys_addons.data.uninstall(self)
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
# Finish initialization and set up listeners
await self.load()
@@ -834,8 +819,7 @@ class Addon(AddonModel):
try:
await self.instance.remove(remove_image=remove_image)
except DockerError as err:
_LOGGER.error("Could not remove image for addon %s: %s", self.slug, err)
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
self.state = AddonState.UNKNOWN
@@ -900,7 +884,7 @@ class Addon(AddonModel):
if it was running. Else nothing is returned.
"""
if not self.addon_store:
raise StoreAddonNotFoundError(addon=self.slug)
raise AddonsError("Missing from store, cannot update!")
old_image = self.image
# Cache data to prevent races with other updates to global
@@ -908,12 +892,8 @@ class Addon(AddonModel):
try:
await self.instance.update(store.version, store.image, arch=self.arch)
except DockerBuildError as err:
_LOGGER.error("Could not build image for addon %s: %s", self.slug, err)
raise AddonBuildFailedUnknownError(addon=self.slug) from err
except DockerError as err:
_LOGGER.error("Could not pull image to update addon %s: %s", self.slug, err)
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
# Stop the addon if running
if (last_state := self.state) in {AddonState.STARTED, AddonState.STARTUP}:
@@ -955,23 +935,12 @@ class Addon(AddonModel):
"""
last_state: AddonState = self.state
try:
# remove docker container and image but not addon config
# remove docker container but not addon config
try:
await self.instance.remove()
except DockerError as err:
_LOGGER.error("Could not remove image for addon %s: %s", self.slug, err)
raise AddonUnknownError(addon=self.slug) from err
try:
await self.instance.install(self.version)
except DockerBuildError as err:
_LOGGER.error("Could not build image for addon %s: %s", self.slug, err)
raise AddonBuildFailedUnknownError(addon=self.slug) from err
except DockerError as err:
_LOGGER.error(
"Could not pull image to update addon %s: %s", self.slug, err
)
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
if self.addon_store:
await self.sys_addons.data.update(self.addon_store)
@@ -1142,9 +1111,8 @@ class Addon(AddonModel):
try:
await self.instance.run()
except DockerError as err:
_LOGGER.error("Could not start container for addon %s: %s", self.slug, err)
self.state = AddonState.ERROR
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
return self.sys_create_task(self._wait_for_startup())
@@ -1159,9 +1127,8 @@ class Addon(AddonModel):
try:
await self.instance.stop()
except DockerError as err:
_LOGGER.error("Could not stop container for addon %s: %s", self.slug, err)
self.state = AddonState.ERROR
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
@Job(
name="addon_restart",
@@ -1194,15 +1161,9 @@ class Addon(AddonModel):
async def stats(self) -> DockerStats:
"""Return stats of container."""
try:
if not await self.is_running():
raise AddonNotRunningError(_LOGGER.warning, addon=self.slug)
return await self.instance.stats()
except DockerError as err:
_LOGGER.error(
"Could not get stats of container for addon %s: %s", self.slug, err
)
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
@Job(
name="addon_write_stdin",
@@ -1212,18 +1173,14 @@ class Addon(AddonModel):
async def write_stdin(self, data) -> None:
"""Write data to add-on stdin."""
if not self.with_stdin:
raise AddonNotSupportedWriteStdinError(_LOGGER.error, addon=self.slug)
raise AddonNotSupportedError(
f"Add-on {self.slug} does not support writing to stdin!", _LOGGER.error
)
try:
if not await self.is_running():
raise AddonNotRunningError(_LOGGER.warning, addon=self.slug)
await self.instance.write_stdin(data)
return await self.instance.write_stdin(data)
except DockerError as err:
_LOGGER.error(
"Could not write stdin to container for addon %s: %s", self.slug, err
)
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
async def _backup_command(self, command: str) -> None:
try:
@@ -1232,14 +1189,15 @@ class Addon(AddonModel):
_LOGGER.debug(
"Pre-/Post backup command failed with: %s", command_return.output
)
raise AddonPrePostBackupCommandReturnedError(
_LOGGER.error, addon=self.slug, exit_code=command_return.exit_code
raise AddonsError(
f"Pre-/Post backup command returned error code: {command_return.exit_code}",
_LOGGER.error,
)
except DockerError as err:
_LOGGER.error(
"Failed running pre-/post backup command %s: %s", command, err
)
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError(
f"Failed running pre-/post backup command {command}: {str(err)}",
_LOGGER.error,
) from err
@Job(
name="addon_begin_backup",
@@ -1328,14 +1286,15 @@ class Addon(AddonModel):
try:
self.instance.export_image(temp_path.joinpath("image.tar"))
except DockerError as err:
raise BackupRestoreUnknownError() from err
raise AddonsError() from err
# Store local configs/state
try:
write_json_file(temp_path.joinpath("addon.json"), metadata)
except ConfigurationFileError as err:
_LOGGER.error("Can't save meta for %s: %s", self.slug, err)
raise BackupRestoreUnknownError() from err
raise AddonsError(
f"Can't save meta for {self.slug}", _LOGGER.error
) from err
# Store AppArmor Profile
if apparmor_profile:
@@ -1345,7 +1304,9 @@ class Addon(AddonModel):
apparmor_profile, profile_backup_file
)
except HostAppArmorError as err:
raise BackupRestoreUnknownError() from err
raise AddonsError(
"Can't backup AppArmor profile", _LOGGER.error
) from err
# Write tarfile
with tar_file as backup:
@@ -1399,8 +1360,7 @@ class Addon(AddonModel):
)
_LOGGER.info("Finish backup for addon %s", self.slug)
except (tarfile.TarError, OSError, AddFileError) as err:
_LOGGER.error("Can't write backup tarfile for addon %s: %s", self.slug, err)
raise BackupRestoreUnknownError() from err
raise AddonsError(f"Can't write tarfile: {err}", _LOGGER.error) from err
finally:
if was_running:
wait_for_start = await self.end_backup()
@@ -1442,24 +1402,28 @@ class Addon(AddonModel):
try:
tmp, data = await self.sys_run_in_executor(_extract_tarfile)
except tarfile.TarError as err:
_LOGGER.error("Can't extract backup tarfile for %s: %s", self.slug, err)
raise BackupRestoreUnknownError() from err
raise AddonsError(
f"Can't read tarfile {tar_file}: {err}", _LOGGER.error
) from err
except ConfigurationFileError as err:
raise AddonUnknownError(addon=self.slug) from err
raise AddonsError() from err
try:
# Validate
try:
data = SCHEMA_ADDON_BACKUP(data)
except vol.Invalid as err:
raise AddonBackupMetadataInvalidError(
raise AddonsError(
f"Can't validate {self.slug}, backup data: {humanize_error(data, err)}",
_LOGGER.error,
addon=self.slug,
validation_error=humanize_error(data, err),
) from err
# Validate availability. Raises if not
self._validate_availability(data[ATTR_SYSTEM], logger=_LOGGER.error)
# If available
if not self._available(data[ATTR_SYSTEM]):
raise AddonNotSupportedError(
f"Add-on {self.slug} is not available for this platform",
_LOGGER.error,
)
# Restore local add-on information
_LOGGER.info("Restore config for addon %s", self.slug)
@@ -1518,10 +1482,9 @@ class Addon(AddonModel):
try:
await self.sys_run_in_executor(_restore_data)
except shutil.Error as err:
_LOGGER.error(
"Can't restore origin data for %s: %s", self.slug, err
)
raise BackupRestoreUnknownError() from err
raise AddonsError(
f"Can't restore origin data: {err}", _LOGGER.error
) from err
# Restore AppArmor
profile_file = Path(tmp.name, "apparmor.txt")
@@ -1532,11 +1495,10 @@ class Addon(AddonModel):
)
except HostAppArmorError as err:
_LOGGER.error(
"Can't restore AppArmor profile for add-on %s: %s",
"Can't restore AppArmor profile for add-on %s",
self.slug,
err,
)
raise BackupRestoreUnknownError() from err
raise AddonsError() from err
finally:
# Is add-on loaded
@@ -1551,6 +1513,13 @@ class Addon(AddonModel):
_LOGGER.info("Finished restore for add-on %s", self.slug)
return wait_for_start
def check_trust(self) -> Awaitable[None]:
"""Calculate Addon docker content trust.
Return Coroutine.
"""
return self.instance.check_trust()
@Job(
name="addon_restart_after_problem",
throttle_period=WATCHDOG_THROTTLE_PERIOD,
@@ -1593,15 +1562,7 @@ class Addon(AddonModel):
)
break
# Exponential backoff to spread retries over the throttle window
delay = WATCHDOG_RETRY_SECONDS * (1 << max(attempts - 1, 0))
_LOGGER.debug(
"Watchdog will retry addon %s in %s seconds (attempt %s)",
self.name,
delay,
attempts + 1,
)
await asyncio.sleep(delay)
await asyncio.sleep(WATCHDOG_RETRY_SECONDS)
async def container_state_changed(self, event: DockerContainerStateEvent) -> None:
"""Set addon state from container state."""

View File

@@ -2,10 +2,7 @@
from __future__ import annotations
import base64
from functools import cached_property
import json
import logging
from pathlib import Path
from typing import TYPE_CHECKING, Any
@@ -15,31 +12,20 @@ from ..const import (
ATTR_ARGS,
ATTR_BUILD_FROM,
ATTR_LABELS,
ATTR_PASSWORD,
ATTR_SQUASH,
ATTR_USERNAME,
FILE_SUFFIX_CONFIGURATION,
META_ADDON,
SOCKET_DOCKER,
CpuArch,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..docker.const import DOCKER_HUB, DOCKER_HUB_LEGACY
from ..docker.interface import MAP_ARCH
from ..exceptions import (
AddonBuildArchitectureNotSupportedError,
AddonBuildDockerfileMissingError,
ConfigurationFileError,
HassioArchNotFound,
)
from ..exceptions import ConfigurationFileError, HassioArchNotFound
from ..utils.common import FileConfiguration, find_one_filetype
from .validate import SCHEMA_BUILD_CONFIG
if TYPE_CHECKING:
from .manager import AnyAddon
_LOGGER: logging.Logger = logging.getLogger(__name__)
class AddonBuild(FileConfiguration, CoreSysAttributes):
"""Handle build options for add-ons."""
@@ -76,7 +62,7 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
raise RuntimeError()
@cached_property
def arch(self) -> CpuArch:
def arch(self) -> str:
"""Return arch of the add-on."""
return self.sys_arch.match([self.addon.arch])
@@ -120,7 +106,7 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
return self.addon.path_location.joinpath(f"Dockerfile.{self.arch}")
return self.addon.path_location.joinpath("Dockerfile")
async def is_valid(self) -> None:
async def is_valid(self) -> bool:
"""Return true if the build env is valid."""
def build_is_valid() -> bool:
@@ -132,58 +118,12 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
)
try:
if not await self.sys_run_in_executor(build_is_valid):
raise AddonBuildDockerfileMissingError(
_LOGGER.error, addon=self.addon.slug
)
return await self.sys_run_in_executor(build_is_valid)
except HassioArchNotFound:
raise AddonBuildArchitectureNotSupportedError(
_LOGGER.error,
addon=self.addon.slug,
addon_arch_list=self.addon.supported_arch,
system_arch_list=[arch.value for arch in self.sys_arch.supported],
) from None
def get_docker_config_json(self) -> str | None:
"""Generate Docker config.json content with registry credentials for base image.
Returns a JSON string with registry credentials for the base image's registry,
or None if no matching registry is configured.
Raises:
HassioArchNotFound: If the add-on is not supported on the current architecture.
"""
# Early return before accessing base_image to avoid unnecessary arch lookup
if not self.sys_docker.config.registries:
return None
registry = self.sys_docker.config.get_registry_for_image(self.base_image)
if not registry:
return None
stored = self.sys_docker.config.registries[registry]
username = stored[ATTR_USERNAME]
password = stored[ATTR_PASSWORD]
# Docker config.json uses base64-encoded "username:password" for auth
auth_string = base64.b64encode(f"{username}:{password}".encode()).decode()
# Use the actual registry URL for the key
# Docker Hub uses "https://index.docker.io/v1/" as the key
# Support both docker.io (official) and hub.docker.com (legacy)
registry_key = (
"https://index.docker.io/v1/"
if registry in (DOCKER_HUB, DOCKER_HUB_LEGACY)
else registry
)
config = {"auths": {registry_key: {"auth": auth_string}}}
return json.dumps(config)
return False
def get_docker_args(
self, version: AwesomeVersion, image_tag: str, docker_config_path: Path | None
self, version: AwesomeVersion, image_tag: str
) -> dict[str, Any]:
"""Create a dict with Docker run args."""
dockerfile_path = self.get_dockerfile().relative_to(self.addon.path_location)
@@ -232,24 +172,12 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
self.addon.path_location
)
volumes = {
SOCKET_DOCKER: {"bind": "/var/run/docker.sock", "mode": "rw"},
addon_extern_path: {"bind": "/addon", "mode": "ro"},
}
# Mount Docker config with registry credentials if available
if docker_config_path:
docker_config_extern_path = self.sys_config.local_to_extern_path(
docker_config_path
)
volumes[docker_config_extern_path] = {
"bind": "/root/.docker/config.json",
"mode": "ro",
}
return {
"command": build_cmd,
"volumes": volumes,
"volumes": {
SOCKET_DOCKER: {"bind": "/var/run/docker.sock", "mode": "rw"},
addon_extern_path: {"bind": "/addon", "mode": "ro"},
},
"working_dir": "/addon",
}
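One side of this build.py hunk generates Docker config.json content for registry credentials. The file format itself is standard: credentials live under "auths" as base64("username:password"), and Docker Hub is keyed as "https://index.docker.io/v1/". A self-contained sketch with hypothetical credentials:

    import base64
    import json

    def docker_auth_config(registry: str, username: str, password: str) -> str:
        """Build a minimal Docker config.json for one registry."""
        key = "https://index.docker.io/v1/" if registry == "docker.io" else registry
        auth = base64.b64encode(f"{username}:{password}".encode()).decode()
        return json.dumps({"auths": {key: {"auth": auth}}})

    print(docker_auth_config("ghcr.io", "someuser", "somesecret"))
    # {"auths": {"ghcr.io": {"auth": "<base64 of someuser:somesecret>"}}}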

View File

@@ -87,7 +87,6 @@ from ..const import (
AddonBootConfig,
AddonStage,
AddonStartup,
CpuArch,
)
from ..coresys import CoreSys
from ..docker.const import Capabilities
@@ -104,6 +103,7 @@ from .configuration import FolderMapping
from .const import (
ATTR_BACKUP,
ATTR_BREAKING_VERSIONS,
ATTR_CODENOTARY,
ATTR_PATH,
ATTR_READ_ONLY,
AddonBackupMode,
@@ -316,12 +316,12 @@ class AddonModel(JobGroup, ABC):
@property
def panel_title(self) -> str:
"""Return panel title for Ingress frame."""
"""Return panel icon for Ingress frame."""
return self.data.get(ATTR_PANEL_TITLE, self.name)
@property
def panel_admin(self) -> bool:
"""Return if panel is only available for admin users."""
def panel_admin(self) -> str:
"""Return panel icon for Ingress frame."""
return self.data[ATTR_PANEL_ADMIN]
@property
@@ -489,7 +489,7 @@ class AddonModel(JobGroup, ABC):
return self.data[ATTR_DEVICETREE]
@property
def with_tmpfs(self) -> bool:
def with_tmpfs(self) -> str | None:
"""Return if tmp is in memory of add-on."""
return self.data[ATTR_TMPFS]
@@ -509,7 +509,7 @@ class AddonModel(JobGroup, ABC):
return self.data[ATTR_VIDEO]
@property
def homeassistant_version(self) -> AwesomeVersion | None:
def homeassistant_version(self) -> str | None:
"""Return min Home Assistant version they needed by Add-on."""
return self.data.get(ATTR_HOMEASSISTANT)
@@ -549,7 +549,7 @@ class AddonModel(JobGroup, ABC):
return self.data.get(ATTR_MACHINE, [])
@property
def arch(self) -> CpuArch:
def arch(self) -> str:
"""Return architecture to use for the addon's image."""
if ATTR_IMAGE in self.data:
return self.sys_arch.match(self.data[ATTR_ARCH])
@@ -632,8 +632,13 @@ class AddonModel(JobGroup, ABC):
@property
def signed(self) -> bool:
"""Currently no signing support."""
return False
"""Return True if the image is signed."""
return ATTR_CODENOTARY in self.data
@property
def codenotary(self) -> str | None:
"""Return Signer email address for CAS."""
return self.data.get(ATTR_CODENOTARY)
@property
def breaking_versions(self) -> list[AwesomeVersion]:

View File

@@ -75,7 +75,7 @@ class AddonOptions(CoreSysAttributes):
"""Create a schema for add-on options."""
return vol.Schema(vol.All(dict, self))
def __call__(self, struct: dict[str, Any]) -> dict[str, Any]:
def __call__(self, struct):
"""Create schema validator for add-ons options."""
options = {}
@@ -193,7 +193,9 @@ class AddonOptions(CoreSysAttributes):
f"Fatal error for option '{key}' with type '{typ}' in {self._name} ({self._slug})"
) from None
def _nested_validate_list(self, typ: Any, data_list: Any, key: str) -> list[Any]:
def _nested_validate_list(
self, typ: Any, data_list: list[Any], key: str
) -> list[Any]:
"""Validate nested items."""
options = []
@@ -211,7 +213,7 @@ class AddonOptions(CoreSysAttributes):
return options
def _nested_validate_dict(
self, typ: dict[Any, Any], data_dict: Any, key: str
self, typ: dict[Any, Any], data_dict: dict[Any, Any], key: str
) -> dict[Any, Any]:
"""Validate nested items."""
options = {}
@@ -262,7 +264,7 @@ class UiOptions(CoreSysAttributes):
def __init__(self, coresys: CoreSys) -> None:
"""Initialize UI option render."""
self.coresys: CoreSys = coresys
self.coresys = coresys
def __call__(self, raw_schema: dict[str, Any]) -> list[dict[str, Any]]:
"""Generate UI schema."""
@@ -277,10 +279,10 @@ class UiOptions(CoreSysAttributes):
def _ui_schema_element(
self,
ui_schema: list[dict[str, Any]],
value: str | list[Any] | dict[str, Any],
value: str,
key: str,
multiple: bool = False,
) -> None:
):
if isinstance(value, list):
# nested value list
assert not multiple

View File

@@ -207,12 +207,6 @@ def _warn_addon_config(config: dict[str, Any]):
name,
)
if ATTR_CODENOTARY in config:
_LOGGER.warning(
"Add-on '%s' uses deprecated 'codenotary' field in config. This field is no longer used and will be ignored. Please report this to the maintainer.",
name,
)
return config
@@ -423,6 +417,7 @@ _SCHEMA_ADDON_CONFIG = vol.Schema(
vol.Optional(ATTR_BACKUP, default=AddonBackupMode.HOT): vol.Coerce(
AddonBackupMode
),
vol.Optional(ATTR_CODENOTARY): vol.Email(),
vol.Optional(ATTR_OPTIONS, default={}): dict,
vol.Optional(ATTR_SCHEMA, default={}): vol.Any(
vol.Schema({str: SCHEMA_ELEMENT}),

View File

@@ -152,7 +152,6 @@ class RestAPI(CoreSysAttributes):
self._api_host.advanced_logs,
identifier=syslog_identifier,
latest=True,
no_colors=True,
),
),
web.get(
@@ -450,7 +449,6 @@ class RestAPI(CoreSysAttributes):
await async_capture_exception(err)
kwargs.pop("follow", None) # Follow is not supported for Docker logs
kwargs.pop("latest", None) # Latest is not supported for Docker logs
kwargs.pop("no_colors", None) # no_colors not supported for Docker logs
return await api_supervisor.logs(*args, **kwargs)
self.webapp.add_routes(
@@ -462,7 +460,7 @@ class RestAPI(CoreSysAttributes):
),
web.get(
"/supervisor/logs/latest",
partial(get_supervisor_logs, latest=True, no_colors=True),
partial(get_supervisor_logs, latest=True),
),
web.get("/supervisor/logs/boots/{bootid}", get_supervisor_logs),
web.get(
@@ -578,7 +576,7 @@ class RestAPI(CoreSysAttributes):
),
web.get(
"/addons/{addon}/logs/latest",
partial(get_addon_logs, latest=True, no_colors=True),
partial(get_addon_logs, latest=True),
),
web.get("/addons/{addon}/logs/boots/{bootid}", get_addon_logs),
web.get(
@@ -813,10 +811,6 @@ class RestAPI(CoreSysAttributes):
self.webapp.add_routes(
[
web.get("/docker/info", api_docker.info),
web.post(
"/docker/migrate-storage-driver",
api_docker.migrate_docker_storage_driver,
),
web.post("/docker/options", api_docker.options),
web.get("/docker/registries", api_docker.registries),
web.post("/docker/registries", api_docker.create_registry),

View File

@@ -100,9 +100,6 @@ from ..const import (
from ..coresys import CoreSysAttributes
from ..docker.stats import DockerStats
from ..exceptions import (
AddonBootConfigCannotChangeError,
AddonConfigurationInvalidError,
AddonNotSupportedWriteStdinError,
APIAddonNotInstalled,
APIError,
APIForbidden,
@@ -128,7 +125,6 @@ SCHEMA_OPTIONS = vol.Schema(
vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(str),
vol.Optional(ATTR_INGRESS_PANEL): vol.Boolean(),
vol.Optional(ATTR_WATCHDOG): vol.Boolean(),
vol.Optional(ATTR_OPTIONS): vol.Maybe(dict),
}
)
@@ -304,24 +300,19 @@ class APIAddons(CoreSysAttributes):
# Update secrets for validation
await self.sys_homeassistant.secrets.reload()
# Extend schema with add-on specific validation
addon_schema = SCHEMA_OPTIONS.extend(
{vol.Optional(ATTR_OPTIONS): vol.Maybe(addon.schema)}
)
# Validate/Process Body
body = await api_validate(SCHEMA_OPTIONS, request)
body = await api_validate(addon_schema, request)
if ATTR_OPTIONS in body:
# None resets options to defaults, otherwise validate the options
if body[ATTR_OPTIONS] is None:
addon.options = None
else:
try:
addon.options = addon.schema(body[ATTR_OPTIONS])
except vol.Invalid as ex:
raise AddonConfigurationInvalidError(
addon=addon.slug,
validation_error=humanize_error(body[ATTR_OPTIONS], ex),
) from None
addon.options = body[ATTR_OPTIONS]
if ATTR_BOOT in body:
if addon.boot_config == AddonBootConfig.MANUAL_ONLY:
raise AddonBootConfigCannotChangeError(
addon=addon.slug, boot_config=addon.boot_config.value
raise APIError(
f"Addon {addon.slug} boot option is set to {addon.boot_config} so it cannot be changed"
)
addon.boot = body[ATTR_BOOT]
if ATTR_AUTO_UPDATE in body:
@@ -394,7 +385,7 @@ class APIAddons(CoreSysAttributes):
return data
@api_process
async def options_config(self, request: web.Request) -> dict[str, Any]:
async def options_config(self, request: web.Request) -> None:
"""Validate user options for add-on."""
slug: str = request.match_info["addon"]
if slug != "self":
@@ -439,11 +430,11 @@ class APIAddons(CoreSysAttributes):
}
@api_process
async def uninstall(self, request: web.Request) -> None:
async def uninstall(self, request: web.Request) -> Awaitable[None]:
"""Uninstall add-on."""
addon = self.get_addon_for_request(request)
body: dict[str, Any] = await api_validate(SCHEMA_UNINSTALL, request)
await asyncio.shield(
return await asyncio.shield(
self.sys_addons.uninstall(
addon.slug, remove_config=body[ATTR_REMOVE_CONFIG]
)
@@ -485,7 +476,7 @@ class APIAddons(CoreSysAttributes):
"""Write to stdin of add-on."""
addon = self.get_addon_for_request(request)
if not addon.with_stdin:
raise AddonNotSupportedWriteStdinError(_LOGGER.error, addon=addon.slug)
raise APIError(f"STDIN not supported the {addon.slug} add-on")
data = await request.read()
await asyncio.shield(addon.write_stdin(data))
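One side of the options handler above extends the static SCHEMA_OPTIONS with the add-on's own schema before validating the request body. A minimal voluptuous sketch of that pattern, with a made-up add-on schema:

    import voluptuous as vol

    SCHEMA_OPTIONS = vol.Schema({vol.Optional("watchdog"): vol.Boolean()})

    # Hypothetical per-add-on options schema
    addon_schema = vol.Schema({vol.Required("port"): vol.Coerce(int)})

    # vol.Maybe allows None, which the handler treats as "reset to defaults"
    request_schema = SCHEMA_OPTIONS.extend(
        {vol.Optional("options"): vol.Maybe(addon_schema)}
    )

    print(request_schema({"watchdog": True, "options": {"port": "8123"}}))
    # {'watchdog': True, 'options': {'port': 8123}}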

View File

@@ -15,7 +15,7 @@ import voluptuous as vol
from ..addons.addon import Addon
from ..const import ATTR_NAME, ATTR_PASSWORD, ATTR_USERNAME, REQUEST_FROM
from ..coresys import CoreSysAttributes
from ..exceptions import APIForbidden, AuthInvalidNonStringValueError
from ..exceptions import APIForbidden
from .const import (
ATTR_GROUP_IDS,
ATTR_IS_ACTIVE,
@@ -69,9 +69,7 @@ class APIAuth(CoreSysAttributes):
try:
_ = username.encode and password.encode # type: ignore
except AttributeError:
raise AuthInvalidNonStringValueError(
_LOGGER.error, headers=REALM_HEADER
) from None
raise HTTPUnauthorized(headers=REALM_HEADER) from None
return self.sys_auth.check_login(
addon, cast(str, username), cast(str, password)

View File

@@ -211,7 +211,7 @@ class APIBackups(CoreSysAttributes):
await self.sys_backups.save_data()
@api_process
async def reload(self, _: web.Request) -> bool:
async def reload(self, _):
"""Reload backup list."""
await asyncio.shield(self.sys_backups.reload())
return True
@@ -421,7 +421,7 @@ class APIBackups(CoreSysAttributes):
await self.sys_backups.remove(backup, locations=locations)
@api_process
async def download(self, request: web.Request) -> web.StreamResponse:
async def download(self, request: web.Request):
"""Download a backup file."""
backup = self._extract_slug(request)
# Query will give us '' for /backups, convert value to None
@@ -451,7 +451,7 @@ class APIBackups(CoreSysAttributes):
return response
@api_process
async def upload(self, request: web.Request) -> dict[str, str] | bool:
async def upload(self, request: web.Request):
"""Upload a backup file."""
location: LOCATION_TYPE = None
locations: list[LOCATION_TYPE] | None = None

View File

@@ -4,7 +4,6 @@ import logging
from typing import Any
from aiohttp import web
from awesomeversion import AwesomeVersion
import voluptuous as vol
from supervisor.resolution.const import ContextType, IssueType, SuggestionType
@@ -17,7 +16,6 @@ from ..const import (
ATTR_PASSWORD,
ATTR_REGISTRIES,
ATTR_STORAGE,
ATTR_STORAGE_DRIVER,
ATTR_USERNAME,
ATTR_VERSION,
)
@@ -44,18 +42,12 @@ SCHEMA_OPTIONS = vol.Schema(
}
)
SCHEMA_MIGRATE_DOCKER_STORAGE_DRIVER = vol.Schema(
{
vol.Required(ATTR_STORAGE_DRIVER): vol.In(["overlayfs"]),
}
)
class APIDocker(CoreSysAttributes):
"""Handle RESTful API for Docker configuration."""
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
async def info(self, request: web.Request):
"""Get docker info."""
data_registries = {}
for hostname, registry in self.sys_docker.config.registries.items():
@@ -113,7 +105,7 @@ class APIDocker(CoreSysAttributes):
return {ATTR_REGISTRIES: data_registries}
@api_process
async def create_registry(self, request: web.Request) -> None:
async def create_registry(self, request: web.Request):
"""Create a new docker registry."""
body = await api_validate(SCHEMA_DOCKER_REGISTRY, request)
@@ -123,7 +115,7 @@ class APIDocker(CoreSysAttributes):
await self.sys_docker.config.save_data()
@api_process
async def remove_registry(self, request: web.Request) -> None:
async def remove_registry(self, request: web.Request):
"""Delete a docker registry."""
hostname = request.match_info.get(ATTR_HOSTNAME)
if hostname not in self.sys_docker.config.registries:
@@ -131,27 +123,3 @@ class APIDocker(CoreSysAttributes):
del self.sys_docker.config.registries[hostname]
await self.sys_docker.config.save_data()
@api_process
async def migrate_docker_storage_driver(self, request: web.Request) -> None:
"""Migrate Docker storage driver."""
if (
not self.coresys.os.available
or not self.coresys.os.version
or self.coresys.os.version < AwesomeVersion("17.0.dev0")
):
raise APINotFound(
"Home Assistant OS 17.0 or newer required for Docker storage driver migration"
)
body = await api_validate(SCHEMA_MIGRATE_DOCKER_STORAGE_DRIVER, request)
await self.sys_dbus.agent.system.migrate_docker_storage_driver(
body[ATTR_STORAGE_DRIVER]
)
_LOGGER.info("Host system reboot required to apply Docker storage migration")
self.sys_resolution.create_issue(
IssueType.REBOOT_REQUIRED,
ContextType.SYSTEM,
suggestions=[SuggestionType.EXECUTE_REBOOT],
)

View File

@@ -154,7 +154,7 @@ class APIHomeAssistant(CoreSysAttributes):
await self.sys_homeassistant.save_data()
@api_process
async def stats(self, request: web.Request) -> dict[str, Any]:
async def stats(self, request: web.Request) -> dict[Any, str]:
"""Return resource information."""
stats = await self.sys_homeassistant.core.stats()
if not stats:
@@ -191,7 +191,7 @@ class APIHomeAssistant(CoreSysAttributes):
return await update_task
@api_process
async def stop(self, request: web.Request) -> None:
async def stop(self, request: web.Request) -> Awaitable[None]:
"""Stop Home Assistant."""
body = await api_validate(SCHEMA_STOP, request)
await self._check_offline_migration(force=body[ATTR_FORCE])

View File

@@ -1,7 +1,6 @@
"""Init file for Supervisor host RESTful API."""
import asyncio
from collections.abc import Awaitable
from contextlib import suppress
import json
import logging
@@ -100,7 +99,7 @@ class APIHost(CoreSysAttributes):
)
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
async def info(self, request):
"""Return host information."""
return {
ATTR_AGENT_VERSION: self.sys_dbus.agent.version,
@@ -129,7 +128,7 @@ class APIHost(CoreSysAttributes):
}
@api_process
async def options(self, request: web.Request) -> None:
async def options(self, request):
"""Edit host settings."""
body = await api_validate(SCHEMA_OPTIONS, request)
@@ -140,7 +139,7 @@ class APIHost(CoreSysAttributes):
)
@api_process
async def reboot(self, request: web.Request) -> None:
async def reboot(self, request):
"""Reboot host."""
body = await api_validate(SCHEMA_SHUTDOWN, request)
await self._check_ha_offline_migration(force=body[ATTR_FORCE])
@@ -148,7 +147,7 @@ class APIHost(CoreSysAttributes):
return await asyncio.shield(self.sys_host.control.reboot())
@api_process
async def shutdown(self, request: web.Request) -> None:
async def shutdown(self, request):
"""Poweroff host."""
body = await api_validate(SCHEMA_SHUTDOWN, request)
await self._check_ha_offline_migration(force=body[ATTR_FORCE])
@@ -156,12 +155,12 @@ class APIHost(CoreSysAttributes):
return await asyncio.shield(self.sys_host.control.shutdown())
@api_process
def reload(self, request: web.Request) -> Awaitable[None]:
def reload(self, request):
"""Reload host data."""
return asyncio.shield(self.sys_host.reload())
@api_process
async def services(self, request: web.Request) -> dict[str, Any]:
async def services(self, request):
"""Return list of available services."""
services = []
for unit in self.sys_host.services:
@@ -176,7 +175,7 @@ class APIHost(CoreSysAttributes):
return {ATTR_SERVICES: services}
@api_process
async def list_boots(self, _: web.Request) -> dict[str, Any]:
async def list_boots(self, _: web.Request):
"""Return a list of boot IDs."""
boot_ids = await self.sys_host.logs.get_boot_ids()
return {
@@ -187,7 +186,7 @@ class APIHost(CoreSysAttributes):
}
@api_process
async def list_identifiers(self, _: web.Request) -> dict[str, list[str]]:
async def list_identifiers(self, _: web.Request):
"""Return a list of syslog identifiers."""
return {ATTR_IDENTIFIERS: await self.sys_host.logs.get_identifiers()}
@@ -207,7 +206,6 @@ class APIHost(CoreSysAttributes):
identifier: str | None = None,
follow: bool = False,
latest: bool = False,
no_colors: bool = False,
) -> web.StreamResponse:
"""Return systemd-journald logs."""
log_formatter = LogFormatter.PLAIN
@@ -253,9 +251,6 @@ class APIHost(CoreSysAttributes):
if "verbose" in request.query or request.headers[ACCEPT] == CONTENT_TYPE_X_LOG:
log_formatter = LogFormatter.VERBOSE
if "no_colors" in request.query:
no_colors = True
if "lines" in request.query:
lines = request.query.get("lines", DEFAULT_LINES)
try:
@@ -285,9 +280,7 @@ class APIHost(CoreSysAttributes):
response = web.StreamResponse()
response.content_type = CONTENT_TYPE_TEXT
headers_returned = False
async for cursor, line in journal_logs_reader(
resp, log_formatter, no_colors
):
async for cursor, line in journal_logs_reader(resp, log_formatter):
try:
if not headers_returned:
if cursor:
@@ -325,15 +318,12 @@ class APIHost(CoreSysAttributes):
identifier: str | None = None,
follow: bool = False,
latest: bool = False,
no_colors: bool = False,
) -> web.StreamResponse:
"""Return systemd-journald logs. Wrapped as standard API handler."""
return await self.advanced_logs_handler(
request, identifier, follow, latest, no_colors
)
return await self.advanced_logs_handler(request, identifier, follow, latest)
@api_process
async def disk_usage(self, request: web.Request) -> dict[str, Any]:
async def disk_usage(self, request: web.Request) -> dict:
"""Return a breakdown of storage usage for the system."""
max_depth = request.query.get(ATTR_MAX_DEPTH, 1)
@@ -344,14 +334,10 @@ class APIHost(CoreSysAttributes):
disk = self.sys_hardware.disk
total, _, free = await self.sys_run_in_executor(
total, used, _ = await self.sys_run_in_executor(
disk.disk_usage, self.sys_config.path_supervisor
)
# Calculate used by subtracting free makes sure we include reserved space
# in used space reporting.
used = total - free
known_paths = await self.sys_run_in_executor(
disk.get_dir_sizes,
{
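The disk-usage hunk above swaps the reported "used" value for total minus free, so filesystem-reserved blocks (e.g. ext4's root reservation) count as used space. The same calculation with shutil:

    import shutil

    total, reported_used, free = shutil.disk_usage("/")

    # Deriving used from free folds reserved space into the figure
    derived_used = total - free
    print(f"reported: {reported_used}, derived: {derived_used}")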

View File

@@ -253,28 +253,18 @@ class APIIngress(CoreSysAttributes):
skip_auto_headers={hdrs.CONTENT_TYPE},
) as result:
headers = _response_header(result)
# Avoid parsing content_type in simple cases for better performance
if maybe_content_type := result.headers.get(hdrs.CONTENT_TYPE):
content_type = (maybe_content_type.partition(";"))[0].strip()
else:
content_type = result.content_type
# Empty body responses (304, 204, HEAD, etc.) should not be streamed,
# otherwise aiohttp < 3.9.0 may generate an invalid "0\r\n\r\n" chunk
# This also avoids setting content_type for empty responses.
if must_be_empty_body(request.method, result.status):
# If upstream contains content-type, preserve it (e.g. for HEAD requests)
if maybe_content_type:
headers[hdrs.CONTENT_TYPE] = content_type
return web.Response(
headers=headers,
status=result.status,
)
# Simple request
if (
hdrs.CONTENT_LENGTH in result.headers
# empty body responses should not be streamed,
# otherwise aiohttp < 3.9.0 may generate
# an invalid "0\r\n\r\n" chunk instead of an empty response.
must_be_empty_body(request.method, result.status)
or hdrs.CONTENT_LENGTH in result.headers
and int(result.headers.get(hdrs.CONTENT_LENGTH, 0)) < 4_194_000
):
# Return Response
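The ingress hunk above derives the bare content type by splitting the upstream header on the first ";" instead of fully parsing it, which is cheaper for the common case. A sketch of that shortcut:

    def bare_content_type(header: str) -> str:
        """Strip parameters such as '; charset=utf-8' from a Content-Type."""
        return header.partition(";")[0].strip()

    print(bare_content_type("text/html; charset=utf-8"))  # text/html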

View File

@@ -1,12 +1,12 @@
"""Handle security part of this API."""
from collections.abc import Awaitable, Callable
from collections.abc import Callable
import logging
import re
from typing import Final
from urllib.parse import unquote
from aiohttp.web import Request, StreamResponse, middleware
from aiohttp.web import Request, Response, middleware
from aiohttp.web_exceptions import HTTPBadRequest, HTTPForbidden, HTTPUnauthorized
from awesomeversion import AwesomeVersion
@@ -89,7 +89,7 @@ CORE_ONLY_PATHS: Final = re.compile(
)
# Policy role add-on API access
ADDONS_ROLE_ACCESS: dict[str, re.Pattern[str]] = {
ADDONS_ROLE_ACCESS: dict[str, re.Pattern] = {
ROLE_DEFAULT: re.compile(
r"^(?:"
r"|/.+/info"
@@ -180,9 +180,7 @@ class SecurityMiddleware(CoreSysAttributes):
return unquoted
@middleware
async def block_bad_requests(
self, request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
) -> StreamResponse:
async def block_bad_requests(self, request: Request, handler: Callable) -> Response:
"""Process request and tblock commonly known exploit attempts."""
if FILTERS.search(self._recursive_unquote(request.path)):
_LOGGER.warning(
@@ -200,9 +198,7 @@ class SecurityMiddleware(CoreSysAttributes):
return await handler(request)
@middleware
async def system_validation(
self, request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
) -> StreamResponse:
async def system_validation(self, request: Request, handler: Callable) -> Response:
"""Check if core is ready to response."""
if self.sys_core.state not in VALID_API_STATES:
return api_return_error(
@@ -212,9 +208,7 @@ class SecurityMiddleware(CoreSysAttributes):
return await handler(request)
@middleware
async def token_validation(
self, request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
) -> StreamResponse:
async def token_validation(self, request: Request, handler: Callable) -> Response:
"""Check security access of this layer."""
request_from: CoreSysAttributes | None = None
supervisor_token = extract_supervisor_token(request)
@@ -285,9 +279,7 @@ class SecurityMiddleware(CoreSysAttributes):
raise HTTPForbidden()
@middleware
async def core_proxy(
self, request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
) -> StreamResponse:
async def core_proxy(self, request: Request, handler: Callable) -> Response:
"""Validate user from Core API proxy."""
if (
request[REQUEST_FROM] != self.sys_homeassistant

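The middleware hunks above tighten the handler annotation from a bare Callable to the full aiohttp signature. A minimal sketch of the pattern:

from collections.abc import Awaitable, Callable

from aiohttp.web import Request, StreamResponse, middleware

@middleware
async def passthrough(
    request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
) -> StreamResponse:
    """No-op middleware with the tightened signature; StreamResponse is the
    base class of Response, so this covers buffered and streamed handlers."""
    return await handler(request)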
View File

@@ -1,20 +1,24 @@
"""Init file for Supervisor Security RESTful API."""
import asyncio
import logging
from typing import Any
from aiohttp import web
import attr
import voluptuous as vol
from supervisor.exceptions import APIGone
from ..const import ATTR_FORCE_SECURITY, ATTR_PWNED
from ..const import ATTR_CONTENT_TRUST, ATTR_FORCE_SECURITY, ATTR_PWNED
from ..coresys import CoreSysAttributes
from .utils import api_process, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
# pylint: disable=no-value-for-parameter
SCHEMA_OPTIONS = vol.Schema(
{
vol.Optional(ATTR_PWNED): vol.Boolean(),
vol.Optional(ATTR_CONTENT_TRUST): vol.Boolean(),
vol.Optional(ATTR_FORCE_SECURITY): vol.Boolean(),
}
)
@@ -27,6 +31,7 @@ class APISecurity(CoreSysAttributes):
async def info(self, request: web.Request) -> dict[str, Any]:
"""Return Security information."""
return {
ATTR_CONTENT_TRUST: self.sys_security.content_trust,
ATTR_PWNED: self.sys_security.pwned,
ATTR_FORCE_SECURITY: self.sys_security.force,
}
@@ -38,6 +43,8 @@ class APISecurity(CoreSysAttributes):
if ATTR_PWNED in body:
self.sys_security.pwned = body[ATTR_PWNED]
if ATTR_CONTENT_TRUST in body:
self.sys_security.content_trust = body[ATTR_CONTENT_TRUST]
if ATTR_FORCE_SECURITY in body:
self.sys_security.force = body[ATTR_FORCE_SECURITY]
@@ -47,9 +54,6 @@ class APISecurity(CoreSysAttributes):
@api_process
async def integrity_check(self, request: web.Request) -> dict[str, Any]:
"""Run backend integrity check.
CodeNotary integrity checking has been removed. This endpoint now returns
an error indicating the feature is gone.
"""
raise APIGone("Integrity check feature has been removed.")
"""Run backend integrity check."""
result = await asyncio.shield(self.sys_security.integrity_check())
return attr.asdict(result)

View File

@@ -1,9 +1,5 @@
"""Init file for Supervisor network RESTful API."""
from typing import Any
from aiohttp import web
from ..const import (
ATTR_AVAILABLE,
ATTR_PROVIDERS,
@@ -29,7 +25,7 @@ class APIServices(CoreSysAttributes):
return service
@api_process
async def list_services(self, request: web.Request) -> dict[str, Any]:
async def list_services(self, request):
"""Show register services."""
services = []
for service in self.sys_services.list_services:
@@ -44,7 +40,7 @@ class APIServices(CoreSysAttributes):
return {ATTR_SERVICES: services}
@api_process
async def set_service(self, request: web.Request) -> None:
async def set_service(self, request):
"""Write data into a service."""
service = self._extract_service(request)
body = await api_validate(service.schema, request)
@@ -54,7 +50,7 @@ class APIServices(CoreSysAttributes):
await service.set_service_data(addon, body)
@api_process
async def get_service(self, request: web.Request) -> dict[str, Any]:
async def get_service(self, request):
"""Read data into a service."""
service = self._extract_service(request)
@@ -66,7 +62,7 @@ class APIServices(CoreSysAttributes):
return service.get_service_data()
@api_process
async def del_service(self, request: web.Request) -> None:
async def del_service(self, request):
"""Delete data into a service."""
service = self._extract_service(request)
addon = request[REQUEST_FROM]

View File

@@ -53,7 +53,7 @@ from ..const import (
REQUEST_FROM,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIError, APIForbidden, APINotFound, StoreAddonNotFoundError
from ..exceptions import APIError, APIForbidden, APINotFound
from ..store.addon import AddonStore
from ..store.repository import Repository
from ..store.validate import validate_repository
@@ -104,7 +104,7 @@ class APIStore(CoreSysAttributes):
addon_slug: str = request.match_info["addon"]
if not (addon := self.sys_addons.get(addon_slug)):
raise StoreAddonNotFoundError(addon=addon_slug)
raise APINotFound(f"Addon {addon_slug} does not exist")
if installed and not addon.is_installed:
raise APIError(f"Addon {addon_slug} is not installed")
@@ -112,7 +112,7 @@ class APIStore(CoreSysAttributes):
if not installed and addon.is_installed:
addon = cast(Addon, addon)
if not addon.addon_store:
raise StoreAddonNotFoundError(addon=addon_slug)
raise APINotFound(f"Addon {addon_slug} does not exist in the store")
return addon.addon_store
return addon
@@ -349,13 +349,13 @@ class APIStore(CoreSysAttributes):
return self._generate_repository_information(repository)
@api_process
async def add_repository(self, request: web.Request) -> None:
async def add_repository(self, request: web.Request):
"""Add repository to the store."""
body = await api_validate(SCHEMA_ADD_REPOSITORY, request)
await asyncio.shield(self.sys_store.add_repository(body[ATTR_REPOSITORY]))
@api_process
async def remove_repository(self, request: web.Request) -> None:
async def remove_repository(self, request: web.Request):
"""Remove repository from the store."""
repository: Repository = self._extract_repository(request)
await asyncio.shield(self.sys_store.remove_repository(repository))

View File

@@ -16,12 +16,14 @@ from ..const import (
ATTR_BLK_READ,
ATTR_BLK_WRITE,
ATTR_CHANNEL,
ATTR_CONTENT_TRUST,
ATTR_COUNTRY,
ATTR_CPU_PERCENT,
ATTR_DEBUG,
ATTR_DEBUG_BLOCK,
ATTR_DETECT_BLOCKING_IO,
ATTR_DIAGNOSTICS,
ATTR_FORCE_SECURITY,
ATTR_HEALTHY,
ATTR_ICON,
ATTR_IP_ADDRESS,
@@ -67,6 +69,8 @@ SCHEMA_OPTIONS = vol.Schema(
vol.Optional(ATTR_DEBUG): vol.Boolean(),
vol.Optional(ATTR_DEBUG_BLOCK): vol.Boolean(),
vol.Optional(ATTR_DIAGNOSTICS): vol.Boolean(),
vol.Optional(ATTR_CONTENT_TRUST): vol.Boolean(),
vol.Optional(ATTR_FORCE_SECURITY): vol.Boolean(),
vol.Optional(ATTR_AUTO_UPDATE): vol.Boolean(),
vol.Optional(ATTR_DETECT_BLOCKING_IO): vol.Coerce(DetectBlockingIO),
vol.Optional(ATTR_COUNTRY): str,
@@ -80,7 +84,7 @@ class APISupervisor(CoreSysAttributes):
"""Handle RESTful API for Supervisor functions."""
@api_process
async def ping(self, request: web.Request) -> bool:
async def ping(self, request):
"""Return ok for signal that the API is ready."""
return True
@@ -104,8 +108,7 @@ class APISupervisor(CoreSysAttributes):
ATTR_AUTO_UPDATE: self.sys_updater.auto_update,
ATTR_DETECT_BLOCKING_IO: BlockBusterManager.is_enabled(),
ATTR_COUNTRY: self.sys_config.country,
# Depricated
ATTR_WAIT_BOOT: self.sys_config.wait_boot,
# Deprecated
ATTR_ADDONS: [
{
ATTR_NAME: addon.name,
@@ -119,10 +122,6 @@ class APISupervisor(CoreSysAttributes):
}
for addon in self.sys_addons.local.values()
],
ATTR_ADDONS_REPOSITORIES: [
{ATTR_NAME: store.name, ATTR_SLUG: store.slug}
for store in self.sys_store.all
],
}
@api_process
@@ -178,20 +177,10 @@ class APISupervisor(CoreSysAttributes):
self.sys_config.detect_blocking_io = False
BlockBusterManager.deactivate()
# Deprecated
if ATTR_WAIT_BOOT in body:
self.sys_config.wait_boot = body[ATTR_WAIT_BOOT]
# Save changes before processing addons in case of errors
await self.sys_updater.save_data()
await self.sys_config.save_data()
# Remove: 2022.9
if ATTR_ADDONS_REPOSITORIES in body:
await asyncio.shield(
self.sys_store.update_repositories(set(body[ATTR_ADDONS_REPOSITORIES]))
)
await self.sys_resolution.evaluate.evaluate_system()
@api_process

View File

@@ -1,7 +1,7 @@
"""Init file for Supervisor util for RESTful API."""
import asyncio
from collections.abc import Callable, Mapping
from collections.abc import Callable
import json
from typing import Any, cast
@@ -26,7 +26,7 @@ from ..const import (
RESULT_OK,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import APIError, DockerAPIError, HassioError
from ..exceptions import APIError, BackupFileNotFoundError, DockerAPIError, HassioError
from ..jobs import JobSchedulerOptions, SupervisorJob
from ..utils import check_exception_chain, get_message_from_exception_chain
from ..utils.json import json_dumps, json_loads as json_loads_util
@@ -63,14 +63,16 @@ def json_loads(data: Any) -> dict[str, Any]:
def api_process(method):
"""Wrap function with true/false calls to rest api."""
async def wrap_api(*args, **kwargs) -> web.Response | web.StreamResponse:
async def wrap_api(
api: CoreSysAttributes, *args, **kwargs
) -> web.Response | web.StreamResponse:
"""Return API information."""
try:
answer = await method(*args, **kwargs)
answer = await method(api, *args, **kwargs)
except BackupFileNotFoundError as err:
return api_return_error(err, status=404)
except APIError as err:
return api_return_error(
err, status=err.status, job_id=err.job_id, headers=err.headers
)
return api_return_error(err, status=err.status, job_id=err.job_id)
except HassioError as err:
return api_return_error(err)
@@ -107,10 +109,12 @@ def api_process_raw(content, *, error_type=None):
def wrap_method(method):
"""Wrap function with raw output to rest api."""
async def wrap_api(*args, **kwargs) -> web.Response | web.StreamResponse:
async def wrap_api(
api: CoreSysAttributes, *args, **kwargs
) -> web.Response | web.StreamResponse:
"""Return api information."""
try:
msg_data = await method(*args, **kwargs)
msg_data = await method(api, *args, **kwargs)
except APIError as err:
return api_return_error(
err,
@@ -139,7 +143,6 @@ def api_return_error(
error_type: str | None = None,
status: int = 400,
*,
headers: Mapping[str, str] | None = None,
job_id: str | None = None,
) -> web.Response:
"""Return an API error message."""
@@ -148,19 +151,14 @@ def api_return_error(
if check_exception_chain(error, DockerAPIError):
message = format_message(message)
if not message:
message = "Unknown error, see Supervisor logs (check with 'ha supervisor logs')"
message = "Unknown error, see supervisor"
match error_type:
case const.CONTENT_TYPE_TEXT:
return web.Response(
body=message, content_type=error_type, status=status, headers=headers
)
return web.Response(body=message, content_type=error_type, status=status)
case const.CONTENT_TYPE_BINARY:
return web.Response(
body=message.encode(),
content_type=error_type,
status=status,
headers=headers,
body=message.encode(), content_type=error_type, status=status
)
case _:
result: dict[str, Any] = {
@@ -178,7 +176,6 @@ def api_return_error(
result,
status=status,
dumps=json_dumps,
headers=headers,
)

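A stripped-down sketch of the api_process pattern above; the error class and response shape are simplified stand-ins, not the Supervisor originals. The explicit api first argument mirrors the change in the hunk, which exposes the bound CoreSysAttributes instance to the wrapper:

from aiohttp import web

class APIError(Exception):
    """Simplified stand-in for the Supervisor's APIError."""
    status = 400

def api_process(method):
    """Wrap an API handler, converting exceptions into error responses."""
    async def wrap_api(api, *args, **kwargs) -> web.Response:
        try:
            answer = await method(api, *args, **kwargs)
        except APIError as err:
            return web.json_response(
                {"result": "error", "message": str(err)}, status=err.status
            )
        return web.json_response({"result": "ok", "data": answer})
    return wrap_api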
View File

@@ -4,7 +4,6 @@ import logging
from pathlib import Path
import platform
from .const import CpuArch
from .coresys import CoreSys, CoreSysAttributes
from .exceptions import ConfigurationFileError, HassioArchNotFound
from .utils.json import read_json_file
@@ -13,40 +12,38 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
ARCH_JSON: Path = Path(__file__).parent.joinpath("data/arch.json")
MAP_CPU: dict[str, CpuArch] = {
"armv7": CpuArch.ARMV7,
"armv6": CpuArch.ARMHF,
"armv8": CpuArch.AARCH64,
"aarch64": CpuArch.AARCH64,
"i686": CpuArch.I386,
"x86_64": CpuArch.AMD64,
MAP_CPU = {
"armv7": "armv7",
"armv6": "armhf",
"armv8": "aarch64",
"aarch64": "aarch64",
"i686": "i386",
"x86_64": "amd64",
}
class CpuArchManager(CoreSysAttributes):
class CpuArch(CoreSysAttributes):
"""Manage available architectures."""
def __init__(self, coresys: CoreSys) -> None:
"""Initialize CPU Architecture handler."""
self.coresys = coresys
self._supported_arch: list[CpuArch] = []
self._supported_set: set[CpuArch] = set()
self._default_arch: CpuArch
self._supported_arch: list[str] = []
self._supported_set: set[str] = set()
self._default_arch: str
@property
def default(self) -> CpuArch:
def default(self) -> str:
"""Return system default arch."""
return self._default_arch
@property
def supervisor(self) -> CpuArch:
def supervisor(self) -> str:
"""Return supervisor arch."""
if self.sys_supervisor.arch:
return CpuArch(self.sys_supervisor.arch)
return self._default_arch
return self.sys_supervisor.arch or self._default_arch
@property
def supported(self) -> list[CpuArch]:
def supported(self) -> list[str]:
"""Return support arch by CPU/Machine."""
return self._supported_arch
@@ -68,7 +65,7 @@ class CpuArchManager(CoreSysAttributes):
return
# Use configs from arch.json
self._supported_arch.extend(CpuArch(a) for a in arch_data[self.sys_machine])
self._supported_arch.extend(arch_data[self.sys_machine])
self._default_arch = self.supported[0]
# Make sure native support is in supported list
@@ -81,14 +78,14 @@ class CpuArchManager(CoreSysAttributes):
"""Return True if there is a supported arch by this platform."""
return not self._supported_set.isdisjoint(arch_list)
def match(self, arch_list: list[str]) -> CpuArch:
def match(self, arch_list: list[str]) -> str:
"""Return best match for this CPU/Platform."""
for self_arch in self.supported:
if self_arch in arch_list:
return self_arch
raise HassioArchNotFound()
def detect_cpu(self) -> CpuArch:
def detect_cpu(self) -> str:
"""Return the arch type of local CPU."""
cpu = platform.machine()
for check, value in MAP_CPU.items():
@@ -99,10 +96,9 @@ class CpuArchManager(CoreSysAttributes):
"Unknown CPU architecture %s, falling back to Supervisor architecture.",
cpu,
)
return CpuArch(self.sys_supervisor.arch)
return self.sys_supervisor.arch
_LOGGER.warning(
"Unknown CPU architecture %s, assuming CPU architecture equals Supervisor architecture.",
cpu,
)
# Return the cpu string as-is, wrapped in CpuArch (may fail if invalid)
return CpuArch(cpu)
return cpu

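The CPU detection above maps platform.machine() output onto Supervisor arch names with a prefix match (e.g. "armv7l" starts with "armv7"), as the loop in the hunk suggests. A self-contained sketch:

import platform

MAP_CPU = {
    "armv7": "armv7",
    "armv6": "armhf",
    "armv8": "aarch64",
    "aarch64": "aarch64",
    "i686": "i386",
    "x86_64": "amd64",
}

def detect_cpu(fallback: str = "amd64") -> str:
    """Map the kernel's machine string to a Supervisor arch name."""
    cpu = platform.machine()
    for check, value in MAP_CPU.items():
        if cpu.startswith(check):
            return value
    return fallback  # unknown CPU: fall back to the Supervisor arch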
View File

@@ -9,10 +9,8 @@ from .addons.addon import Addon
from .const import ATTR_PASSWORD, ATTR_TYPE, ATTR_USERNAME, FILE_HASSIO_AUTH
from .coresys import CoreSys, CoreSysAttributes
from .exceptions import (
AuthHomeAssistantAPIValidationError,
AuthInvalidNonStringValueError,
AuthError,
AuthListUsersError,
AuthListUsersNoneResponseError,
AuthPasswordResetError,
HomeAssistantAPIError,
HomeAssistantWSError,
@@ -85,8 +83,10 @@ class Auth(FileConfiguration, CoreSysAttributes):
self, addon: Addon, username: str | None, password: str | None
) -> bool:
"""Check username login."""
if username is None or password is None:
raise AuthInvalidNonStringValueError(_LOGGER.error)
if password is None:
raise AuthError("None as password is not supported!", _LOGGER.error)
if username is None:
raise AuthError("None as username is not supported!", _LOGGER.error)
_LOGGER.info("Auth request from '%s' for '%s'", addon.slug, username)
@@ -137,7 +137,7 @@ class Auth(FileConfiguration, CoreSysAttributes):
finally:
self._running.pop(username, None)
raise AuthHomeAssistantAPIValidationError()
raise AuthError()
async def change_password(self, username: str, password: str) -> None:
"""Change user password login."""
@@ -155,7 +155,7 @@ class Auth(FileConfiguration, CoreSysAttributes):
except HomeAssistantAPIError as err:
_LOGGER.error("Can't request password reset on Home Assistant: %s", err)
raise AuthPasswordResetError(user=username)
raise AuthPasswordResetError()
async def list_users(self) -> list[dict[str, Any]]:
"""List users on the Home Assistant instance."""
@@ -166,12 +166,15 @@ class Auth(FileConfiguration, CoreSysAttributes):
{ATTR_TYPE: "config/auth/list"}
)
except HomeAssistantWSError as err:
_LOGGER.error("Can't request listing users on Home Assistant: %s", err)
raise AuthListUsersError() from err
raise AuthListUsersError(
f"Can't request listing users on Home Assistant: {err}", _LOGGER.error
) from err
if users is not None:
return users
raise AuthListUsersNoneResponseError(_LOGGER.error)
raise AuthListUsersError(
"Can't request listing users on Home Assistant!", _LOGGER.error
)
@staticmethod
def _rehash(value: str, salt2: str = "") -> str:

View File

@@ -60,6 +60,7 @@ from ..utils.dt import parse_datetime, utcnow
from ..utils.json import json_bytes
from ..utils.sentinel import DEFAULT
from .const import BUF_SIZE, LOCATION_CLOUD_BACKUP, BackupType
from .utils import password_to_key
from .validate import SCHEMA_BACKUP
IGNORED_COMPARISON_FIELDS = {ATTR_PROTECTED, ATTR_CRYPTO, ATTR_DOCKER}
@@ -100,7 +101,7 @@ class Backup(JobGroup):
self._data: dict[str, Any] = data or {ATTR_SLUG: slug}
self._tmp: TemporaryDirectory | None = None
self._outer_secure_tarfile: SecureTarFile | None = None
self._password: str | None = None
self._key: bytes | None = None
self._locations: dict[str | None, BackupLocation] = {
location: BackupLocation(
path=tar_file,
@@ -326,7 +327,7 @@ class Backup(JobGroup):
# Set password
if password:
self._password = password
self._init_password(password)
self._data[ATTR_PROTECTED] = True
self._data[ATTR_CRYPTO] = CRYPTO_AES128
self._locations[self.location].protected = True
@@ -336,7 +337,14 @@ class Backup(JobGroup):
def set_password(self, password: str | None) -> None:
"""Set the password for an existing backup."""
self._password = password
if password:
self._init_password(password)
else:
self._key = None
def _init_password(self, password: str) -> None:
"""Create key from password."""
self._key = password_to_key(password)
async def validate_backup(self, location: str | None) -> None:
"""Validate backup.
@@ -366,9 +374,9 @@ class Backup(JobGroup):
with SecureTarFile(
ending, # Not used
gzip=self.compressed,
key=self._key,
mode="r",
fileobj=test_tar_file,
password=self._password,
):
# If we can read the tar file, the password is correct
return
@@ -584,7 +592,7 @@ class Backup(JobGroup):
addon_file = self._outer_secure_tarfile.create_inner_tar(
f"./{tar_name}",
gzip=self.compressed,
password=self._password,
key=self._key,
)
# Take backup
try:
@@ -620,6 +628,9 @@ class Backup(JobGroup):
if start_task := await self._addon_save(addon):
start_tasks.append(start_task)
except BackupError as err:
err = BackupError(
f"Can't backup add-on {addon.slug}: {str(err)}", _LOGGER.error
)
self.sys_jobs.current.capture_error(err)
return start_tasks
@@ -635,9 +646,9 @@ class Backup(JobGroup):
addon_file = SecureTarFile(
Path(self._tmp.name, tar_name),
"r",
key=self._key,
gzip=self.compressed,
bufsize=BUF_SIZE,
password=self._password,
)
# If exists inside backup
@@ -733,7 +744,7 @@ class Backup(JobGroup):
with outer_secure_tarfile.create_inner_tar(
f"./{tar_name}",
gzip=self.compressed,
password=self._password,
key=self._key,
) as tar_file:
atomic_contents_add(
tar_file,
@@ -794,9 +805,9 @@ class Backup(JobGroup):
with SecureTarFile(
tar_name,
"r",
key=self._key,
gzip=self.compressed,
bufsize=BUF_SIZE,
password=self._password,
) as tar_file:
tar_file.extractall(
path=origin_dir, members=tar_file, filter="fully_trusted"
@@ -857,7 +868,7 @@ class Backup(JobGroup):
homeassistant_file = self._outer_secure_tarfile.create_inner_tar(
f"./{tar_name}",
gzip=self.compressed,
password=self._password,
key=self._key,
)
await self.sys_homeassistant.backup(homeassistant_file, exclude_database)
@@ -880,11 +891,7 @@ class Backup(JobGroup):
self._tmp.name, f"homeassistant.tar{'.gz' if self.compressed else ''}"
)
homeassistant_file = SecureTarFile(
tar_name,
"r",
gzip=self.compressed,
bufsize=BUF_SIZE,
password=self._password,
tar_name, "r", key=self._key, gzip=self.compressed, bufsize=BUF_SIZE
)
await self.sys_homeassistant.restore(

View File

@@ -6,6 +6,21 @@ import re
RE_DIGITS = re.compile(r"\d+")
def password_to_key(password: str) -> bytes:
"""Generate a AES Key from password."""
key: bytes = password.encode()
for _ in range(100):
key = hashlib.sha256(key).digest()
return key[:16]
def key_to_iv(key: bytes) -> bytes:
"""Generate an iv from Key."""
for _ in range(100):
key = hashlib.sha256(key).digest()
return key[:16]
def create_slug(name: str, date_str: str) -> str:
"""Generate a hash from repository."""
key = f"{date_str} - {name}".lower().encode()

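Both helpers above are the same unsalted, deterministic SHA-256 stretch (100 rounds, truncated to 16 bytes), which is what lets set_password on an existing backup rebuild the identical key. A quick usage sketch:

from hashlib import sha256

def _stretch(data: bytes) -> bytes:
    for _ in range(100):
        data = sha256(data).digest()
    return data[:16]

key = _stretch(b"correct horse battery staple")  # == password_to_key(...)
iv = _stretch(key)                               # == key_to_iv(key)
assert len(key) == len(iv) == 16                 # AES-128 key/block size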
View File

@@ -13,7 +13,7 @@ from colorlog import ColoredFormatter
from .addons.manager import AddonManager
from .api import RestAPI
from .arch import CpuArchManager
from .arch import CpuArch
from .auth import Auth
from .backups.manager import BackupManager
from .bus import Bus
@@ -71,7 +71,7 @@ async def initialize_coresys() -> CoreSys:
coresys.jobs = await JobManager(coresys).load_config()
coresys.core = await Core(coresys).post_init()
coresys.plugins = await PluginManager(coresys).load_config()
coresys.arch = CpuArchManager(coresys)
coresys.arch = CpuArch(coresys)
coresys.auth = await Auth(coresys).load_config()
coresys.updater = await Updater(coresys).load_config()
coresys.api = RestAPI(coresys)
@@ -105,6 +105,7 @@ async def initialize_coresys() -> CoreSys:
if coresys.dev:
coresys.updater.channel = UpdateChannel.DEV
coresys.security.content_trust = False
# Convert datetime
logging.Formatter.converter = lambda *args: coresys.now().timetuple()

View File

@@ -2,7 +2,6 @@
from __future__ import annotations
from asyncio import Task
from collections.abc import Callable, Coroutine
import logging
from typing import Any
@@ -39,13 +38,11 @@ class Bus(CoreSysAttributes):
self._listeners.setdefault(event, []).append(listener)
return listener
def fire_event(self, event: BusEvent, reference: Any) -> list[Task]:
def fire_event(self, event: BusEvent, reference: Any) -> None:
"""Fire an event to the bus."""
_LOGGER.debug("Fire event '%s' with '%s'", event, reference)
tasks: list[Task] = []
for listener in self._listeners.get(event, []):
tasks.append(self.sys_create_task(listener.callback(reference)))
return tasks
self.sys_create_task(listener.callback(reference))
def remove_listener(self, listener: EventListener) -> None:
"""Unregister an listener."""

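The fire_event pair above changes the method to return the tasks it creates, so callers (tests in particular) can await delivery deterministically. A toy version of the pattern, with simplified internals:

import asyncio
from typing import Any

class MiniBus:
    """Toy event bus: fire_event returns the tasks it schedules."""

    def __init__(self) -> None:
        self._listeners: dict[str, list] = {}

    def register_event(self, event: str, callback) -> None:
        self._listeners.setdefault(event, []).append(callback)

    def fire_event(self, event: str, reference: Any) -> list[asyncio.Task]:
        return [
            asyncio.get_running_loop().create_task(cb(reference))
            for cb in self._listeners.get(event, [])
        ]

async def main() -> None:
    bus = MiniBus()

    async def on_pull(ref: str) -> None:
        print("pull update:", ref)

    bus.register_event("docker_image_pull_update", on_pull)
    await asyncio.gather(*bus.fire_event("docker_image_pull_update", "layer-1"))

asyncio.run(main())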
View File

@@ -328,7 +328,6 @@ ATTR_STATE = "state"
ATTR_STATIC = "static"
ATTR_STDIN = "stdin"
ATTR_STORAGE = "storage"
ATTR_STORAGE_DRIVER = "storage_driver"
ATTR_SUGGESTIONS = "suggestions"
ATTR_SUPERVISOR = "supervisor"
ATTR_SUPERVISOR_INTERNET = "supervisor_internet"

View File

@@ -9,7 +9,6 @@ from datetime import UTC, datetime, tzinfo
from functools import partial
import logging
import os
import time
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, Self, TypeVar
@@ -29,7 +28,7 @@ from .const import (
if TYPE_CHECKING:
from .addons.manager import AddonManager
from .api import RestAPI
from .arch import CpuArchManager
from .arch import CpuArch
from .auth import Auth
from .backups.manager import BackupManager
from .bus import Bus
@@ -78,7 +77,7 @@ class CoreSys:
# Internal objects pointers
self._docker: DockerAPI | None = None
self._core: Core | None = None
self._arch: CpuArchManager | None = None
self._arch: CpuArch | None = None
self._auth: Auth | None = None
self._homeassistant: HomeAssistant | None = None
self._supervisor: Supervisor | None = None
@@ -266,17 +265,17 @@ class CoreSys:
self._plugins = value
@property
def arch(self) -> CpuArchManager:
"""Return CpuArchManager object."""
def arch(self) -> CpuArch:
"""Return CpuArch object."""
if self._arch is None:
raise RuntimeError("CpuArchManager not set!")
raise RuntimeError("CpuArch not set!")
return self._arch
@arch.setter
def arch(self, value: CpuArchManager) -> None:
"""Set a CpuArchManager object."""
def arch(self, value: CpuArch) -> None:
"""Set a CpuArch object."""
if self._arch:
raise RuntimeError("CpuArchManager already set!")
raise RuntimeError("CpuArch already set!")
self._arch = value
@property
@@ -656,14 +655,8 @@ class CoreSys:
if kwargs:
funct = partial(funct, **kwargs)
# Convert datetime to event loop time base
# If datetime is in the past, delay will be negative and call_at will
# schedule the call as soon as possible.
delay = when.timestamp() - time.time()
loop_time = self.loop.time() + delay
return self.loop.call_at(
loop_time, funct, *args, context=self._create_context()
when.timestamp(), funct, *args, context=self._create_context()
)
@@ -733,8 +726,8 @@ class CoreSysAttributes:
return self.coresys.plugins
@property
def sys_arch(self) -> CpuArchManager:
"""Return CpuArchManager object."""
def sys_arch(self) -> CpuArch:
"""Return CpuArch object."""
return self.coresys.arch
@property

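The call_at hunk above rebases a wall-clock datetime onto the event loop's monotonic clock before scheduling. A runnable sketch of that conversion:

import asyncio
import time
from datetime import UTC, datetime, timedelta

async def main() -> None:
    loop = asyncio.get_running_loop()
    when = datetime.now(UTC) + timedelta(seconds=1)

    # loop.time() is monotonic, not wall-clock, so the datetime must be
    # rebased: compute the wall-clock delay, then add it to loop time.
    delay = when.timestamp() - time.time()
    loop.call_at(loop.time() + delay, print, "fired")
    # If the datetime is in the past, delay is negative and call_at
    # runs the callback as soon as possible.

    await asyncio.sleep(1.5)

asyncio.run(main())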
View File

@@ -15,8 +15,3 @@ class System(DBusInterface):
async def schedule_wipe_device(self) -> bool:
"""Schedule a factory reset on next system boot."""
return await self.connected_dbus.System.call("schedule_wipe_device")
@dbus_connected
async def migrate_docker_storage_driver(self, backend: str) -> None:
"""Migrate Docker storage driver."""
await self.connected_dbus.System.call("migrate_docker_storage_driver", backend)

View File

@@ -250,7 +250,7 @@ class ConnectionType(StrEnum):
WIRELESS = "802-11-wireless"
class ConnectionState(IntEnum):
class ConnectionStateType(IntEnum):
"""Connection states.
https://networkmanager.dev/docs/api/latest/nm-dbus-types.html#NMActiveConnectionState
@@ -306,8 +306,6 @@ class DeviceType(IntEnum):
VLAN = 11
TUN = 16
VETH = 20
WIREGUARD = 29
LOOPBACK = 32
class WirelessMethodType(IntEnum):

View File

@@ -115,7 +115,7 @@ class DBusManager(CoreSysAttributes):
async def load(self) -> None:
"""Connect interfaces to D-Bus."""
if not await self.sys_run_in_executor(SOCKET_DBUS.exists):
if not SOCKET_DBUS.exists():
_LOGGER.error(
"No D-Bus support on Host. Disabled any kind of host control!"
)

View File

@@ -134,10 +134,9 @@ class NetworkManager(DBusInterfaceProxy):
async def check_connectivity(self, *, force: bool = False) -> ConnectivityState:
"""Check the connectivity of the host."""
if force:
return ConnectivityState(
await self.connected_dbus.call("check_connectivity")
)
return ConnectivityState(await self.connected_dbus.get("connectivity"))
return await self.connected_dbus.call("check_connectivity")
else:
return await self.connected_dbus.get("connectivity")
async def connect(self, bus: MessageBus) -> None:
"""Connect to system's D-Bus."""

View File

@@ -90,8 +90,8 @@ class Ip4Properties(IpProperties):
class Ip6Properties(IpProperties):
"""IPv6 properties object for Network Manager."""
addr_gen_mode: int | None
ip6_privacy: int | None
addr_gen_mode: int
ip6_privacy: int
dns: list[bytes] | None

View File

@@ -16,8 +16,8 @@ from ..const import (
DBUS_IFACE_CONNECTION_ACTIVE,
DBUS_NAME_NM,
DBUS_OBJECT_BASE,
ConnectionState,
ConnectionStateFlags,
ConnectionStateType,
)
from ..interface import DBusInterfaceProxy, dbus_property
from ..utils import dbus_connected
@@ -67,9 +67,9 @@ class NetworkConnection(DBusInterfaceProxy):
@property
@dbus_property
def state(self) -> ConnectionState:
def state(self) -> ConnectionStateType:
"""Return the state of the connection."""
return ConnectionState(self.properties[DBUS_ATTR_STATE])
return self.properties[DBUS_ATTR_STATE]
@property
def state_flags(self) -> set[ConnectionStateFlags]:

View File

@@ -1,6 +1,5 @@
"""NetworkInterface object for Network Manager."""
import logging
from typing import Any
from dbus_fast.aio.message_bus import MessageBus
@@ -24,8 +23,6 @@ from .connection import NetworkConnection
from .setting import NetworkSetting
from .wireless import NetworkWireless
_LOGGER: logging.Logger = logging.getLogger(__name__)
class NetworkInterface(DBusInterfaceProxy):
"""NetworkInterface object represents Network Manager Device objects.
@@ -60,15 +57,7 @@ class NetworkInterface(DBusInterfaceProxy):
@dbus_property
def type(self) -> DeviceType:
"""Return interface type."""
try:
return DeviceType(self.properties[DBUS_ATTR_DEVICE_TYPE])
except ValueError:
_LOGGER.debug(
"Unknown device type %s for %s, treating as UNKNOWN",
self.properties[DBUS_ATTR_DEVICE_TYPE],
self.object_path,
)
return DeviceType.UNKNOWN
return self.properties[DBUS_ATTR_DEVICE_TYPE]
@property
@dbus_property

View File

@@ -16,11 +16,7 @@ from ....host.const import (
InterfaceType,
MulticastDnsMode,
)
from ...const import (
InterfaceAddrGenMode as NMInterfaceAddrGenMode,
InterfaceIp6Privacy as NMInterfaceIp6Privacy,
MulticastDnsValue,
)
from ...const import MulticastDnsValue
from .. import NetworkManager
from . import (
CONF_ATTR_802_ETHERNET,
@@ -122,41 +118,24 @@ def _get_ipv6_connection_settings(
ipv6[CONF_ATTR_IPV6_METHOD] = Variant("s", "auto")
if ipv6setting:
if ipv6setting.addr_gen_mode == InterfaceAddrGenMode.EUI64:
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
"i", NMInterfaceAddrGenMode.EUI64.value
)
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 0)
elif (
not support_addr_gen_mode_defaults
or ipv6setting.addr_gen_mode == InterfaceAddrGenMode.STABLE_PRIVACY
):
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
"i", NMInterfaceAddrGenMode.STABLE_PRIVACY.value
)
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 1)
elif ipv6setting.addr_gen_mode == InterfaceAddrGenMode.DEFAULT_OR_EUI64:
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
"i", NMInterfaceAddrGenMode.DEFAULT_OR_EUI64.value
)
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 2)
else:
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
"i", NMInterfaceAddrGenMode.DEFAULT.value
)
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 3)
if ipv6setting.ip6_privacy == InterfaceIp6Privacy.DISABLED:
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
"i", NMInterfaceIp6Privacy.DISABLED.value
)
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 0)
elif ipv6setting.ip6_privacy == InterfaceIp6Privacy.ENABLED_PREFER_PUBLIC:
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
"i", NMInterfaceIp6Privacy.ENABLED_PREFER_PUBLIC.value
)
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 1)
elif ipv6setting.ip6_privacy == InterfaceIp6Privacy.ENABLED:
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
"i", NMInterfaceIp6Privacy.ENABLED.value
)
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 2)
else:
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
"i", NMInterfaceIp6Privacy.DEFAULT.value
)
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", -1)
elif ipv6setting.method == InterfaceMethod.DISABLED:
ipv6[CONF_ATTR_IPV6_METHOD] = Variant("s", "link-local")
elif ipv6setting.method == InterfaceMethod.STATIC:

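The enum values referenced in this hunk can be reconstructed from the integer literals they replace; the names follow the NMInterfaceAddrGenMode / NMInterfaceIp6Privacy imports above. A sketch under that assumption, consistent with NetworkManager's documented D-Bus setting values:

from enum import IntEnum

class InterfaceAddrGenMode(IntEnum):
    """IPv6 address generation modes (NM setting values)."""
    EUI64 = 0
    STABLE_PRIVACY = 1
    DEFAULT_OR_EUI64 = 2
    DEFAULT = 3

class InterfaceIp6Privacy(IntEnum):
    """IPv6 privacy extension settings (NM setting values)."""
    DEFAULT = -1
    DISABLED = 0
    ENABLED_PREFER_PUBLIC = 1
    ENABLED = 2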
View File

@@ -75,7 +75,7 @@ class Resolved(DBusInterfaceProxy):
@dbus_property
def current_dns_server(
self,
) -> tuple[int, DNSAddressFamily, bytes] | None:
) -> list[tuple[int, DNSAddressFamily, bytes]] | None:
"""Return current DNS server."""
return self.properties[DBUS_ATTR_CURRENT_DNS_SERVER]
@@ -83,7 +83,7 @@ class Resolved(DBusInterfaceProxy):
@dbus_property
def current_dns_server_ex(
self,
) -> tuple[int, DNSAddressFamily, bytes, int, str] | None:
) -> list[tuple[int, DNSAddressFamily, bytes, int, str]] | None:
"""Return current DNS server including port and server name."""
return self.properties[DBUS_ATTR_CURRENT_DNS_SERVER_EX]

View File

@@ -70,7 +70,7 @@ class SystemdUnit(DBusInterface):
@dbus_connected
async def get_active_state(self) -> UnitActiveState:
"""Get active state of the unit."""
return UnitActiveState(await self.connected_dbus.Unit.get("active_state"))
return await self.connected_dbus.Unit.get("active_state")
@dbus_connected
def properties_changed(self) -> DBusSignalWrapper:

View File

@@ -9,7 +9,7 @@ from dbus_fast import Variant
from .const import EncryptType, EraseMode
def udisks2_bytes_to_path(path_bytes: bytes) -> Path:
def udisks2_bytes_to_path(path_bytes: bytearray) -> Path:
"""Convert bytes to path object without null character on end."""
if path_bytes and path_bytes[-1] == 0:
return Path(path_bytes[:-1].decode())
@@ -73,7 +73,7 @@ FormatOptionsDataType = TypedDict(
{
"label": NotRequired[str],
"take-ownership": NotRequired[bool],
"encrypt.passphrase": NotRequired[bytes],
"encrypt.passphrase": NotRequired[bytearray],
"encrypt.type": NotRequired[str],
"erase": NotRequired[str],
"update-partition-type": NotRequired[bool],

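A usage sketch for udisks2_bytes_to_path above; UDisks2 reports device paths as NUL-terminated byte strings over D-Bus. The hunk truncates the function, so the fallback branch for unterminated input is an assumption here:

from pathlib import Path

def udisks2_bytes_to_path(path_bytes: bytes) -> Path:
    """Strip the trailing NUL that UDisks2 appends to device paths."""
    if path_bytes and path_bytes[-1] == 0:
        return Path(path_bytes[:-1].decode())
    return Path(path_bytes.decode())  # assumed fallback for unterminated input

assert udisks2_bytes_to_path(b"/dev/sda1\x00") == Path("/dev/sda1")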
View File

@@ -2,17 +2,13 @@
from __future__ import annotations
from collections.abc import Awaitable
from contextlib import suppress
from ipaddress import IPv4Address
import logging
import os
from pathlib import Path
from socket import SocketIO
import tempfile
from typing import TYPE_CHECKING, cast
import aiodocker
from attr import evolve
from awesomeversion import AwesomeVersion
import docker
@@ -36,7 +32,6 @@ from ..coresys import CoreSys
from ..exceptions import (
CoreDNSError,
DBusError,
DockerBuildError,
DockerError,
DockerJobError,
DockerNotFound,
@@ -684,12 +679,13 @@ class DockerAddon(DockerInterface):
async def _build(self, version: AwesomeVersion, image: str | None = None) -> None:
"""Build a Docker container."""
build_env = await AddonBuild(self.coresys, self.addon).load_config()
# Check if the build environment is valid, raises if not
await build_env.is_valid()
if not await build_env.is_valid():
_LOGGER.error("Invalid build environment, can't build this add-on!")
raise DockerError()
_LOGGER.info("Starting build for %s:%s", self.image, version)
def build_image() -> tuple[str, str]:
def build_image():
if build_env.squash:
_LOGGER.warning(
"Ignoring squash build option for %s as Docker BuildKit does not support it.",
@@ -708,38 +704,12 @@ class DockerAddon(DockerInterface):
with suppress(docker.errors.NotFound):
self.sys_docker.containers.get(builder_name).remove(force=True, v=True)
# Generate Docker config with registry credentials for base image if needed
docker_config_path: Path | None = None
docker_config_content = build_env.get_docker_config_json()
temp_dir: tempfile.TemporaryDirectory | None = None
try:
if docker_config_content:
# Create temporary directory for docker config
temp_dir = tempfile.TemporaryDirectory(
prefix="hassio_build_", dir=self.sys_config.path_tmp
)
docker_config_path = Path(temp_dir.name) / "config.json"
docker_config_path.write_text(
docker_config_content, encoding="utf-8"
)
_LOGGER.debug(
"Created temporary Docker config for build at %s",
docker_config_path,
)
result = self.sys_docker.run_command(
ADDON_BUILDER_IMAGE,
version=builder_version_tag,
name=builder_name,
**build_env.get_docker_args(
version, addon_image_tag, docker_config_path
),
)
finally:
# Clean up temporary directory
if temp_dir:
temp_dir.cleanup()
result = self.sys_docker.run_command(
ADDON_BUILDER_IMAGE,
version=builder_version_tag,
name=builder_name,
**build_env.get_docker_args(version, addon_image_tag),
)
logs = result.output.decode("utf-8")
@@ -747,24 +717,21 @@ class DockerAddon(DockerInterface):
error_message = f"Docker build failed for {addon_image_tag} (exit code {result.exit_code}). Build output:\n{logs}"
raise docker.errors.DockerException(error_message)
return addon_image_tag, logs
addon_image = self.sys_docker.images.get(addon_image_tag)
return addon_image, logs
try:
addon_image_tag, log = await self.sys_run_in_executor(build_image)
docker_image, log = await self.sys_run_in_executor(build_image)
_LOGGER.debug("Build %s:%s done: %s", self.image, version, log)
# Update meta data
self._meta = await self.sys_docker.images.inspect(addon_image_tag)
self._meta = docker_image.attrs
except (
docker.errors.DockerException,
requests.RequestException,
aiodocker.DockerError,
) as err:
raise DockerBuildError(
f"Can't build {self.image}:{version}: {err!s}", _LOGGER.error
) from err
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't build %s:%s: %s", self.image, version, err)
raise DockerError() from err
_LOGGER.info("Build %s:%s done", self.image, version)
@@ -784,8 +751,11 @@ class DockerAddon(DockerInterface):
)
async def import_image(self, tar_file: Path) -> None:
"""Import a tar file as image."""
if docker_image := await self.sys_docker.import_image(tar_file):
self._meta = docker_image
docker_image = await self.sys_run_in_executor(
self.sys_docker.import_image, tar_file
)
if docker_image:
self._meta = docker_image.attrs
_LOGGER.info("Importing image %s and version %s", tar_file, self.version)
with suppress(DockerError):
@@ -799,21 +769,17 @@ class DockerAddon(DockerInterface):
version: AwesomeVersion | None = None,
) -> None:
"""Check if old version exists and cleanup other versions of image not in use."""
if not (use_image := image or self.image):
raise DockerError("Cannot determine image from metadata!", _LOGGER.error)
if not (use_version := version or self.version):
raise DockerError("Cannot determine version from metadata!", _LOGGER.error)
await self.sys_docker.cleanup_old_images(
use_image,
use_version,
await self.sys_run_in_executor(
self.sys_docker.cleanup_old_images,
(image := image or self.image),
version or self.version,
{old_image} if old_image else None,
keep_images={
f"{addon.image}:{addon.version}"
for addon in self.sys_addons.installed
if addon.slug != self.addon.slug
and addon.image
and addon.image in {old_image, use_image}
and addon.image in {old_image, image}
},
)
@@ -822,9 +788,12 @@ class DockerAddon(DockerInterface):
on_condition=DockerJobError,
concurrency=JobConcurrency.GROUP_REJECT,
)
def write_stdin(self, data: bytes) -> Awaitable[None]:
async def write_stdin(self, data: bytes) -> None:
"""Write to add-on stdin."""
return self.sys_run_in_executor(self._write_stdin, data)
if not await self.is_running():
raise DockerError()
await self.sys_run_in_executor(self._write_stdin, data)
def _write_stdin(self, data: bytes) -> None:
"""Write to add-on stdin.
@@ -834,10 +803,7 @@ class DockerAddon(DockerInterface):
try:
# Load needed docker objects
container = self.sys_docker.containers.get(self.name)
# attach_socket returns SocketIO for local Docker connections (Unix socket)
socket = cast(
SocketIO, container.attach_socket(params={"stdin": 1, "stream": 1})
)
socket = container.attach_socket(params={"stdin": 1, "stream": 1})
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't attach to %s stdin: %s", self.name, err)
raise DockerError() from err
@@ -880,6 +846,16 @@ class DockerAddon(DockerInterface):
):
self.sys_resolution.dismiss_issue(self.addon.device_access_missing_issue)
async def _validate_trust(self, image_id: str) -> None:
"""Validate trust of content."""
if not self.addon.signed:
return
checksum = image_id.partition(":")[2]
return await self.sys_security.verify_content(
cast(str, self.addon.codenotary), checksum
)
@Job(
name="docker_addon_hardware_events",
conditions=[JobCondition.OS_AGENT],

View File

@@ -2,9 +2,12 @@
from __future__ import annotations
from enum import StrEnum
from contextlib import suppress
from enum import Enum, StrEnum
from functools import total_ordering
from pathlib import PurePath
import re
from typing import cast
from docker.types import Mount
@@ -12,13 +15,6 @@ from ..const import MACHINE_ID
RE_RETRYING_DOWNLOAD_STATUS = re.compile(r"Retrying in \d+ seconds?")
# Docker Hub registry identifier (official default)
# Docker's default registry is docker.io
DOCKER_HUB = "docker.io"
# Legacy Docker Hub identifier for backward compatibility
DOCKER_HUB_LEGACY = "hub.docker.com"
class Capabilities(StrEnum):
"""Linux Capabilities."""
@@ -79,6 +75,57 @@ class PropagationMode(StrEnum):
RSLAVE = "rslave"
@total_ordering
class PullImageLayerStage(Enum):
"""Job stages for pulling an image layer.
These are a subset of the statuses in a docker pull image log. They
are the standardized ones that are the most useful to us.
"""
PULLING_FS_LAYER = 1, "Pulling fs layer"
RETRYING_DOWNLOAD = 2, "Retrying download"
DOWNLOADING = 2, "Downloading"
VERIFYING_CHECKSUM = 3, "Verifying Checksum"
DOWNLOAD_COMPLETE = 4, "Download complete"
EXTRACTING = 5, "Extracting"
PULL_COMPLETE = 6, "Pull complete"
def __init__(self, order: int, status: str) -> None:
"""Set fields from values."""
self.order = order
self.status = status
def __eq__(self, value: object, /) -> bool:
"""Check equality, allow StrEnum style comparisons on status."""
with suppress(AttributeError):
return self.status == cast(PullImageLayerStage, value).status
return self.status == value
def __lt__(self, other: object) -> bool:
"""Order instances."""
with suppress(AttributeError):
return self.order < cast(PullImageLayerStage, other).order
return False
def __hash__(self) -> int:
"""Hash instance."""
return hash(self.status)
@classmethod
def from_status(cls, status: str) -> PullImageLayerStage | None:
"""Return stage instance from pull log status."""
for i in cls:
if i.status == status:
return i
# This one includes the number of seconds until download, so it's not constant
if RE_RETRYING_DOWNLOAD_STATUS.match(status):
return cls.RETRYING_DOWNLOAD
return None
ENV_TIME = "TZ"
ENV_TOKEN = "SUPERVISOR_TOKEN"
ENV_TOKEN_OLD = "HASSIO_TOKEN"

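A usage sketch for PullImageLayerStage above (assuming the enum is importable as defined): from_status resolves a docker pull log status to a stage, the regex catches the non-constant retry message, and the order field with @total_ordering makes stages comparable chronologically.

stage = PullImageLayerStage.from_status("Downloading")
assert stage == PullImageLayerStage.DOWNLOADING

# Retry messages carry a countdown, hence the regex fallback.
assert (
    PullImageLayerStage.from_status("Retrying in 5 seconds")
    is PullImageLayerStage.RETRYING_DOWNLOAD
)

# Stages compare chronologically via their order field.
assert PullImageLayerStage.DOWNLOADING < PullImageLayerStage.EXTRACTING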
View File

@@ -1,10 +1,11 @@
"""Init file for Supervisor Docker object."""
from collections.abc import Awaitable
from ipaddress import IPv4Address
import logging
import re
from awesomeversion import AwesomeVersion
from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
from docker.types import Mount
from ..const import LABEL_MACHINE
@@ -235,10 +236,21 @@ class DockerHomeAssistant(DockerInterface):
environment={ENV_TIME: self.sys_timezone},
)
async def is_initialize(self) -> bool:
def is_initialize(self) -> Awaitable[bool]:
"""Return True if Docker container exists."""
if not self.sys_homeassistant.version:
return False
return await self.sys_docker.container_is_initialized(
self.name, self.image, self.sys_homeassistant.version
return self.sys_run_in_executor(
self.sys_docker.container_is_initialized,
self.name,
self.image,
self.sys_homeassistant.version,
)
async def _validate_trust(self, image_id: str) -> None:
"""Validate trust of content."""
try:
if self.version in {None, LANDINGPAGE} or self.version < _VERIFY_TRUST:
return
except AwesomeVersionCompareException:
return
await super()._validate_trust(image_id)

View File

@@ -6,20 +6,20 @@ from abc import ABC, abstractmethod
from collections import defaultdict
from collections.abc import Awaitable
from contextlib import suppress
from http import HTTPStatus
import logging
import re
from time import time
from typing import Any, cast
from uuid import uuid4
import aiodocker
import aiohttp
from awesomeversion import AwesomeVersion
from awesomeversion.strategy import AwesomeVersionStrategy
import docker
from docker.models.containers import Container
from docker.models.images import Image
import requests
from ..bus import EventListener
from ..const import (
ATTR_PASSWORD,
ATTR_REGISTRY,
@@ -31,27 +31,33 @@ from ..const import (
)
from ..coresys import CoreSys
from ..exceptions import (
CodeNotaryError,
CodeNotaryUntrusted,
DockerAPIError,
DockerError,
DockerHubRateLimitExceeded,
DockerJobError,
DockerLogOutOfOrder,
DockerNotFound,
DockerRequestError,
DockerTrustError,
)
from ..jobs import SupervisorJob
from ..jobs.const import JOB_GROUP_DOCKER_INTERFACE, JobConcurrency
from ..jobs.decorator import Job
from ..jobs.job_group import JobGroup
from ..resolution.const import ContextType, IssueType, SuggestionType
from ..utils.sentry import async_capture_exception
from .const import DOCKER_HUB, DOCKER_HUB_LEGACY, ContainerState, RestartPolicy
from .const import ContainerState, PullImageLayerStage, RestartPolicy
from .manager import CommandReturn, PullLogEntry
from .monitor import DockerContainerStateEvent
from .pull_progress import ImagePullProgress
from .stats import DockerStats
_LOGGER: logging.Logger = logging.getLogger(__name__)
MAP_ARCH: dict[CpuArch, str] = {
IMAGE_WITH_HOST = re.compile(r"^((?:[a-z0-9]+(?:-[a-z0-9]+)*\.)+[a-z]{2,})\/.+")
DOCKER_HUB = "hub.docker.com"
MAP_ARCH: dict[CpuArch | str, str] = {
CpuArch.ARMV7: "linux/arm/v7",
CpuArch.ARMHF: "linux/arm/v6",
CpuArch.AARCH64: "linux/arm64",
@@ -175,17 +181,25 @@ class DockerInterface(JobGroup, ABC):
return self.meta_config.get("Healthcheck")
def _get_credentials(self, image: str) -> dict:
"""Return a dictionary with credentials for docker login."""
"""Return a dictionay with credentials for docker login."""
registry = None
credentials = {}
registry = self.sys_docker.config.get_registry_for_image(image)
matcher = IMAGE_WITH_HOST.match(image)
# Custom registry
if matcher:
if matcher.group(1) in self.sys_docker.config.registries:
registry = matcher.group(1)
credentials[ATTR_REGISTRY] = registry
# If no match assume "dockerhub" as registry
elif DOCKER_HUB in self.sys_docker.config.registries:
registry = DOCKER_HUB
if registry:
stored = self.sys_docker.config.registries[registry]
credentials[ATTR_USERNAME] = stored[ATTR_USERNAME]
credentials[ATTR_PASSWORD] = stored[ATTR_PASSWORD]
# Don't include registry for Docker Hub (both official and legacy)
if registry not in (DOCKER_HUB, DOCKER_HUB_LEGACY):
credentials[ATTR_REGISTRY] = registry
_LOGGER.debug(
"Logging in to %s as %s",
@@ -195,6 +209,170 @@ class DockerInterface(JobGroup, ABC):
return credentials
async def _docker_login(self, image: str) -> None:
"""Try to log in to the registry if there are credentials available."""
if not self.sys_docker.config.registries:
return
credentials = self._get_credentials(image)
if not credentials:
return
await self.sys_run_in_executor(self.sys_docker.docker.login, **credentials)
def _process_pull_image_log(
self, install_job_id: str, reference: PullLogEntry
) -> None:
"""Process events fired from a docker while pulling an image, filtered to a given job id."""
if (
reference.job_id != install_job_id
or not reference.id
or not reference.status
or not (stage := PullImageLayerStage.from_status(reference.status))
):
return
# Pulling FS Layer is our marker for a layer that needs to be downloaded and extracted. Otherwise it already exists and we can ignore it
job: SupervisorJob | None = None
if stage == PullImageLayerStage.PULLING_FS_LAYER:
job = self.sys_jobs.new_job(
name="Pulling container image layer",
initial_stage=stage.status,
reference=reference.id,
parent_id=install_job_id,
internal=True,
)
job.done = False
return
# Find our sub job to update details of
for j in self.sys_jobs.jobs:
if j.parent_id == install_job_id and j.reference == reference.id:
job = j
break
# This likely only occurs if the logs came in out of sync and we got progress before the Pulling FS Layer one
if not job:
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for image id {reference.id} and parent job {install_job_id} but could not find a matching job, skipping",
_LOGGER.debug,
)
# Hopefully these come in order but if they sometimes get out of sync, avoid accidentally going backwards
# If it happens a lot though we may need to reconsider the value of this feature
if job.done:
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for job {job.uuid} but job was done, skipping",
_LOGGER.debug,
)
if job.stage and stage < PullImageLayerStage.from_status(job.stage):
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for job {job.uuid} but job was already on stage {job.stage}, skipping",
_LOGGER.debug,
)
# For progress calculation we assume downloading and extracting each take 50% of the time and other stages are negligible
progress = job.progress
match stage:
case PullImageLayerStage.DOWNLOADING | PullImageLayerStage.EXTRACTING:
if (
reference.progress_detail
and reference.progress_detail.current
and reference.progress_detail.total
):
progress = 50 * (
reference.progress_detail.current
/ reference.progress_detail.total
)
if stage == PullImageLayerStage.EXTRACTING:
progress += 50
case (
PullImageLayerStage.VERIFYING_CHECKSUM
| PullImageLayerStage.DOWNLOAD_COMPLETE
):
progress = 50
case PullImageLayerStage.PULL_COMPLETE:
progress = 100
case PullImageLayerStage.RETRYING_DOWNLOAD:
progress = 0
if stage != PullImageLayerStage.RETRYING_DOWNLOAD and progress < job.progress:
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for job {job.uuid} that implied progress was {progress} but current progress is {job.progress}, skipping",
_LOGGER.debug,
)
# Our filters have all passed. Time to update the job
# Only downloading and extracting have progress details. Use that to set extra
# We'll leave it around on later stages as the total bytes may be useful after that stage
if (
stage in {PullImageLayerStage.DOWNLOADING, PullImageLayerStage.EXTRACTING}
and reference.progress_detail
):
job.update(
progress=progress,
stage=stage.status,
extra={
"current": reference.progress_detail.current,
"total": reference.progress_detail.total,
},
)
else:
job.update(
progress=progress,
stage=stage.status,
done=stage == PullImageLayerStage.PULL_COMPLETE,
extra=None
if stage == PullImageLayerStage.RETRYING_DOWNLOAD
else job.extra,
)
# Once we have received a progress update for every child job, start to set status of the main one
install_job = self.sys_jobs.get_job(install_job_id)
layer_jobs = [
job
for job in self.sys_jobs.jobs
if job.parent_id == install_job.uuid
and job.name == "Pulling container image layer"
]
# First set the total bytes to be downloaded/extracted on the main job
if not install_job.extra:
total = 0
for job in layer_jobs:
if not job.extra:
return
total += job.extra["total"]
install_job.extra = {"total": total}
else:
total = install_job.extra["total"]
# Then determine total progress based on progress of each sub-job, factoring in size of each compared to total
progress = 0.0
stage = PullImageLayerStage.PULL_COMPLETE
for job in layer_jobs:
if not job.extra:
return
progress += job.progress * (job.extra["total"] / total)
job_stage = PullImageLayerStage.from_status(cast(str, job.stage))
if job_stage < PullImageLayerStage.EXTRACTING:
stage = PullImageLayerStage.DOWNLOADING
elif (
stage == PullImageLayerStage.PULL_COMPLETE
and job_stage < PullImageLayerStage.PULL_COMPLETE
):
stage = PullImageLayerStage.EXTRACTING
# Ensure progress is 100 at this point to prevent float drift
if stage == PullImageLayerStage.PULL_COMPLETE:
progress = 100
# To reduce noise, limit updates to when result has changed by an entire percent or when stage changed
if stage != install_job.stage or progress >= install_job.progress + 1:
install_job.update(stage=stage.status, progress=progress)
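# Worked example of the weighting above: two layers of 80 MB and 20 MB
# (total = 100 MB). Each layer job's progress is scaled by its share of
# the total bytes:
#   layer A: progress 50 (download complete), share 80/100 -> contributes 40.0
#   layer B: progress 100 (pull complete),    share 20/100 -> contributes 20.0
#   install job progress = 40.0 + 20.0 = 60.0
# i.e. progress = sum(job.progress * job.extra["total"] / total), with the
# final stage snapping progress to exactly 100 to avoid float drift.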
@Job(
name="docker_interface_install",
on_condition=DockerJobError,
@@ -213,139 +391,129 @@ class DockerInterface(JobGroup, ABC):
if not image:
raise ValueError("Cannot pull without an image!")
image_arch = arch or self.sys_arch.supervisor
platform = MAP_ARCH[image_arch]
pull_progress = ImagePullProgress()
current_job = self.sys_jobs.current
# Try to fetch manifest for accurate size-based progress
# This is optional - if it fails, we fall back to count-based progress
try:
manifest = await self.sys_docker.manifest_fetcher.get_manifest(
image, str(version), platform=platform
)
if manifest:
pull_progress.set_manifest(manifest)
_LOGGER.debug(
"Using manifest for progress: %d layers, %d bytes",
manifest.layer_count,
manifest.total_size,
)
except (aiohttp.ClientError, TimeoutError) as err:
_LOGGER.warning("Could not fetch manifest for progress: %s", err)
async def process_pull_event(event: PullLogEntry) -> None:
"""Process pull event and update job progress."""
if event.job_id != current_job.uuid:
return
# Process event through progress tracker
pull_progress.process_event(event)
# Update job if progress changed significantly (>= 1%)
should_update, progress = pull_progress.should_update_job()
if should_update:
stage = pull_progress.get_stage()
current_job.update(progress=progress, stage=stage)
listener = self.sys_bus.register_event(
BusEvent.DOCKER_IMAGE_PULL_UPDATE, process_pull_event
)
image_arch = str(arch) if arch else self.sys_arch.supervisor
listener: EventListener | None = None
_LOGGER.info("Downloading docker image %s with tag %s.", image, version)
try:
# Get credentials for private registries to pass to aiodocker
credentials = self._get_credentials(image) or None
if self.sys_docker.config.registries:
# Try login if we have defined credentials
await self._docker_login(image)
# Pull new image, passing credentials to aiodocker
docker_image = await self.sys_docker.pull_image(
current_job.uuid,
curr_job_id = self.sys_jobs.current.uuid
async def process_pull_image_log(reference: PullLogEntry) -> None:
try:
self._process_pull_image_log(curr_job_id, reference)
except DockerLogOutOfOrder as err:
# Send all these to sentry. Missing a few progress updates
# shouldn't matter to users but matters to us
await async_capture_exception(err)
listener = self.sys_bus.register_event(
BusEvent.DOCKER_IMAGE_PULL_UPDATE, process_pull_image_log
)
# Pull new image
docker_image = await self.sys_run_in_executor(
self.sys_docker.pull_image,
self.sys_jobs.current.uuid,
image,
str(version),
platform=platform,
auth=credentials,
platform=MAP_ARCH[image_arch],
)
# Validate content
try:
await self._validate_trust(cast(str, docker_image.id))
except CodeNotaryError:
with suppress(docker.errors.DockerException):
await self.sys_run_in_executor(
self.sys_docker.images.remove,
image=f"{image}:{version!s}",
force=True,
)
raise
# Tag latest
if latest:
_LOGGER.info(
"Tagging image %s with version %s as latest", image, version
)
await self.sys_docker.images.tag(
docker_image["Id"], image, tag="latest"
)
await self.sys_run_in_executor(docker_image.tag, image, tag="latest")
except docker.errors.APIError as err:
if err.status_code == HTTPStatus.TOO_MANY_REQUESTS:
if err.status_code == 429:
self.sys_resolution.create_issue(
IssueType.DOCKER_RATELIMIT,
ContextType.SYSTEM,
suggestions=[SuggestionType.REGISTRY_LOGIN],
)
raise DockerHubRateLimitExceeded(_LOGGER.error) from err
await async_capture_exception(err)
raise DockerError(
f"Can't install {image}:{version!s}: {err}", _LOGGER.error
) from err
except aiodocker.DockerError as err:
if err.status == HTTPStatus.TOO_MANY_REQUESTS:
self.sys_resolution.create_issue(
IssueType.DOCKER_RATELIMIT,
ContextType.SYSTEM,
suggestions=[SuggestionType.REGISTRY_LOGIN],
_LOGGER.info(
"Your IP address has made too many requests to Docker Hub which activated a rate limit. "
"For more details see https://www.home-assistant.io/more-info/dockerhub-rate-limit"
)
raise DockerHubRateLimitExceeded(_LOGGER.error) from err
await async_capture_exception(err)
raise DockerError(
f"Can't install {image}:{version!s}: {err}", _LOGGER.error
) from err
except (
docker.errors.DockerException,
requests.RequestException,
) as err:
except (docker.errors.DockerException, requests.RequestException) as err:
await async_capture_exception(err)
raise DockerError(
f"Unknown error with {image}:{version!s} -> {err!s}", _LOGGER.error
) from err
except CodeNotaryUntrusted as err:
raise DockerTrustError(
f"Pulled image {image}:{version!s} failed on content-trust verification!",
_LOGGER.critical,
) from err
except CodeNotaryError as err:
raise DockerTrustError(
f"Error happened on Content-Trust check for {image}:{version!s}: {err!s}",
_LOGGER.error,
) from err
finally:
self.sys_bus.remove_listener(listener)
if listener:
self.sys_bus.remove_listener(listener)
self._meta = docker_image
self._meta = docker_image.attrs
async def exists(self) -> bool:
"""Return True if Docker image exists in local repository."""
with suppress(aiodocker.DockerError, requests.RequestException):
await self.sys_docker.images.inspect(f"{self.image}:{self.version!s}")
with suppress(docker.errors.DockerException, requests.RequestException):
await self.sys_run_in_executor(
self.sys_docker.images.get, f"{self.image}:{self.version!s}"
)
return True
return False
async def _get_container(self) -> Container | None:
"""Get docker container, returns None if not found."""
async def is_running(self) -> bool:
"""Return True if Docker is running."""
try:
return await self.sys_run_in_executor(
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
return None
return False
except docker.errors.DockerException as err:
raise DockerAPIError(
f"Docker API error occurred while getting container information: {err!s}"
) from err
raise DockerAPIError() from err
except requests.RequestException as err:
raise DockerRequestError(
f"Error communicating with Docker to get container information: {err!s}"
) from err
raise DockerRequestError() from err
async def is_running(self) -> bool:
"""Return True if Docker is running."""
if docker_container := await self._get_container():
return docker_container.status == "running"
return False
return docker_container.status == "running"
async def current_state(self) -> ContainerState:
"""Return current state of container."""
if docker_container := await self._get_container():
return _container_state_from_model(docker_container)
return ContainerState.UNKNOWN
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
return ContainerState.UNKNOWN
except docker.errors.DockerException as err:
raise DockerAPIError() from err
except requests.RequestException as err:
raise DockerRequestError() from err
return _container_state_from_model(docker_container)
@Job(name="docker_interface_attach", concurrency=JobConcurrency.GROUP_QUEUE)
async def attach(
@@ -372,17 +540,15 @@ class DockerInterface(JobGroup, ABC):
),
)
with suppress(aiodocker.DockerError, requests.RequestException):
with suppress(docker.errors.DockerException, requests.RequestException):
if not self._meta and self.image:
self._meta = await self.sys_docker.images.inspect(
self._meta = self.sys_docker.images.get(
f"{self.image}:{version!s}"
)
).attrs
# Successful?
if not self._meta:
raise DockerError(
f"Could not get metadata on container or image for {self.name}"
)
raise DockerError()
_LOGGER.info("Attaching to %s with version %s", self.image, self.version)
@Job(
@@ -446,17 +612,14 @@ class DockerInterface(JobGroup, ABC):
)
async def remove(self, *, remove_image: bool = True) -> None:
"""Remove Docker images."""
if not self.image or not self.version:
raise DockerError(
"Cannot determine image and/or version from metadata!", _LOGGER.error
)
# Cleanup container
with suppress(DockerError):
await self.stop()
if remove_image:
await self.sys_docker.remove_image(self.image, self.version)
await self.sys_run_in_executor(
self.sys_docker.remove_image, self.image, self.version
)
self._meta = None
@@ -472,25 +635,29 @@ class DockerInterface(JobGroup, ABC):
expected_cpu_arch: CpuArch | None = None,
) -> None:
"""Check we have expected image with correct arch."""
arch = expected_cpu_arch or self.sys_arch.supervisor
expected_image_cpu_arch = (
str(expected_cpu_arch) if expected_cpu_arch else self.sys_arch.supervisor
)
image_name = f"{expected_image}:{version!s}"
if self.image == expected_image:
try:
image = await self.sys_docker.images.inspect(image_name)
except (aiodocker.DockerError, requests.RequestException) as err:
image: Image = await self.sys_run_in_executor(
self.sys_docker.images.get, image_name
)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get {image_name} for check due to: {err!s}",
_LOGGER.error,
) from err
image_arch = f"{image['Os']}/{image['Architecture']}"
if "Variant" in image:
image_arch = f"{image_arch}/{image['Variant']}"
image_arch = f"{image.attrs['Os']}/{image.attrs['Architecture']}"
if "Variant" in image.attrs:
image_arch = f"{image_arch}/{image.attrs['Variant']}"
# If we have an image and it's the right arch, all set
# It seems that newer Docker versions return a variant for arm64 images.
# Make sure we match linux/arm64 and linux/arm64/v8.
expected_image_arch = MAP_ARCH[arch]
expected_image_arch = MAP_ARCH[expected_image_cpu_arch]
if image_arch.startswith(expected_image_arch):
return
_LOGGER.info(
@@ -503,7 +670,7 @@ class DockerInterface(JobGroup, ABC):
# We're missing the image we need. Stop and clean up what we have, then pull the right one
with suppress(DockerError):
await self.remove()
await self.install(version, expected_image, arch=arch)
await self.install(version, expected_image, arch=expected_image_cpu_arch)
@Job(
name="docker_interface_update",
@@ -547,13 +714,11 @@ class DockerInterface(JobGroup, ABC):
version: AwesomeVersion | None = None,
) -> None:
"""Check if old version exists and cleanup."""
if not (use_image := image or self.image):
raise DockerError("Cannot determine image from metadata!", _LOGGER.error)
if not (use_version := version or self.version):
raise DockerError("Cannot determine version from metadata!", _LOGGER.error)
await self.sys_docker.cleanup_old_images(
use_image, use_version, {old_image} if old_image else None
await self.sys_run_in_executor(
self.sys_docker.cleanup_old_images,
image or self.image,
version or self.version,
{old_image} if old_image else None,
)
@Job(
@@ -585,8 +750,14 @@ class DockerInterface(JobGroup, ABC):
async def is_failed(self) -> bool:
"""Return True if Docker is failing state."""
if not (docker_container := await self._get_container()):
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
return False
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err
# container is not running
if docker_container.status != "exited":
@@ -599,10 +770,10 @@ class DockerInterface(JobGroup, ABC):
"""Return latest version of local image."""
available_version: list[AwesomeVersion] = []
try:
for image in await self.sys_docker.images.list(
filters=f'{{"reference": ["{self.image}"]}}'
for image in await self.sys_run_in_executor(
self.sys_docker.images.list, self.image
):
for tag in image["RepoTags"]:
for tag in image.tags:
version = AwesomeVersion(tag.partition(":")[2])
if version.strategy == AwesomeVersionStrategy.UNKNOWN:
continue
@@ -611,7 +782,7 @@ class DockerInterface(JobGroup, ABC):
if not available_version:
raise ValueError()
except (aiodocker.DockerError, ValueError) as err:
except (docker.errors.DockerException, ValueError) as err:
raise DockerNotFound(
f"No version found for {self.image}", _LOGGER.info
) from err
@@ -636,3 +807,24 @@ class DockerInterface(JobGroup, ABC):
return self.sys_run_in_executor(
self.sys_docker.container_run_inside, self.name, command
)
async def _validate_trust(self, image_id: str) -> None:
"""Validate trust of content."""
checksum = image_id.partition(":")[2]
return await self.sys_security.verify_own_content(checksum)
@Job(
name="docker_interface_check_trust",
on_condition=DockerJobError,
concurrency=JobConcurrency.GROUP_REJECT,
)
async def check_trust(self) -> None:
"""Check trust of exists Docker image."""
try:
image = await self.sys_run_in_executor(
self.sys_docker.images.get, f"{self.image}:{self.version!s}"
)
except (docker.errors.DockerException, requests.RequestException):
return
await self._validate_trust(cast(str, image.id))

View File

@@ -6,24 +6,20 @@ import asyncio
from contextlib import suppress
from dataclasses import dataclass
from functools import partial
from http import HTTPStatus
from ipaddress import IPv4Address
import json
import logging
import os
from pathlib import Path
import re
from typing import Any, Final, Self, cast
import aiodocker
from aiodocker.images import DockerImages
from aiohttp import ClientSession, ClientTimeout, UnixConnector
import attr
from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
from docker import errors as docker_errors
from docker.api.client import APIClient
from docker.client import DockerClient
from docker.errors import DockerException, ImageNotFound, NotFound
from docker.models.containers import Container, ContainerCollection
from docker.models.images import Image, ImageCollection
from docker.models.networks import Network
from docker.types.daemon import CancellableStream
import requests
@@ -49,17 +45,14 @@ from ..exceptions import (
)
from ..utils.common import FileConfiguration
from ..validate import SCHEMA_DOCKER_CONFIG
from .const import DOCKER_HUB, DOCKER_HUB_LEGACY, LABEL_MANAGED
from .manifest import RegistryManifestFetcher
from .const import LABEL_MANAGED
from .monitor import DockerMonitor
from .network import DockerNetwork
from .utils import get_registry_from_image
_LOGGER: logging.Logger = logging.getLogger(__name__)
MIN_SUPPORTED_DOCKER: Final = AwesomeVersion("24.0.0")
DOCKER_NETWORK_HOST: Final = "host"
RE_IMPORT_IMAGE_STREAM = re.compile(r"(^Loaded image ID: |^Loaded image: )(.+)$")
@attr.s(frozen=True)
@@ -78,25 +71,15 @@ class DockerInfo:
storage: str = attr.ib()
logging: str = attr.ib()
cgroup: str = attr.ib()
support_cpu_realtime: bool = attr.ib()
@staticmethod
async def new(data: dict[str, Any]) -> DockerInfo:
def new(data: dict[str, Any]):
"""Create a object from docker info."""
# Check if CONFIG_RT_GROUP_SCHED is loaded (blocking I/O in executor)
cpu_rt_file_exists = await asyncio.get_running_loop().run_in_executor(
None, Path("/sys/fs/cgroup/cpu/cpu.rt_runtime_us").exists
)
cpu_rt_supported = (
cpu_rt_file_exists and os.environ.get(ENV_SUPERVISOR_CPU_RT) == "1"
)
return DockerInfo(
AwesomeVersion(data.get("ServerVersion", "0.0.0")),
data.get("Driver", "unknown"),
data.get("LoggingDriver", "unknown"),
data.get("CgroupVersion", "1"),
cpu_rt_supported,
)
@property
@@ -107,21 +90,23 @@ class DockerInfo:
except AwesomeVersionCompareException:
return False
@property
def support_cpu_realtime(self) -> bool:
"""Return true, if CONFIG_RT_GROUP_SCHED is loaded."""
if not Path("/sys/fs/cgroup/cpu/cpu.rt_runtime_us").exists():
return False
return bool(os.environ.get(ENV_SUPERVISOR_CPU_RT) == "1")
@dataclass(frozen=True, slots=True)
class PullProgressDetail:
"""Progress detail information for pull.
Documentation is lacking, but both of these appear to be in bytes when populated.
Containerd snapshotter update: when leveraging that new feature, this information
becomes useless to us while extracting; it simply reports elapsed time using
current and units.
"""
current: int | None = None
total: int | None = None
units: str | None = None
@classmethod
def from_pull_log_dict(cls, value: dict[str, int]) -> PullProgressDetail:
@@ -209,33 +194,6 @@ class DockerConfig(FileConfiguration):
"""Return credentials for docker registries."""
return self._data.get(ATTR_REGISTRIES, {})
def get_registry_for_image(self, image: str) -> str | None:
"""Return the registry name if credentials are available for the image.
Matches the image against configured registries and returns the registry
name if found, or None if no matching credentials are configured.
Uses Docker's domain detection logic from:
vendor/github.com/distribution/reference/normalize.go
"""
if not self.registries:
return None
# Check if image uses a custom registry (e.g., ghcr.io/org/image)
registry = get_registry_from_image(image)
if registry:
if registry in self.registries:
return registry
else:
# No registry prefix means Docker Hub
# Support both docker.io (official) and hub.docker.com (legacy)
if DOCKER_HUB in self.registries:
return DOCKER_HUB
if DOCKER_HUB_LEGACY in self.registries:
return DOCKER_HUB_LEGACY
return None
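# A minimal usage sketch of the matching above, assuming a DockerConfig with
# credentials stored for "ghcr.io" and "docker.io" (values hypothetical):
#
#   config.get_registry_for_image("ghcr.io/home-assistant/supervisor")  # -> "ghcr.io"
#   config.get_registry_for_image("homeassistant/home-assistant")       # -> "docker.io"
#   config.get_registry_for_image("quay.io/org/app")                    # -> None (no credentials)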
class DockerAPI(CoreSysAttributes):
"""Docker Supervisor wrapper.
@@ -246,47 +204,36 @@ class DockerAPI(CoreSysAttributes):
def __init__(self, coresys: CoreSys):
"""Initialize Docker base wrapper."""
self.coresys = coresys
# We keep both until we can fully refactor to aiodocker
self._dockerpy: DockerClient | None = None
self.docker: aiodocker.Docker = aiodocker.Docker(
url="unix://localhost", # dummy hostname for URL composition
connector=(connector := UnixConnector(SOCKET_DOCKER.as_posix())),
session=ClientSession(connector=connector, timeout=ClientTimeout(900)),
api_version="auto",
)
self._docker: DockerClient | None = None
self._network: DockerNetwork | None = None
self._info: DockerInfo | None = None
self.config: DockerConfig = DockerConfig()
self._monitor: DockerMonitor = DockerMonitor(coresys)
self._manifest_fetcher: RegistryManifestFetcher = RegistryManifestFetcher(
coresys
)
async def post_init(self) -> Self:
"""Post init actions that must be done in event loop."""
self._dockerpy = await asyncio.get_running_loop().run_in_executor(
self._docker = await asyncio.get_running_loop().run_in_executor(
None,
partial(
DockerClient,
base_url=f"unix:/{SOCKET_DOCKER.as_posix()}",
base_url=f"unix:/{str(SOCKET_DOCKER)}",
version="auto",
timeout=900,
),
)
self._info = await DockerInfo.new(self.dockerpy.info())
self._info = DockerInfo.new(self.docker.info())
await self.config.read_data()
self._network = await DockerNetwork(self.dockerpy).post_init(
self._network = await DockerNetwork(self.docker).post_init(
self.config.enable_ipv6, self.config.mtu
)
return self
@property
def dockerpy(self) -> DockerClient:
def docker(self) -> DockerClient:
"""Get docker API client."""
if not self._dockerpy:
if not self._docker:
raise RuntimeError("Docker API Client not initialized!")
return self._dockerpy
return self._docker
@property
def network(self) -> DockerNetwork:
@@ -296,19 +243,19 @@ class DockerAPI(CoreSysAttributes):
return self._network
@property
def images(self) -> DockerImages:
def images(self) -> ImageCollection:
"""Return API images."""
return self.docker.images
@property
def containers(self) -> ContainerCollection:
"""Return API containers."""
return self.dockerpy.containers
return self.docker.containers
@property
def api(self) -> APIClient:
"""Return API containers."""
return self.dockerpy.api
return self.docker.api
@property
def info(self) -> DockerInfo:
@@ -320,18 +267,13 @@ class DockerAPI(CoreSysAttributes):
@property
def events(self) -> CancellableStream:
"""Return docker event stream."""
return self.dockerpy.events(decode=True)
return self.docker.events(decode=True)
@property
def monitor(self) -> DockerMonitor:
"""Return docker events monitor."""
return self._monitor
@property
def manifest_fetcher(self) -> RegistryManifestFetcher:
"""Return manifest fetcher for registry access."""
return self._manifest_fetcher
async def load(self) -> None:
"""Start docker events monitor."""
await self.monitor.load()
@@ -441,7 +383,7 @@ class DockerAPI(CoreSysAttributes):
with suppress(DockerError):
self.network.detach_default_bridge(container)
else:
host_network: Network = self.dockerpy.networks.get(DOCKER_NETWORK_HOST)
host_network: Network = self.docker.networks.get(DOCKER_NETWORK_HOST)
# Check if container is registered on host
# https://github.com/moby/moby/issues/23302
@@ -468,35 +410,35 @@ class DockerAPI(CoreSysAttributes):
return container
async def pull_image(
def pull_image(
self,
job_id: str,
repository: str,
tag: str = "latest",
platform: str | None = None,
auth: dict[str, str] | None = None,
) -> dict[str, Any]:
) -> Image:
"""Pull the specified image and return it.
This mimics the high level API of images.pull but provides better error handling by raising
based on a docker error on pull. Whereas the high level API ignores all errors on pull and
raises only if the get fails afterwards. Additionally it fires progress reports for the pull
on the bus so listeners can use that to update status for users.
Must be run in executor.
"""
# Use timeout=None to disable timeout for pull operations, matching docker-py behavior.
# aiodocker converts None to ClientTimeout(total=None) which disables the timeout.
async for e in self.images.pull(
repository, tag=tag, platform=platform, auth=auth, stream=True, timeout=None
):
pull_log = self.docker.api.pull(
repository, tag=tag, platform=platform, stream=True, decode=True
)
for e in pull_log:
entry = PullLogEntry.from_pull_log_dict(job_id, e)
if entry.error:
raise entry.exception
await asyncio.gather(
*self.sys_bus.fire_event(BusEvent.DOCKER_IMAGE_PULL_UPDATE, entry)
self.sys_loop.call_soon_threadsafe(
self.sys_bus.fire_event, BusEvent.DOCKER_IMAGE_PULL_UPDATE, entry
)
sep = "@" if tag.startswith("sha256:") else ":"
return await self.images.inspect(f"{repository}{sep}{tag}")
return self.images.get(f"{repository}{sep}{tag}")
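# A minimal sketch of a listener for the progress events fired above, using the
# same bus API seen elsewhere in this module (handler name hypothetical):
#
#   async def on_pull_update(entry: PullLogEntry) -> None:
#       # Mirrors the fields of a docker pull log line (status, id, progressDetail)
#       _LOGGER.debug("pull %s: %s", entry.id, entry.status)
#
#   listener = coresys.bus.register_event(
#       BusEvent.DOCKER_IMAGE_PULL_UPDATE, on_pull_update
#   )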
def run_command(
self,
@@ -517,7 +459,7 @@ class DockerAPI(CoreSysAttributes):
_LOGGER.info("Runing command '%s' on %s", command, image_with_tag)
container = None
try:
container = self.dockerpy.containers.run(
container = self.docker.containers.run(
image_with_tag,
command=command,
detach=True,
@@ -545,35 +487,35 @@ class DockerAPI(CoreSysAttributes):
"""Repair local docker overlayfs2 issues."""
_LOGGER.info("Prune stale containers")
try:
output = self.dockerpy.api.prune_containers()
output = self.docker.api.prune_containers()
_LOGGER.debug("Containers prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for containers prune: %s", err)
_LOGGER.info("Prune stale images")
try:
output = self.dockerpy.api.prune_images(filters={"dangling": False})
output = self.docker.api.prune_images(filters={"dangling": False})
_LOGGER.debug("Images prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for images prune: %s", err)
_LOGGER.info("Prune stale builds")
try:
output = self.dockerpy.api.prune_builds()
output = self.docker.api.prune_builds()
_LOGGER.debug("Builds prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for builds prune: %s", err)
_LOGGER.info("Prune stale volumes")
try:
output = self.dockerpy.api.prune_volumes()
output = self.docker.api.prune_volumes()
_LOGGER.debug("Volumes prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for volumes prune: %s", err)
_LOGGER.info("Prune stale networks")
try:
output = self.dockerpy.api.prune_networks()
output = self.docker.api.prune_networks()
_LOGGER.debug("Networks prune: %s", output)
except docker_errors.APIError as err:
_LOGGER.warning("Error for networks prune: %s", err)
@@ -595,11 +537,11 @@ class DockerAPI(CoreSysAttributes):
Fix: https://github.com/moby/moby/issues/23302
"""
network: Network = self.dockerpy.networks.get(network_name)
network: Network = self.docker.networks.get(network_name)
for cid, data in network.attrs.get("Containers", {}).items():
try:
self.dockerpy.containers.get(cid)
self.docker.containers.get(cid)
continue
except docker_errors.NotFound:
_LOGGER.debug(
@@ -614,32 +556,22 @@ class DockerAPI(CoreSysAttributes):
with suppress(docker_errors.DockerException, requests.RequestException):
network.disconnect(data.get("Name", cid), force=True)
async def container_is_initialized(
def container_is_initialized(
self, name: str, image: str, version: AwesomeVersion
) -> bool:
"""Return True if docker container exists in good state and is built from expected image."""
try:
docker_container = await self.sys_run_in_executor(self.containers.get, name)
docker_image = await self.images.inspect(f"{image}:{version}")
except docker_errors.NotFound:
docker_container = self.containers.get(name)
docker_image = self.images.get(f"{image}:{version}")
except NotFound:
return False
except aiodocker.DockerError as err:
if err.status == HTTPStatus.NOT_FOUND:
return False
raise DockerError(
f"Could not get container {name} or image {image}:{version} to check state: {err!s}",
_LOGGER.error,
) from err
except (docker_errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get container {name} or image {image}:{version} to check state: {err!s}",
_LOGGER.error,
) from err
except (DockerException, requests.RequestException) as err:
raise DockerError() from err
# Check the image is correct and state is good
return (
docker_container.image is not None
and docker_container.image.id == docker_image["Id"]
and docker_container.image.id == docker_image.id
and docker_container.status in ("exited", "running", "created")
)
@@ -649,22 +581,18 @@ class DockerAPI(CoreSysAttributes):
"""Stop/remove Docker container."""
try:
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
# Generally suppressed so we don't log this
except NotFound:
raise DockerNotFound() from None
except (docker_errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get container {name} for stopping: {err!s}",
_LOGGER.error,
) from err
except (DockerException, requests.RequestException) as err:
raise DockerError() from err
if docker_container.status == "running":
_LOGGER.info("Stopping %s application", name)
with suppress(docker_errors.DockerException, requests.RequestException):
with suppress(DockerException, requests.RequestException):
docker_container.stop(timeout=timeout)
if remove_container:
with suppress(docker_errors.DockerException, requests.RequestException):
with suppress(DockerException, requests.RequestException):
_LOGGER.info("Cleaning %s application", name)
docker_container.remove(force=True, v=True)
@@ -676,11 +604,11 @@ class DockerAPI(CoreSysAttributes):
"""Start Docker container."""
try:
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
except NotFound:
raise DockerNotFound(
f"{name} not found for starting up", _LOGGER.error
) from None
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get {name} for starting up", _LOGGER.error
) from err
@@ -688,44 +616,36 @@ class DockerAPI(CoreSysAttributes):
_LOGGER.info("Starting %s", name)
try:
docker_container.start()
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError(f"Can't start {name}: {err}", _LOGGER.error) from err
def restart_container(self, name: str, timeout: int) -> None:
"""Restart docker container."""
try:
container: Container = self.containers.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"Container {name} not found for restarting", _LOGGER.warning
) from None
except (docker_errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get container {name} for restarting: {err!s}", _LOGGER.error
) from err
except NotFound:
raise DockerNotFound() from None
except (DockerException, requests.RequestException) as err:
raise DockerError() from err
_LOGGER.info("Restarting %s", name)
try:
container.restart(timeout=timeout)
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError(f"Can't restart {name}: {err}", _LOGGER.warning) from err
def container_logs(self, name: str, tail: int = 100) -> bytes:
"""Return Docker logs of container."""
try:
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"Container {name} not found for logs", _LOGGER.warning
) from None
except (docker_errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get container {name} for logs: {err!s}", _LOGGER.error
) from err
except NotFound:
raise DockerNotFound() from None
except (DockerException, requests.RequestException) as err:
raise DockerError() from err
try:
return docker_container.logs(tail=tail, stdout=True, stderr=True)
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't grep logs from {name}: {err}", _LOGGER.warning
) from err
@@ -734,23 +654,18 @@ class DockerAPI(CoreSysAttributes):
"""Read and return stats from container."""
try:
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"Container {name} not found for stats", _LOGGER.warning
) from None
except (docker_errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not inspect container '{name}': {err!s}", _LOGGER.error
) from err
except NotFound:
raise DockerNotFound() from None
except (DockerException, requests.RequestException) as err:
raise DockerError() from err
# container is not running
if docker_container.status != "running":
raise DockerError(f"Container {name} is not running", _LOGGER.error)
try:
# When stream=False, stats() returns dict, not Iterator
return cast(dict[str, Any], docker_container.stats(stream=False))
except (docker_errors.DockerException, requests.RequestException) as err:
return docker_container.stats(stream=False)
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't read stats from {name}: {err}", _LOGGER.error
) from err
@@ -759,90 +674,61 @@ class DockerAPI(CoreSysAttributes):
"""Execute a command inside Docker container."""
try:
docker_container: Container = self.containers.get(name)
except docker_errors.NotFound:
raise DockerNotFound(
f"Container {name} not found for running command", _LOGGER.warning
) from None
except (docker_errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't get container {name} to run command: {err!s}"
) from err
except NotFound:
raise DockerNotFound() from None
except (DockerException, requests.RequestException) as err:
raise DockerError() from err
# Execute
try:
code, output = docker_container.exec_run(command)
except (docker_errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't run command in container {name}: {err!s}"
) from err
except (DockerException, requests.RequestException) as err:
raise DockerError() from err
return CommandReturn(code, output)
async def remove_image(
def remove_image(
self, image: str, version: AwesomeVersion, latest: bool = True
) -> None:
"""Remove a Docker image by version and latest."""
try:
if latest:
_LOGGER.info("Removing image %s with latest", image)
try:
await self.images.delete(f"{image}:latest", force=True)
except aiodocker.DockerError as err:
if err.status != HTTPStatus.NOT_FOUND:
raise
with suppress(ImageNotFound):
self.images.remove(image=f"{image}:latest", force=True)
_LOGGER.info("Removing image %s with %s", image, version)
try:
await self.images.delete(f"{image}:{version!s}", force=True)
except aiodocker.DockerError as err:
if err.status != HTTPStatus.NOT_FOUND:
raise
with suppress(ImageNotFound):
self.images.remove(image=f"{image}:{version!s}", force=True)
except (aiodocker.DockerError, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't remove image {image}: {err}", _LOGGER.warning
) from err
async def import_image(self, tar_file: Path) -> dict[str, Any] | None:
def import_image(self, tar_file: Path) -> Image | None:
"""Import a tar file as image."""
try:
with tar_file.open("rb") as read_tar:
resp: list[dict[str, Any]] = await self.images.import_image(read_tar)
except (aiodocker.DockerError, OSError) as err:
docker_image_list: list[Image] = self.images.load(read_tar) # type: ignore
if len(docker_image_list) != 1:
_LOGGER.warning(
"Unexpected image count %d while importing image from tar",
len(docker_image_list),
)
return None
return docker_image_list[0]
except (DockerException, OSError) as err:
raise DockerError(
f"Can't import image from tar: {err}", _LOGGER.error
) from err
docker_image_list: list[str] = []
for chunk in resp:
if "errorDetail" in chunk:
raise DockerError(
f"Can't import image from tar: {chunk['errorDetail']['message']}",
_LOGGER.error,
)
if "stream" in chunk:
if match := RE_IMPORT_IMAGE_STREAM.search(chunk["stream"]):
docker_image_list.append(match.group(2))
if len(docker_image_list) != 1:
_LOGGER.warning(
"Unexpected image count %d while importing image from tar",
len(docker_image_list),
)
return None
try:
return await self.images.inspect(docker_image_list[0])
except (aiodocker.DockerError, requests.RequestException) as err:
raise DockerError(
f"Could not inspect imported image due to: {err!s}", _LOGGER.error
) from err
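# For reference, a sketch of the stream chunks RE_IMPORT_IMAGE_STREAM matches,
# modeled on typical `docker load` output (values hypothetical):
#
#   {"stream": "Loaded image: ghcr.io/home-assistant/supervisor:2025.10.0\n"}
#   {"stream": "Loaded image ID: sha256:0123456789ab...\n"}
#
# The regex captures the reference after either prefix, so both tagged and
# untagged tarballs resolve to exactly one image name.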
def export_image(self, image: str, version: AwesomeVersion, tar_file: Path) -> None:
"""Export current images into a tar file."""
try:
docker_image = self.api.get_image(f"{image}:{version}")
except (docker_errors.DockerException, requests.RequestException) as err:
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't fetch image {image}: {err}", _LOGGER.error
) from err
@@ -859,7 +745,7 @@ class DockerAPI(CoreSysAttributes):
_LOGGER.info("Export image %s done", image)
async def cleanup_old_images(
def cleanup_old_images(
self,
current_image: str,
current_version: AwesomeVersion,
@@ -870,57 +756,46 @@ class DockerAPI(CoreSysAttributes):
"""Clean up old versions of an image."""
image = f"{current_image}:{current_version!s}"
try:
try:
image_attr = await self.images.inspect(image)
except aiodocker.DockerError as err:
if err.status == HTTPStatus.NOT_FOUND:
raise DockerNotFound(
f"{current_image} not found for cleanup", _LOGGER.warning
) from None
raise
except (aiodocker.DockerError, requests.RequestException) as err:
keep = {cast(str, self.images.get(image).id)}
except ImageNotFound:
raise DockerNotFound(
f"{current_image} not found for cleanup", _LOGGER.warning
) from None
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't get {current_image} for cleanup", _LOGGER.warning
) from err
keep = {cast(str, image_attr["Id"])}
if keep_images:
keep_images -= {image}
results = await asyncio.gather(
*[self.images.inspect(image) for image in keep_images],
return_exceptions=True,
)
for result in results:
# If it's not found, no need to preserve it from getting removed
if (
isinstance(result, aiodocker.DockerError)
and result.status == HTTPStatus.NOT_FOUND
):
continue
if isinstance(result, BaseException):
raise DockerError(
f"Failed to get one or more images from {keep} during cleanup",
_LOGGER.warning,
) from result
keep.add(cast(str, result["Id"]))
try:
for image in keep_images:
# If it's not found, no need to preserve it from getting removed
with suppress(ImageNotFound):
keep.add(cast(str, self.images.get(image).id))
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Failed to get one or more images from {keep} during cleanup",
_LOGGER.warning,
) from err
# Cleanup old and current
image_names = list(
old_images | {current_image} if old_images else {current_image}
)
try:
images_list = await self.images.list(
filters=json.dumps({"reference": image_names})
)
except (aiodocker.DockerError, requests.RequestException) as err:
# This API accepts a list of image names. Tested and confirmed working on docker==7.1.0.
# Its typing only allows `str`, though, so a future update could break this.
images_list = self.images.list(name=image_names) # type: ignore
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Corrupt docker overlayfs found: {err}", _LOGGER.warning
) from err
for docker_image in images_list:
if docker_image["Id"] in keep:
if docker_image.id in keep:
continue
with suppress(aiodocker.DockerError, requests.RequestException):
_LOGGER.info("Cleanup images: %s", docker_image["RepoTags"])
await self.images.delete(docker_image["Id"], force=True)
with suppress(DockerException, requests.RequestException):
_LOGGER.info("Cleanup images: %s", docker_image.tags)
self.images.remove(docker_image.id, force=True)

View File

@@ -1,339 +0,0 @@
"""Docker registry manifest fetcher.
Fetches image manifests directly from container registries to get layer sizes
before pulling an image. This enables accurate size-based progress tracking.
"""
from __future__ import annotations
from dataclasses import dataclass
import logging
import re
from typing import TYPE_CHECKING
import aiohttp
from supervisor.docker.utils import get_registry_from_image
from .const import DOCKER_HUB, DOCKER_HUB_LEGACY
if TYPE_CHECKING:
from ..coresys import CoreSys
_LOGGER = logging.getLogger(__name__)
# Media types for manifest requests
MANIFEST_MEDIA_TYPES = (
"application/vnd.docker.distribution.manifest.v2+json",
"application/vnd.oci.image.manifest.v1+json",
"application/vnd.docker.distribution.manifest.list.v2+json",
"application/vnd.oci.image.index.v1+json",
)
@dataclass
class ImageManifest:
"""Container image manifest with layer information."""
digest: str
total_size: int
layers: dict[str, int] # digest -> size in bytes
@property
def layer_count(self) -> int:
"""Return number of layers."""
return len(self.layers)
def parse_image_reference(image: str, tag: str) -> tuple[str, str, str]:
"""Parse image reference into (registry, repository, tag).
Examples:
ghcr.io/home-assistant/home-assistant:2025.1.0
-> (ghcr.io, home-assistant/home-assistant, 2025.1.0)
homeassistant/home-assistant:latest
-> (registry-1.docker.io, homeassistant/home-assistant, latest)
alpine:3.18
-> (registry-1.docker.io, library/alpine, 3.18)
"""
# Check if image has explicit registry host
registry = get_registry_from_image(image)
if registry:
repository = image[len(registry) + 1 :] # Remove "registry/" prefix
else:
registry = DOCKER_HUB
repository = image
# Docker Hub requires "library/" prefix for official images
if "/" not in repository:
repository = f"library/{repository}"
return registry, repository, tag
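# A quick sanity sketch of the normalization above; expected results follow the
# docstring examples (DOCKER_HUB resolves to the Docker Hub registry host):
#
#   assert parse_image_reference("alpine", "3.18") == (
#       DOCKER_HUB, "library/alpine", "3.18"
#   )
#   assert parse_image_reference("ghcr.io/home-assistant/supervisor", "latest") == (
#       "ghcr.io", "home-assistant/supervisor", "latest"
#   )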
class RegistryManifestFetcher:
"""Fetches manifests from container registries."""
def __init__(self, coresys: CoreSys) -> None:
"""Initialize the fetcher."""
self.coresys = coresys
@property
def _session(self) -> aiohttp.ClientSession:
"""Return the websession for HTTP requests."""
return self.coresys.websession
def _get_credentials(self, registry: str) -> tuple[str, str] | None:
"""Get credentials for registry from Docker config.
Returns (username, password) tuple or None if no credentials.
"""
registries = self.coresys.docker.config.registries
# Map registry hostname to config key
# Docker Hub can be stored as "hub.docker.com" in config
if registry in (DOCKER_HUB, DOCKER_HUB_LEGACY):
if DOCKER_HUB in registries:
creds = registries[DOCKER_HUB]
return creds.get("username"), creds.get("password")
elif registry in registries:
creds = registries[registry]
return creds.get("username"), creds.get("password")
return None
async def _get_auth_token(
self,
registry: str,
repository: str,
) -> str | None:
"""Get authentication token for registry.
Uses the WWW-Authenticate header from a 401 response to discover
the token endpoint, then requests a token with appropriate scope.
"""
# First, make an unauthenticated request to get WWW-Authenticate header
manifest_url = f"https://{registry}/v2/{repository}/manifests/latest"
try:
async with self._session.get(manifest_url) as resp:
if resp.status == 200:
# No auth required
return None
if resp.status != 401:
_LOGGER.warning(
"Unexpected status %d from registry %s", resp.status, registry
)
return None
www_auth = resp.headers.get("WWW-Authenticate", "")
except aiohttp.ClientError as err:
_LOGGER.warning("Failed to connect to registry %s: %s", registry, err)
return None
# Parse WWW-Authenticate: Bearer realm="...",service="...",scope="..."
if not www_auth.startswith("Bearer "):
_LOGGER.warning("Unsupported auth type from %s: %s", registry, www_auth)
return None
params = {}
for match in re.finditer(r'(\w+)="([^"]*)"', www_auth):
params[match.group(1)] = match.group(2)
realm = params.get("realm")
service = params.get("service")
if not realm:
_LOGGER.warning("No realm in WWW-Authenticate from %s", registry)
return None
# Build token request URL
token_url = f"{realm}?scope=repository:{repository}:pull"
if service:
token_url += f"&service={service}"
# Check for credentials
auth = None
credentials = self._get_credentials(registry)
if credentials:
username, password = credentials
if username and password:
auth = aiohttp.BasicAuth(username, password)
_LOGGER.debug("Using credentials for %s", registry)
try:
async with self._session.get(token_url, auth=auth) as resp:
if resp.status != 200:
_LOGGER.warning(
"Failed to get token from %s: %d", realm, resp.status
)
return None
data = await resp.json()
return data.get("token") or data.get("access_token")
except aiohttp.ClientError as err:
_LOGGER.warning("Failed to get auth token: %s", err)
return None
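# For reference, a typical WWW-Authenticate header this flow parses (values
# illustrative):
#
#   Bearer realm="https://ghcr.io/token",service="ghcr.io",scope="repository:org/app:pull"
#
# which the code above turns into a token request of the form:
#
#   https://ghcr.io/token?scope=repository:org/app:pull&service=ghcr.io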
async def _fetch_manifest(
self,
registry: str,
repository: str,
reference: str,
token: str | None,
platform: str,
) -> dict | None:
"""Fetch manifest from registry.
If the manifest is a manifest list (multi-arch), fetches the
platform-specific manifest.
"""
manifest_url = f"https://{registry}/v2/{repository}/manifests/{reference}"
headers = {"Accept": ", ".join(MANIFEST_MEDIA_TYPES)}
if token:
headers["Authorization"] = f"Bearer {token}"
try:
async with self._session.get(manifest_url, headers=headers) as resp:
if resp.status != 200:
_LOGGER.warning(
"Failed to fetch manifest for %s/%s:%s - %d",
registry,
repository,
reference,
resp.status,
)
return None
manifest = await resp.json()
except aiohttp.ClientError as err:
_LOGGER.warning("Failed to fetch manifest: %s", err)
return None
media_type = manifest.get("mediaType", "")
# Check if this is a manifest list (multi-arch image)
if "list" in media_type or "index" in media_type:
manifests = manifest.get("manifests", [])
if not manifests:
_LOGGER.warning("Empty manifest list for %s/%s", registry, repository)
return None
# Platform format is "linux/amd64", "linux/arm64", etc.
parts = platform.split("/")
if len(parts) < 2:
_LOGGER.warning("Invalid platform format: %s", platform)
return None
target_os, target_arch = parts[0], parts[1]
platform_manifest = None
for m in manifests:
plat = m.get("platform", {})
if (
plat.get("os") == target_os
and plat.get("architecture") == target_arch
):
platform_manifest = m
break
if not platform_manifest:
_LOGGER.warning(
"Platform %s/%s not found in manifest list for %s/%s, "
"cannot use manifest for progress tracking",
target_os,
target_arch,
registry,
repository,
)
return None
# Fetch the platform-specific manifest
return await self._fetch_manifest(
registry,
repository,
platform_manifest["digest"],
token,
platform,
)
return manifest
async def get_manifest(
self,
image: str,
tag: str,
platform: str,
) -> ImageManifest | None:
"""Fetch manifest and extract layer sizes.
Args:
image: Image name (e.g., "ghcr.io/home-assistant/home-assistant")
tag: Image tag (e.g., "2025.1.0")
platform: Target platform (e.g., "linux/amd64")
Returns:
ImageManifest with layer sizes, or None if fetch failed.
"""
registry, repository, tag = parse_image_reference(image, tag)
_LOGGER.debug(
"Fetching manifest for %s/%s:%s (platform=%s)",
registry,
repository,
tag,
platform,
)
# Get auth token
token = await self._get_auth_token(registry, repository)
# Fetch manifest
manifest = await self._fetch_manifest(
registry, repository, tag, token, platform
)
if not manifest:
return None
# Extract layer information
layers = manifest.get("layers", [])
if not layers:
_LOGGER.warning(
"No layers in manifest for %s/%s:%s", registry, repository, tag
)
return None
layer_sizes: dict[str, int] = {}
total_size = 0
for layer in layers:
digest = layer.get("digest", "")
size = layer.get("size", 0)
if digest and size:
# Store by short digest (first 12 chars after sha256:)
short_digest = (
digest.split(":")[1][:12] if ":" in digest else digest[:12]
)
layer_sizes[short_digest] = size
total_size += size
digest = manifest.get("config", {}).get("digest", "")
_LOGGER.debug(
"Manifest for %s/%s:%s - %d layers, %d bytes total",
registry,
repository,
tag,
len(layer_sizes),
total_size,
)
return ImageManifest(
digest=digest,
total_size=total_size,
layers=layer_sizes,
)
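# A minimal sketch of the resulting structure for a two-layer image (digests
# and sizes hypothetical):
#
#   ImageManifest(
#       digest="sha256:aaaa...",
#       total_size=52428800,
#       layers={"0123456789ab": 41943040, "ba9876543210": 10485760},
#   )
#
# The keys are the first 12 hex chars of each layer digest, matching the short
# IDs Docker reports in pull progress events.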

View File

@@ -89,7 +89,7 @@ class DockerMonitor(CoreSysAttributes, Thread):
DockerContainerStateEvent(
name=attributes["name"],
state=container_state,
id=event["Actor"]["ID"],
id=event["id"],
time=event["time"],
),
)

View File

@@ -7,8 +7,6 @@ import logging
from typing import Self, cast
import docker
from docker.models.containers import Container
from docker.models.networks import Network
import requests
from ..const import (
@@ -61,7 +59,7 @@ class DockerNetwork:
def __init__(self, docker_client: docker.DockerClient):
"""Initialize internal Supervisor network."""
self.docker: docker.DockerClient = docker_client
self._network: Network
self._network: docker.models.networks.Network
async def post_init(
self, enable_ipv6: bool | None = None, mtu: int | None = None
@@ -78,7 +76,7 @@ class DockerNetwork:
return DOCKER_NETWORK
@property
def network(self) -> Network:
def network(self) -> docker.models.networks.Network:
"""Return docker network."""
return self._network
@@ -119,7 +117,7 @@ class DockerNetwork:
def _get_network(
self, enable_ipv6: bool | None = None, mtu: int | None = None
) -> Network:
) -> docker.models.networks.Network:
"""Get supervisor network."""
try:
if network := self.docker.networks.get(DOCKER_NETWORK):
@@ -220,7 +218,7 @@ class DockerNetwork:
def attach_container(
self,
container: Container,
container: docker.models.containers.Container,
alias: list[str] | None = None,
ipv4: IPv4Address | None = None,
) -> None:
@@ -277,7 +275,9 @@ class DockerNetwork:
if container.id not in self.containers:
self.attach_container(container, alias, ipv4)
def detach_default_bridge(self, container: Container) -> None:
def detach_default_bridge(
self, container: docker.models.containers.Container
) -> None:
"""Detach default Docker bridge.
Need run inside executor.

View File

@@ -1,368 +0,0 @@
"""Image pull progress tracking."""
from __future__ import annotations
from contextlib import suppress
from dataclasses import dataclass, field
from enum import Enum
import logging
from typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
from .manager import PullLogEntry
from .manifest import ImageManifest
_LOGGER = logging.getLogger(__name__)
# Progress weight distribution: 70% downloading, 30% extraction
DOWNLOAD_WEIGHT = 70.0
EXTRACT_WEIGHT = 30.0
class LayerPullStatus(Enum):
"""Status values for pulling an image layer.
These are a subset of the statuses in a docker pull image log.
The order field allows comparing which stage is further along.
"""
PULLING_FS_LAYER = 1, "Pulling fs layer"
WAITING = 1, "Waiting"
RETRYING = 2, "Retrying" # Matches "Retrying in N seconds"
DOWNLOADING = 3, "Downloading"
VERIFYING_CHECKSUM = 4, "Verifying Checksum"
DOWNLOAD_COMPLETE = 5, "Download complete"
EXTRACTING = 6, "Extracting"
PULL_COMPLETE = 7, "Pull complete"
ALREADY_EXISTS = 7, "Already exists"
def __init__(self, order: int, status: str) -> None:
"""Set fields from values."""
self.order = order
self.status = status
def __eq__(self, value: object, /) -> bool:
"""Check equality, allow string comparisons on status."""
with suppress(AttributeError):
return self.status == cast(LayerPullStatus, value).status
return self.status == value
def __hash__(self) -> int:
"""Return hash based on status string."""
return hash(self.status)
def __lt__(self, other: object) -> bool:
"""Order instances by stage progression."""
with suppress(AttributeError):
return self.order < cast(LayerPullStatus, other).order
return False
@classmethod
def from_status(cls, status: str) -> LayerPullStatus | None:
"""Get enum from status string, or None if not recognized."""
# Handle "Retrying in N seconds" pattern
if status.startswith("Retrying in "):
return cls.RETRYING
for member in cls:
if member.status == status:
return member
return None
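# Illustrative semantics of the enum above, following its definitions:
#
#   LayerPullStatus.from_status("Retrying in 5 seconds")      # -> RETRYING
#   LayerPullStatus.DOWNLOADING < LayerPullStatus.EXTRACTING  # True (order 3 < 6)
#   LayerPullStatus.PULL_COMPLETE == "Pull complete"          # True, string compare allowed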
@dataclass
class LayerProgress:
"""Track progress of a single layer."""
layer_id: str
total_size: int = 0 # Size in bytes (from downloading, reused for extraction)
download_current: int = 0
extract_current: int = 0 # Extraction progress in bytes (overlay2 only)
download_complete: bool = False
extract_complete: bool = False
already_exists: bool = False # Layer was already locally available
def calculate_progress(self) -> float:
"""Calculate layer progress 0-100.
Progress is weighted: 70% download, 30% extraction.
For overlay2, we have byte-based extraction progress.
For containerd, extraction jumps from 70% to 100% on completion.
"""
if self.already_exists or self.extract_complete:
return 100.0
if self.download_complete:
# Check if we have extraction progress (overlay2)
if self.extract_current > 0 and self.total_size > 0:
extract_pct = min(1.0, self.extract_current / self.total_size)
return DOWNLOAD_WEIGHT + (extract_pct * EXTRACT_WEIGHT)
# No extraction progress yet - return 70%
return DOWNLOAD_WEIGHT
if self.total_size > 0:
download_pct = min(1.0, self.download_current / self.total_size)
return download_pct * DOWNLOAD_WEIGHT
return 0.0
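# A worked example of the weighting above (numbers hypothetical): a 100 MB
# layer that has downloaded 50 MB reports 0.5 * 70 = 35.0; once download
# completes and 30 MB are extracted it reports 70 + (30 / 100) * 30 = 79.0,
# and 100.0 when extraction finishes.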
@dataclass
class ImagePullProgress:
"""Track overall progress of pulling an image.
When manifest layer sizes are provided, uses size-weighted progress where
each layer contributes proportionally to its size. This gives accurate
progress based on actual bytes to download.
When manifest is not available, falls back to count-based progress where
each layer contributes equally.
Layers that already exist locally are excluded from the progress calculation.
"""
layers: dict[str, LayerProgress] = field(default_factory=dict)
_last_reported_progress: float = field(default=0.0, repr=False)
_seen_downloading: bool = field(default=False, repr=False)
_manifest_layer_sizes: dict[str, int] = field(default_factory=dict, repr=False)
_total_manifest_size: int = field(default=0, repr=False)
def set_manifest(self, manifest: ImageManifest) -> None:
"""Set manifest layer sizes for accurate size-based progress.
Should be called before processing pull events.
"""
self._manifest_layer_sizes = dict(manifest.layers)
self._total_manifest_size = manifest.total_size
_LOGGER.debug(
"Manifest set: %d layers, %d bytes total",
len(self._manifest_layer_sizes),
self._total_manifest_size,
)
def get_or_create_layer(self, layer_id: str) -> LayerProgress:
"""Get existing layer or create new one."""
if layer_id not in self.layers:
# If we have manifest sizes, pre-populate the layer's total_size
manifest_size = self._manifest_layer_sizes.get(layer_id, 0)
self.layers[layer_id] = LayerProgress(
layer_id=layer_id, total_size=manifest_size
)
return self.layers[layer_id]
def process_event(self, entry: PullLogEntry) -> None:
"""Process a pull log event and update layer state."""
# Skip events without layer ID or status
if not entry.id or not entry.status:
return
# Skip metadata events that aren't layer-specific
# "Pulling from X" has id=tag but isn't a layer
if entry.status.startswith("Pulling from "):
return
# Parse status to enum (returns None for unrecognized statuses)
status = LayerPullStatus.from_status(entry.status)
if status is None:
return
layer = self.get_or_create_layer(entry.id)
# Handle "Already exists" - layer is locally available
if status is LayerPullStatus.ALREADY_EXISTS:
layer.already_exists = True
layer.download_complete = True
layer.extract_complete = True
return
# Handle "Pulling fs layer" / "Waiting" - layer is being tracked
if status in (LayerPullStatus.PULLING_FS_LAYER, LayerPullStatus.WAITING):
return
# Handle "Downloading" - update download progress
if status is LayerPullStatus.DOWNLOADING:
# Mark that we've seen downloading - now we know layer count is complete
self._seen_downloading = True
if entry.progress_detail and entry.progress_detail.current is not None:
layer.download_current = entry.progress_detail.current
if entry.progress_detail and entry.progress_detail.total is not None:
# Only set total_size if not already set or if this is larger
# (handles case where total changes during download)
layer.total_size = max(layer.total_size, entry.progress_detail.total)
return
# Handle "Verifying Checksum" - download is essentially complete
if status is LayerPullStatus.VERIFYING_CHECKSUM:
if layer.total_size > 0:
layer.download_current = layer.total_size
return
# Handle "Download complete" - download phase done
if status is LayerPullStatus.DOWNLOAD_COMPLETE:
layer.download_complete = True
if layer.total_size > 0:
layer.download_current = layer.total_size
elif layer.total_size == 0:
# Small layer that skipped downloading phase
# Set minimal size so it doesn't distort weighted average
layer.total_size = 1
layer.download_current = 1
return
# Handle "Extracting" - extraction in progress
if status is LayerPullStatus.EXTRACTING:
# For overlay2: progressDetail has {current, total} in bytes
# For containerd: progressDetail has {current, units: "s"} (time elapsed)
# We can only use byte-based progress (overlay2)
layer.download_complete = True
if layer.total_size > 0:
layer.download_current = layer.total_size
# Check if this is byte-based extraction progress (overlay2)
# Overlay2 has {current, total} in bytes, no units field
# Containerd has {current, units: "s"} which is useless for progress
if (
entry.progress_detail
and entry.progress_detail.current is not None
and entry.progress_detail.units is None
):
# Use layer's total_size from downloading phase (doesn't change)
layer.extract_current = entry.progress_detail.current
_LOGGER.debug(
"Layer %s extracting: %d/%d (%.1f%%)",
layer.layer_id,
layer.extract_current,
layer.total_size,
(layer.extract_current / layer.total_size * 100)
if layer.total_size > 0
else 0,
)
return
# Handle "Pull complete" - layer is fully done
if status is LayerPullStatus.PULL_COMPLETE:
layer.download_complete = True
layer.extract_complete = True
if layer.total_size > 0:
layer.download_current = layer.total_size
return
# Handle "Retrying in N seconds" - reset download progress
if status is LayerPullStatus.RETRYING:
layer.download_current = 0
layer.download_complete = False
return
def calculate_progress(self) -> float:
"""Calculate overall progress 0-100.
When manifest layer sizes are available, uses size-weighted progress
where each layer contributes proportionally to its size.
When manifest is not available, falls back to count-based progress
where each layer contributes equally.
Layers that already exist locally are excluded from the calculation.
Returns 0 until we've seen the first "Downloading" event, since Docker
reports "Already exists" and "Pulling fs layer" events before we know
the complete layer count.
"""
# Don't report progress until we've seen downloading start
# This ensures we know the full layer count before calculating progress
if not self._seen_downloading or not self.layers:
return 0.0
# Only count layers that need pulling (exclude already_exists)
layers_to_pull = [
layer for layer in self.layers.values() if not layer.already_exists
]
if not layers_to_pull:
# All layers already exist, nothing to download
return 100.0
# Use size-weighted progress if manifest sizes are available
if self._manifest_layer_sizes:
return min(100, self._calculate_size_weighted_progress(layers_to_pull))
# Fall back to count-based progress
total_progress = sum(layer.calculate_progress() for layer in layers_to_pull)
return min(100, total_progress / len(layers_to_pull))
def _calculate_size_weighted_progress(
self, layers_to_pull: list[LayerProgress]
) -> float:
"""Calculate size-weighted progress.
Each layer contributes to progress proportionally to its size.
Progress = sum(layer_progress * layer_size) / total_size
"""
# Calculate total size of layers that need pulling
total_size = sum(layer.total_size for layer in layers_to_pull)
if total_size == 0:
# No size info available, fall back to count-based
total_progress = sum(layer.calculate_progress() for layer in layers_to_pull)
return total_progress / len(layers_to_pull)
# Weight each layer's progress by its size
weighted_progress = 0.0
for layer in layers_to_pull:
if layer.total_size > 0:
layer_weight = layer.total_size / total_size
weighted_progress += layer.calculate_progress() * layer_weight
return weighted_progress
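# Worked example (hypothetical sizes): with a 900 MB layer at 50% and a 100 MB
# layer at 100%, progress = 50 * 0.9 + 100 * 0.1 = 55.0, whereas the
# count-based fallback would have reported (50 + 100) / 2 = 75.0.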
def get_stage(self) -> str | None:
"""Get current stage based on layer states."""
if not self.layers:
return None
# Check if any layer is still downloading
for layer in self.layers.values():
if layer.already_exists:
continue
if not layer.download_complete:
return "Downloading"
# All downloads complete, check if extracting
for layer in self.layers.values():
if layer.already_exists:
continue
if not layer.extract_complete:
return "Extracting"
# All done
return "Pull complete"
def should_update_job(self, threshold: float = 1.0) -> tuple[bool, float]:
"""Check if job should be updated based on progress change.
Returns (should_update, current_progress).
Updates are triggered when progress changes by at least threshold%.
Progress is guaranteed to only increase (monotonic).
"""
current_progress = self.calculate_progress()
# Ensure monotonic progress - never report a decrease
# This can happen when new layers get size info and change the weighted average
if current_progress < self._last_reported_progress:
_LOGGER.debug(
"Progress decreased from %.1f%% to %.1f%%, keeping last reported",
self._last_reported_progress,
current_progress,
)
return False, self._last_reported_progress
if current_progress >= self._last_reported_progress + threshold:
_LOGGER.debug(
"Progress update: %.1f%% -> %.1f%% (delta: %.1f%%)",
self._last_reported_progress,
current_progress,
current_progress - self._last_reported_progress,
)
self._last_reported_progress = current_progress
return True, current_progress
return False, self._last_reported_progress
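# A minimal consumer sketch, assuming a job-like object with progress/stage
# setters (names hypothetical):
#
#   progress = ImagePullProgress()
#   for entry in entries:
#       progress.process_event(entry)
#       should_update, pct = progress.should_update_job(threshold=1.0)
#       if should_update:
#           job.update(progress=pct, stage=progress.get_stage())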

View File

@@ -1,12 +1,10 @@
"""Init file for Supervisor Docker object."""
import asyncio
from collections.abc import Awaitable
from ipaddress import IPv4Address
import logging
import os
import aiodocker
from awesomeversion.awesomeversion import AwesomeVersion
import docker
import requests
@@ -114,18 +112,19 @@ class DockerSupervisor(DockerInterface):
name="docker_supervisor_update_start_tag",
concurrency=JobConcurrency.GROUP_QUEUE,
)
async def update_start_tag(self, image: str, version: AwesomeVersion) -> None:
def update_start_tag(self, image: str, version: AwesomeVersion) -> Awaitable[None]:
"""Update start tag to new version."""
return self.sys_run_in_executor(self._update_start_tag, image, version)
def _update_start_tag(self, image: str, version: AwesomeVersion) -> None:
"""Update start tag to new version.
Need run inside executor.
"""
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
docker_image = await self.sys_docker.images.inspect(f"{image}:{version!s}")
except (
aiodocker.DockerError,
docker.errors.DockerException,
requests.RequestException,
) as err:
docker_container = self.sys_docker.containers.get(self.name)
docker_image = self.sys_docker.images.get(f"{image}:{version!s}")
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't get image or container to fix start tag: {err}", _LOGGER.error
) from err
@@ -145,14 +144,8 @@ class DockerSupervisor(DockerInterface):
# If version tag
if start_tag != "latest":
continue
await asyncio.gather(
self.sys_docker.images.tag(
docker_image["Id"], start_image, tag=start_tag
),
self.sys_docker.images.tag(
docker_image["Id"], start_image, tag=version.string
),
)
docker_image.tag(start_image, start_tag)
docker_image.tag(start_image, version.string)
except (aiodocker.DockerError, requests.RequestException) as err:
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(f"Can't fix start tag: {err}", _LOGGER.error) from err

View File

@@ -1,57 +0,0 @@
"""Docker utilities."""
from __future__ import annotations
import re
# Docker image reference domain regex
# Based on Docker's reference implementation:
# vendor/github.com/distribution/reference/normalize.go
#
# A domain is detected if the part before the first / contains:
# - "localhost" (with optional port)
# - Contains "." (like registry.example.com or 127.0.0.1)
# - Contains ":" (like myregistry:5000)
# - IPv6 addresses in brackets (like [::1]:5000)
#
# Note: Docker also treats uppercase letters as registry indicators since
# namespaces must be lowercase, but this regex handles lowercase matching
# and the get_registry_from_image() function validates the registry rules.
IMAGE_REGISTRY_REGEX = re.compile(
r"^(?P<registry>"
r"localhost(?::[0-9]+)?|" # localhost with optional port
r"(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])" # domain component
r"(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))*" # more components
r"(?::[0-9]+)?|" # optional port
r"\[[a-fA-F0-9:]+\](?::[0-9]+)?" # IPv6 with optional port
r")/" # must be followed by /
)
def get_registry_from_image(image_ref: str) -> str | None:
"""Extract registry from Docker image reference.
Returns the registry if the image reference contains one,
or None if the image uses Docker Hub (docker.io).
Based on Docker's reference implementation:
vendor/github.com/distribution/reference/normalize.go
Examples:
get_registry_from_image("nginx") -> None (docker.io)
get_registry_from_image("library/nginx") -> None (docker.io)
get_registry_from_image("myregistry.com/nginx") -> "myregistry.com"
get_registry_from_image("localhost/myimage") -> "localhost"
get_registry_from_image("localhost:5000/myimage") -> "localhost:5000"
get_registry_from_image("registry.io:5000/org/app:v1") -> "registry.io:5000"
get_registry_from_image("[::1]:5000/myimage") -> "[::1]:5000"
"""
match = IMAGE_REGISTRY_REGEX.match(image_ref)
if match:
registry = match.group("registry")
# Must contain '.' or ':' or be 'localhost' to be a real registry
# This prevents treating "myuser/myimage" as having registry "myuser"
if "." in registry or ":" in registry or registry == "localhost":
return registry
return None # No registry = Docker Hub (docker.io)

View File

@@ -1,25 +1,25 @@
"""Core Exceptions."""
from collections.abc import Callable, Mapping
from collections.abc import Callable
from typing import Any
MESSAGE_CHECK_SUPERVISOR_LOGS = (
"Check supervisor logs for details (check with '{logs_command}')"
)
EXTRA_FIELDS_LOGS_COMMAND = {"logs_command": "ha supervisor logs"}
class HassioError(Exception):
"""Root exception."""
error_key: str | None = None
message_template: str | None = None
extra_fields: dict[str, Any] | None = None
def __init__(
self, message: str | None = None, logger: Callable[..., None] | None = None
self,
message: str | None = None,
logger: Callable[..., None] | None = None,
*,
extra_fields: dict[str, Any] | None = None,
) -> None:
"""Raise & log."""
self.extra_fields = extra_fields or {}
if not message and self.message_template:
message = (
self.message_template.format(**self.extra_fields)
@@ -41,94 +41,6 @@ class HassioNotSupportedError(HassioError):
"""Function is not supported."""
# API
class APIError(HassioError, RuntimeError):
"""API errors."""
status = 400
headers: Mapping[str, str] | None = None
def __init__(
self,
message: str | None = None,
logger: Callable[..., None] | None = None,
*,
headers: Mapping[str, str] | None = None,
job_id: str | None = None,
) -> None:
"""Raise & log, optionally with job."""
super().__init__(message, logger)
self.headers = headers
self.job_id = job_id
class APIUnauthorized(APIError):
"""API unauthorized error."""
status = 401
class APIForbidden(APIError):
"""API forbidden error."""
status = 403
class APINotFound(APIError):
"""API not found error."""
status = 404
class APIGone(APIError):
"""API is no longer available."""
status = 410
class APITooManyRequests(APIError):
"""API too many requests error."""
status = 429
class APIInternalServerError(APIError):
"""API internal server error."""
status = 500
class APIAddonNotInstalled(APIError):
"""Not installed addon requested at addons API."""
class APIDBMigrationInProgress(APIError):
"""Service is unavailable due to an offline DB migration is in progress."""
status = 503
class APIUnknownSupervisorError(APIError):
"""Unknown error occurred within supervisor. Adds supervisor check logs rider to message template."""
status = 500
def __init__(
self,
logger: Callable[..., None] | None = None,
*,
job_id: str | None = None,
) -> None:
"""Initialize exception."""
self.message_template = (
f"{self.message_template}. {MESSAGE_CHECK_SUPERVISOR_LOGS}"
)
self.extra_fields = (self.extra_fields or {}) | EXTRA_FIELDS_LOGS_COMMAND
super().__init__(None, logger, job_id=job_id)
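A hedged sketch of the message this rider produces, assuming HassioError forwards the formatted template as the exception message (SupervisorUnknownError is defined further down in this file):
# Sketch only: relies on HassioError formatting message_template with extra_fields.
err = SupervisorUnknownError()
assert str(err) == (
    "An unknown error occurred with Supervisor. "
    "Check supervisor logs for details (check with 'ha supervisor logs')"
)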
# JobManager
@@ -210,13 +122,6 @@ class SupervisorAppArmorError(SupervisorError):
"""Supervisor AppArmor error."""
class SupervisorUnknownError(SupervisorError, APIUnknownSupervisorError):
"""Raise when an unknown error occurs interacting with Supervisor or its container."""
error_key = "supervisor_unknown_error"
message_template = "An unknown error occurred with Supervisor"
class SupervisorJobError(SupervisorError, JobException):
"""Raise on job errors."""
@@ -345,54 +250,6 @@ class AddonConfigurationError(AddonsError):
"""Error with add-on configuration."""
class AddonConfigurationInvalidError(AddonConfigurationError, APIError):
"""Raise if invalid configuration provided for addon."""
error_key = "addon_configuration_invalid_error"
message_template = "Add-on {addon} has invalid options: {validation_error}"
def __init__(
self,
logger: Callable[..., None] | None = None,
*,
addon: str,
validation_error: str,
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon, "validation_error": validation_error}
super().__init__(None, logger)
class AddonBootConfigCannotChangeError(AddonsError, APIError):
"""Raise if user attempts to change addon boot config when it can't be changed."""
error_key = "addon_boot_config_cannot_change_error"
message_template = (
"Addon {addon} boot option is set to {boot_config} so it cannot be changed"
)
def __init__(
self, logger: Callable[..., None] | None = None, *, addon: str, boot_config: str
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon, "boot_config": boot_config}
super().__init__(None, logger)
class AddonNotRunningError(AddonsError, APIError):
"""Raise when an addon is not running."""
error_key = "addon_not_running_error"
message_template = "Add-on {addon} is not running"
def __init__(
self, logger: Callable[..., None] | None = None, *, addon: str
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon}
super().__init__(None, logger)
class AddonNotSupportedError(HassioNotSupportedError):
"""Addon doesn't support a function."""
@@ -411,8 +268,11 @@ class AddonNotSupportedArchitectureError(AddonNotSupportedError):
architectures: list[str],
) -> None:
"""Initialize exception."""
self.extra_fields = {"slug": slug, "architectures": ", ".join(architectures)}
super().__init__(None, logger)
super().__init__(
None,
logger,
extra_fields={"slug": slug, "architectures": ", ".join(architectures)},
)
class AddonNotSupportedMachineTypeError(AddonNotSupportedError):
@@ -429,8 +289,11 @@ class AddonNotSupportedMachineTypeError(AddonNotSupportedError):
machine_types: list[str],
) -> None:
"""Initialize exception."""
self.extra_fields = {"slug": slug, "machine_types": ", ".join(machine_types)}
super().__init__(None, logger)
super().__init__(
None,
logger,
extra_fields={"slug": slug, "machine_types": ", ".join(machine_types)},
)
class AddonNotSupportedHomeAssistantVersionError(AddonNotSupportedError):
@@ -447,96 +310,11 @@ class AddonNotSupportedHomeAssistantVersionError(AddonNotSupportedError):
version: str,
) -> None:
"""Initialize exception."""
self.extra_fields = {"slug": slug, "version": version}
super().__init__(None, logger)
class AddonNotSupportedWriteStdinError(AddonNotSupportedError, APIError):
"""Addon does not support writing to stdin."""
error_key = "addon_not_supported_write_stdin_error"
message_template = "Add-on {addon} does not support writing to stdin"
def __init__(
self, logger: Callable[..., None] | None = None, *, addon: str
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon}
super().__init__(None, logger)
class AddonBuildDockerfileMissingError(AddonNotSupportedError, APIError):
"""Raise when addon build invalid because dockerfile is missing."""
error_key = "addon_build_dockerfile_missing_error"
message_template = (
"Cannot build addon '{addon}' because dockerfile is missing. A repair "
"using '{repair_command}' will fix this if the cause is data "
"corruption. Otherwise please report this to the addon developer."
)
def __init__(
self, logger: Callable[..., None] | None = None, *, addon: str
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon, "repair_command": "ha supervisor repair"}
super().__init__(None, logger)
class AddonBuildArchitectureNotSupportedError(AddonNotSupportedError, APIError):
"""Raise when addon cannot be built on system because it doesn't support its architecture."""
error_key = "addon_build_architecture_not_supported_error"
message_template = (
"Cannot build addon '{addon}' because its supported architectures "
"({addon_arches}) do not match the system supported architectures ({system_arches})"
)
def __init__(
self,
logger: Callable[..., None] | None = None,
*,
addon: str,
addon_arch_list: list[str],
system_arch_list: list[str],
) -> None:
"""Initialize exception."""
self.extra_fields = {
"addon": addon,
"addon_arches": ", ".join(addon_arch_list),
"system_arches": ", ".join(system_arch_list),
}
super().__init__(None, logger)
class AddonUnknownError(AddonsError, APIUnknownSupervisorError):
"""Raise when unknown error occurs taking an action for an addon."""
error_key = "addon_unknown_error"
message_template = "An unknown error occurred with addon {addon}"
def __init__(
self, logger: Callable[..., None] | None = None, *, addon: str
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon}
super().__init__(logger)
class AddonBuildFailedUnknownError(AddonsError, APIUnknownSupervisorError):
"""Raise when the build failed for an addon due to an unknown error."""
error_key = "addon_build_failed_unknown_error"
message_template = (
"An unknown error occurred while trying to build the image for addon {addon}"
)
def __init__(
self, logger: Callable[..., None] | None = None, *, addon: str
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon}
super().__init__(logger)
super().__init__(
None,
logger,
extra_fields={"slug": slug, "version": version},
)
class AddonsJobError(AddonsError, JobException):
@@ -568,64 +346,13 @@ class AuthError(HassioError):
"""Auth errors."""
class AuthPasswordResetError(AuthError, APIError):
class AuthPasswordResetError(HassioError):
"""Auth error if password reset failed."""
error_key = "auth_password_reset_error"
message_template = "Username '{user}' does not exist. Check list of users using '{auth_list_command}'."
def __init__(
self,
logger: Callable[..., None] | None = None,
*,
user: str,
) -> None:
"""Initialize exception."""
self.extra_fields = {"user": user, "auth_list_command": "ha auth list"}
super().__init__(None, logger)
class AuthListUsersError(AuthError, APIUnknownSupervisorError):
class AuthListUsersError(HassioError):
"""Auth error if listing users failed."""
error_key = "auth_list_users_error"
message_template = "Can't request listing users on Home Assistant"
class AuthListUsersNoneResponseError(AuthError, APIInternalServerError):
"""Auth error if listing users returned invalid None response."""
error_key = "auth_list_users_none_response_error"
message_template = "Home Assistant returned invalid response of `{none}` instead of a list of users. Check Home Assistant logs for details (check with `{logs_command}`)"
extra_fields = {"none": "None", "logs_command": "ha core logs"}
def __init__(self, logger: Callable[..., None] | None = None) -> None:
"""Initialize exception."""
super().__init__(None, logger)
class AuthInvalidNonStringValueError(AuthError, APIUnauthorized):
"""Auth error if something besides a string provided as username or password."""
error_key = "auth_invalid_non_string_value_error"
message_template = "Username and password must be strings"
def __init__(
self,
logger: Callable[..., None] | None = None,
*,
headers: Mapping[str, str] | None = None,
) -> None:
"""Initialize exception."""
super().__init__(None, logger, headers=headers)
class AuthHomeAssistantAPIValidationError(AuthError, APIUnknownSupervisorError):
"""Error encountered trying to validate auth details via Home Assistant API."""
error_key = "auth_home_assistant_api_validation_error"
message_template = "Unable to validate authentication details with Home Assistant"
# Host
@@ -658,6 +385,54 @@ class HostLogError(HostError):
"""Internal error with host log."""
# API
class APIError(HassioError, RuntimeError):
"""API errors."""
status = 400
def __init__(
self,
message: str | None = None,
logger: Callable[..., None] | None = None,
*,
job_id: str | None = None,
error: HassioError | None = None,
) -> None:
"""Raise & log, optionally with job."""
# Allow these to be set from another error here since APIErrors essentially wrap others to add a status
self.error_key = error.error_key if error else None
self.message_template = error.message_template if error else None
super().__init__(
message, logger, extra_fields=error.extra_fields if error else None
)
self.job_id = job_id
class APIForbidden(APIError):
"""API forbidden error."""
status = 403
class APINotFound(APIError):
"""API not found error."""
status = 404
class APIAddonNotInstalled(APIError):
"""Not installed addon requested at addons API."""
class APIDBMigrationInProgress(APIError):
"""Service is unavailable due to an offline DB migration is in progress."""
status = 503
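A short sketch of the wrapping pattern this reworked APIError enables; ExampleError below is illustrative and not part of the diff:
# Hypothetical domain error with a template, wrapped for the API layer.
class ExampleError(HassioError):
    """Illustrative only."""
    error_key = "example_error"
    message_template = "Something failed for {thing}"
err = ExampleError(extra_fields={"thing": "demo"})
wrapped = APIError(str(err), error=err)
assert wrapped.status == 400
assert wrapped.error_key == "example_error"  # copied from the wrapped error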
# Service / Discovery
@@ -802,6 +577,21 @@ class PwnedConnectivityError(PwnedError):
"""Connectivity errors while checking pwned passwords."""
# util/codenotary
class CodeNotaryError(HassioError):
"""Error general with CodeNotary."""
class CodeNotaryUntrusted(CodeNotaryError):
"""Error on untrusted content."""
class CodeNotaryBackendError(CodeNotaryError):
"""CodeNotary backend error happening."""
# util/whoami
@@ -835,10 +625,6 @@ class DockerError(HassioError):
"""Docker API/Transport errors."""
class DockerBuildError(DockerError):
"""Docker error during build."""
class DockerAPIError(DockerError):
"""Docker API error."""
@@ -855,32 +641,16 @@ class DockerNotFound(DockerError):
"""Docker object don't Exists."""
class DockerLogOutOfOrder(DockerError):
"""Raise when log from docker action was out of order."""
class DockerNoSpaceOnDevice(DockerError):
"""Raise if a docker pull fails due to available space."""
error_key = "docker_no_space_on_device"
message_template = "No space left on disk"
def __init__(self, logger: Callable[..., None] | None = None) -> None:
"""Raise & log."""
super().__init__(None, logger=logger)
class DockerHubRateLimitExceeded(DockerError, APITooManyRequests):
"""Raise for docker hub rate limit exceeded error."""
error_key = "dockerhub_rate_limit_exceeded"
message_template = (
"Your IP address has made too many requests to Docker Hub which activated a rate limit. "
"For more details see {dockerhub_rate_limit_url}"
)
extra_fields = {
"dockerhub_rate_limit_url": "https://www.home-assistant.io/more-info/dockerhub-rate-limit"
}
def __init__(self, logger: Callable[..., None] | None = None) -> None:
"""Raise & log."""
super().__init__(None, logger=logger)
super().__init__("No space left on disk", logger=logger)
class DockerJobError(DockerError, JobException):
@@ -951,20 +721,6 @@ class StoreNotFound(StoreError):
"""Raise if slug is not known."""
class StoreAddonNotFoundError(StoreError, APINotFound):
"""Raise if a requested addon is not in the store."""
error_key = "store_addon_not_found_error"
message_template = "Addon {addon} does not exist in the store"
def __init__(
self, logger: Callable[..., None] | None = None, *, addon: str
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon}
super().__init__(None, logger)
class StoreJobError(StoreError, JobException):
"""Raise on job error with git."""
@@ -1000,7 +756,7 @@ class BackupJobError(BackupError, JobException):
"""Raise on Backup job error."""
class BackupFileNotFoundError(BackupError, APINotFound):
class BackupFileNotFoundError(BackupError):
"""Raise if the backup file hasn't been found."""
@@ -1012,55 +768,6 @@ class BackupFileExistError(BackupError):
"""Raise if the backup file already exists."""
class AddonBackupMetadataInvalidError(BackupError, APIError):
"""Raise if invalid metadata file provided for addon in backup."""
error_key = "addon_backup_metadata_invalid_error"
message_template = (
"Metadata file for add-on {addon} in backup is invalid: {validation_error}"
)
def __init__(
self,
logger: Callable[..., None] | None = None,
*,
addon: str,
validation_error: str,
) -> None:
"""Initialize exception."""
self.extra_fields = {"addon": addon, "validation_error": validation_error}
super().__init__(None, logger)
class AddonPrePostBackupCommandReturnedError(BackupError, APIError):
"""Raise when addon's pre/post backup command returns an error."""
error_key = "addon_pre_post_backup_command_returned_error"
message_template = (
"Pre-/Post backup command for add-on {addon} returned error code: "
"{exit_code}. Please report this to the addon developer. Enable debug "
"logging to capture complete command output using {debug_logging_command}"
)
def __init__(
self, logger: Callable[..., None] | None = None, *, addon: str, exit_code: int
) -> None:
"""Initialize exception."""
self.extra_fields = {
"addon": addon,
"exit_code": exit_code,
"debug_logging_command": "ha supervisor options --logging debug",
}
super().__init__(None, logger)
class BackupRestoreUnknownError(BackupError, APIUnknownSupervisorError):
"""Raise when an unknown error occurs during backup or restore."""
error_key = "backup_restore_unknown_error"
message_template = "An unknown error occurred during backup/restore"
# Security

View File

@@ -9,12 +9,7 @@ from typing import Any
from supervisor.resolution.const import UnhealthyReason
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import (
DBusError,
DBusNotConnectedError,
DBusObjectError,
HardwareNotFound,
)
from ..exceptions import DBusError, DBusObjectError, HardwareNotFound
from .const import UdevSubsystem
from .data import Device
@@ -212,8 +207,6 @@ class HwDisk(CoreSysAttributes):
try:
block_device = self.sys_dbus.udisks2.get_block_device_by_path(device_path)
drive = self.sys_dbus.udisks2.get_drive(block_device.drive)
except DBusNotConnectedError:
return None
except DBusObjectError:
_LOGGER.warning(
"Unable to find UDisks2 drive for device at %s", device_path.as_posix()

View File

@@ -428,6 +428,13 @@ class HomeAssistantCore(JobGroup):
"""
return self.instance.logs()
def check_trust(self) -> Awaitable[None]:
"""Calculate HomeAssistant docker content trust.
Return Coroutine.
"""
return self.instance.check_trust()
async def stats(self) -> DockerStats:
"""Return stats of Home Assistant."""
try:

View File

@@ -6,8 +6,8 @@ import logging
import socket
from ..dbus.const import (
ConnectionState,
ConnectionStateFlags,
ConnectionStateType,
DeviceType,
InterfaceAddrGenMode as NMInterfaceAddrGenMode,
InterfaceIp6Privacy as NMInterfaceIp6Privacy,
@@ -267,47 +267,25 @@ class Interface:
return InterfaceMethod.DISABLED
@staticmethod
def _map_nm_addr_gen_mode(addr_gen_mode: int | None) -> InterfaceAddrGenMode:
"""Map IPv6 interface addr_gen_mode.
NetworkManager omits the addr_gen_mode property when set to DEFAULT, so we
treat None as DEFAULT here.
"""
def _map_nm_addr_gen_mode(addr_gen_mode: int) -> InterfaceAddrGenMode:
"""Map IPv6 interface addr_gen_mode."""
mapping = {
NMInterfaceAddrGenMode.EUI64.value: InterfaceAddrGenMode.EUI64,
NMInterfaceAddrGenMode.STABLE_PRIVACY.value: InterfaceAddrGenMode.STABLE_PRIVACY,
NMInterfaceAddrGenMode.DEFAULT_OR_EUI64.value: InterfaceAddrGenMode.DEFAULT_OR_EUI64,
NMInterfaceAddrGenMode.DEFAULT.value: InterfaceAddrGenMode.DEFAULT,
None: InterfaceAddrGenMode.DEFAULT,
}
if addr_gen_mode not in mapping:
_LOGGER.warning(
"Unknown addr_gen_mode value from NetworkManager: %s", addr_gen_mode
)
return mapping.get(addr_gen_mode, InterfaceAddrGenMode.DEFAULT)
@staticmethod
def _map_nm_ip6_privacy(ip6_privacy: int | None) -> InterfaceIp6Privacy:
"""Map IPv6 interface ip6_privacy.
NetworkManager omits the ip6_privacy property when set to DEFAULT, so we
treat None as DEFAULT here.
"""
def _map_nm_ip6_privacy(ip6_privacy: int) -> InterfaceIp6Privacy:
"""Map IPv6 interface ip6_privacy."""
mapping = {
NMInterfaceIp6Privacy.DISABLED.value: InterfaceIp6Privacy.DISABLED,
NMInterfaceIp6Privacy.ENABLED_PREFER_PUBLIC.value: InterfaceIp6Privacy.ENABLED_PREFER_PUBLIC,
NMInterfaceIp6Privacy.ENABLED.value: InterfaceIp6Privacy.ENABLED,
NMInterfaceIp6Privacy.DEFAULT.value: InterfaceIp6Privacy.DEFAULT,
None: InterfaceIp6Privacy.DEFAULT,
}
if ip6_privacy not in mapping:
_LOGGER.warning(
"Unknown ip6_privacy value from NetworkManager: %s", ip6_privacy
)
return mapping.get(ip6_privacy, InterfaceIp6Privacy.DEFAULT)
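For illustration, the None-tolerant variants on one side of this hunk make an omitted NetworkManager property behave like DEFAULT:
# Sketch assuming the None-accepting signatures shown above.
assert Interface._map_nm_addr_gen_mode(None) is InterfaceAddrGenMode.DEFAULT
assert Interface._map_nm_ip6_privacy(None) is InterfaceIp6Privacy.DEFAULT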
@staticmethod
@@ -317,8 +295,8 @@ class Interface:
return False
return connection.state in (
ConnectionState.ACTIVATED,
ConnectionState.ACTIVATING,
ConnectionStateType.ACTIVATED,
ConnectionStateType.ACTIVATING,
)
@staticmethod

View File

@@ -16,7 +16,7 @@ from ..dbus.const import (
DBUS_IFACE_DNS,
DBUS_IFACE_NM,
DBUS_SIGNAL_NM_CONNECTION_ACTIVE_CHANGED,
ConnectionState,
ConnectionStateType,
ConnectivityState,
DeviceType,
WirelessMethodType,
@@ -338,16 +338,16 @@ class NetworkManager(CoreSysAttributes):
# the state change before this point. Get the state currently to
# avoid any race condition.
await con.update()
state: ConnectionState = con.state
state: ConnectionStateType = con.state
while state != ConnectionState.ACTIVATED:
if state == ConnectionState.DEACTIVATED:
while state != ConnectionStateType.ACTIVATED:
if state == ConnectionStateType.DEACTIVATED:
raise HostNetworkError(
"Activating connection failed, check connection settings."
)
msg = await signal.wait_for_signal()
state = ConnectionState(msg[0])
state = msg[0]
_LOGGER.debug("Active connection state changed to %s", state)
# update_only means not done by user so don't force a check afterwards

View File

@@ -9,7 +9,7 @@ from contextvars import Context, ContextVar, Token
from dataclasses import dataclass
from datetime import datetime
import logging
from typing import Any, Self, cast
from typing import Any, Self
from uuid import uuid4
from attr.validators import gt, lt
@@ -98,21 +98,15 @@ class SupervisorJobError:
"""Representation of an error occurring during a supervisor job."""
type_: type[HassioError] = HassioError
message: str = (
"Unknown error, see Supervisor logs (check with 'ha supervisor logs')"
)
message: str = "Unknown error, see supervisor logs"
stage: str | None = None
error_key: str | None = None
extra_fields: dict[str, Any] | None = None
def as_dict(self) -> dict[str, Any]:
def as_dict(self) -> dict[str, str | None]:
"""Return dictionary representation."""
return {
"type": self.type_.__name__,
"message": self.message,
"stage": self.stage,
"error_key": self.error_key,
"extra_fields": self.extra_fields,
}
@@ -162,9 +156,7 @@ class SupervisorJob:
def capture_error(self, err: HassioError | None = None) -> None:
"""Capture an error or record that an unknown error has occurred."""
if err:
new_error = SupervisorJobError(
type(err), str(err), self.stage, err.error_key, err.extra_fields
)
new_error = SupervisorJobError(type(err), str(err), self.stage)
else:
new_error = SupervisorJobError(stage=self.stage)
self.errors += [new_error]
@@ -202,7 +194,7 @@ class SupervisorJob:
self,
progress: float | None = None,
stage: str | None = None,
extra: dict[str, Any] | None | type[DEFAULT] = DEFAULT,
extra: dict[str, Any] | None = DEFAULT, # type: ignore
done: bool | None = None,
) -> None:
"""Update multiple fields with one on change event."""
@@ -213,8 +205,8 @@ class SupervisorJob:
self.progress = progress
if stage is not None:
self.stage = stage
if extra is not DEFAULT:
self.extra = cast(dict[str, Any] | None, extra)
if extra != DEFAULT:
self.extra = extra
# Done has special event. use that to trigger on change if included
# If not then just use any other field to trigger
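The DEFAULT sentinel used here distinguishes "argument omitted" from an explicit None; a standalone sketch of the pattern (all names illustrative):
# Minimal sentinel-default pattern, independent of the supervisor code.
class _Default:
    """Sentinel type meaning 'caller did not pass this argument'."""
DEFAULT = _Default()
def update(extra: dict | None | _Default = DEFAULT) -> None:
    if extra is not DEFAULT:  # explicit None clears; omission keeps the old value
        print("extra set to", extra)
update()          # no change
update(None)      # clears extra
update({"a": 1})  # replaces extra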
@@ -312,21 +304,19 @@ class JobManager(FileConfiguration, CoreSysAttributes):
reference: str | None = None,
initial_stage: str | None = None,
internal: bool = False,
parent_id: str | None | type[DEFAULT] = DEFAULT,
parent_id: str | None = DEFAULT, # type: ignore
child_job_syncs: list[ChildJobSyncFilter] | None = None,
) -> SupervisorJob:
"""Create a new job."""
kwargs: dict[str, Any] = {
"reference": reference,
"stage": initial_stage,
"on_change": self._on_job_change,
"internal": internal,
"child_job_syncs": child_job_syncs,
}
if parent_id is not DEFAULT:
kwargs["parent_id"] = parent_id
job = SupervisorJob(name, **kwargs)
job = SupervisorJob(
name,
reference=reference,
stage=initial_stage,
on_change=self._on_job_change,
internal=internal,
child_job_syncs=child_job_syncs,
**({} if parent_id == DEFAULT else {"parent_id": parent_id}), # type: ignore
)
# Shouldn't happen but inability to find a parent for progress reporting
# shouldn't raise and break the active job
@@ -337,17 +327,6 @@ class JobManager(FileConfiguration, CoreSysAttributes):
if not curr_parent.child_job_syncs:
continue
# HACK: If a parent triggers the same child job, we just skip this second
# sync. Maybe it would be better to have this reflected in the job stage
# and reset progress to 0 instead? There is no support for such stage
# information on Core update entities today though.
if curr_parent.done is True or curr_parent.progress >= 100:
_LOGGER.debug(
"Skipping parent job sync for done parent job %s",
curr_parent.name,
)
continue
# Break after first match at each parent as it doesn't make sense
# to match twice. But it could match multiple parents
for sync in curr_parent.child_job_syncs:

View File

@@ -34,7 +34,6 @@ class JobCondition(StrEnum):
PLUGINS_UPDATED = "plugins_updated"
RUNNING = "running"
SUPERVISOR_UPDATED = "supervisor_updated"
ARCHITECTURE_SUPPORTED = "architecture_supported"
class JobConcurrency(StrEnum):

View File

@@ -441,14 +441,6 @@ class Job(CoreSysAttributes):
raise JobConditionException(
f"'{method_name}' blocked from execution, supervisor needs to be updated first"
)
if (
JobCondition.ARCHITECTURE_SUPPORTED in used_conditions
and UnsupportedReason.SYSTEM_ARCHITECTURE
in coresys.sys_resolution.unsupported
):
raise JobConditionException(
f"'{method_name}' blocked from execution, unsupported system architecture"
)
if JobCondition.PLUGINS_UPDATED in used_conditions and (
out_of_date := [

View File

@@ -64,19 +64,6 @@ def filter_data(coresys: CoreSys, event: Event, hint: Hint) -> Event | None:
# Not full startup - missing information
if coresys.core.state in (CoreState.INITIALIZE, CoreState.SETUP):
# During SETUP, we have basic system info available for better debugging
if coresys.core.state == CoreState.SETUP:
event.setdefault("contexts", {}).update(
{
"versions": {
"docker": coresys.docker.info.version,
"supervisor": coresys.supervisor.version,
},
"host": {
"machine": coresys.machine,
},
}
)
return event
# List installed addons

View File

@@ -1,6 +1,5 @@
"""A collection of tasks."""
from contextlib import suppress
from datetime import datetime, timedelta
import logging
from typing import cast
@@ -14,7 +13,6 @@ from ..exceptions import (
BackupFileNotFoundError,
HomeAssistantError,
ObserverError,
SupervisorUpdateError,
)
from ..homeassistant.const import LANDINGPAGE, WSType
from ..jobs.const import JobConcurrency
@@ -163,7 +161,6 @@ class Tasks(CoreSysAttributes):
JobCondition.INTERNET_HOST,
JobCondition.OS_SUPPORTED,
JobCondition.RUNNING,
JobCondition.ARCHITECTURE_SUPPORTED,
],
concurrency=JobConcurrency.REJECT,
)
@@ -176,11 +173,7 @@ class Tasks(CoreSysAttributes):
"Found new Supervisor version %s, updating",
self.sys_supervisor.latest_version,
)
# Errors are logged by the exceptions; there is not much we can do
# if an update fails here.
with suppress(SupervisorUpdateError):
await self.sys_supervisor.update()
await self.sys_supervisor.update()
async def _watchdog_homeassistant_api(self):
"""Create scheduler task for monitoring running state of API.

View File

@@ -135,7 +135,7 @@ class Mount(CoreSysAttributes, ABC):
@property
def state(self) -> UnitActiveState | None:
"""Get state of mount."""
return UnitActiveState(self._state) if self._state is not None else None
return self._state
@cached_property
def local_where(self) -> Path:

View File

@@ -76,6 +76,13 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
"""Return True if a task is in progress."""
return self.instance.in_progress
def check_trust(self) -> Awaitable[None]:
"""Calculate plugin docker content trust.
Return Coroutine.
"""
return self.instance.check_trust()
def logs(self) -> Awaitable[bytes]:
"""Get docker plugin logs.

View File

@@ -23,5 +23,4 @@ PLUGIN_UPDATE_CONDITIONS = [
JobCondition.HEALTHY,
JobCondition.INTERNET_HOST,
JobCondition.SUPERVISOR_UPDATED,
JobCondition.ARCHITECTURE_SUPPORTED,
]

View File

@@ -2,7 +2,7 @@
from ...const import CoreState
from ...coresys import CoreSys
from ...dbus.const import ConnectionState, ConnectionStateFlags
from ...dbus.const import ConnectionStateFlags, ConnectionStateType
from ...dbus.network.interface import NetworkInterface
from ...exceptions import NetworkInterfaceNotFound
from ..const import ContextType, IssueType
@@ -47,7 +47,7 @@ class CheckNetworkInterfaceIPV4(CheckBase):
return not (
interface.connection.state
in [ConnectionState.ACTIVATED, ConnectionState.ACTIVATING]
in [ConnectionStateType.ACTIVATED, ConnectionStateType.ACTIVATING]
and ConnectionStateFlags.IP4_READY in interface.connection.state_flags
)

View File

@@ -0,0 +1,59 @@
"""Helpers to check supervisor trust."""
import logging
from ...const import CoreState
from ...coresys import CoreSys
from ...exceptions import CodeNotaryError, CodeNotaryUntrusted
from ..const import ContextType, IssueType, UnhealthyReason
from .base import CheckBase
_LOGGER: logging.Logger = logging.getLogger(__name__)
def setup(coresys: CoreSys) -> CheckBase:
"""Check setup function."""
return CheckSupervisorTrust(coresys)
class CheckSupervisorTrust(CheckBase):
"""CheckSystemTrust class for check."""
async def run_check(self) -> None:
"""Run check if not affected by issue."""
if not self.sys_security.content_trust:
_LOGGER.warning(
"Skipping %s, content_trust is globally disabled", self.slug
)
return
try:
await self.sys_supervisor.check_trust()
except CodeNotaryUntrusted:
self.sys_resolution.add_unhealthy_reason(UnhealthyReason.UNTRUSTED)
self.sys_resolution.create_issue(IssueType.TRUST, ContextType.SUPERVISOR)
except CodeNotaryError:
pass
async def approve_check(self, reference: str | None = None) -> bool:
"""Approve check if it is affected by issue."""
try:
await self.sys_supervisor.check_trust()
except CodeNotaryError:
return True
return False
@property
def issue(self) -> IssueType:
"""Return a IssueType enum."""
return IssueType.TRUST
@property
def context(self) -> ContextType:
"""Return a ContextType enum."""
return ContextType.SUPERVISOR
@property
def states(self) -> list[CoreState]:
"""Return a list of valid states when this check can run."""
return [CoreState.RUNNING, CoreState.STARTUP]

View File

@@ -39,6 +39,7 @@ class UnsupportedReason(StrEnum):
APPARMOR = "apparmor"
CGROUP_VERSION = "cgroup_version"
CONNECTIVITY_CHECK = "connectivity_check"
CONTENT_TRUST = "content_trust"
DBUS = "dbus"
DNS_SERVER = "dns_server"
DOCKER_CONFIGURATION = "docker_configuration"
@@ -53,12 +54,12 @@ class UnsupportedReason(StrEnum):
PRIVILEGED = "privileged"
RESTART_POLICY = "restart_policy"
SOFTWARE = "software"
SOURCE_MODS = "source_mods"
SUPERVISOR_VERSION = "supervisor_version"
SYSTEMD = "systemd"
SYSTEMD_JOURNAL = "systemd_journal"
SYSTEMD_RESOLVED = "systemd_resolved"
VIRTUALIZATION_IMAGE = "virtualization_image"
SYSTEM_ARCHITECTURE = "system_architecture"
class UnhealthyReason(StrEnum):
@@ -102,6 +103,7 @@ class IssueType(StrEnum):
PWNED = "pwned"
REBOOT_REQUIRED = "reboot_required"
SECURITY = "security"
TRUST = "trust"
UPDATE_FAILED = "update_failed"
UPDATE_ROLLBACK = "update_rollback"
@@ -113,6 +115,7 @@ class SuggestionType(StrEnum):
CLEAR_FULL_BACKUP = "clear_full_backup"
CREATE_FULL_BACKUP = "create_full_backup"
DISABLE_BOOT = "disable_boot"
EXECUTE_INTEGRITY = "execute_integrity"
EXECUTE_REBOOT = "execute_reboot"
EXECUTE_REBUILD = "execute_rebuild"
EXECUTE_RELOAD = "execute_reload"

View File

@@ -13,6 +13,7 @@ from .validate import get_valid_modules
_LOGGER: logging.Logger = logging.getLogger(__name__)
UNHEALTHY = [
UnsupportedReason.DOCKER_VERSION,
UnsupportedReason.LXC,
UnsupportedReason.PRIVILEGED,
]

View File

@@ -1,4 +1,4 @@
"""Evaluation class for system architecture support."""
"""Evaluation class for Content Trust."""
from ...const import CoreState
from ...coresys import CoreSys
@@ -8,31 +8,27 @@ from .base import EvaluateBase
def setup(coresys: CoreSys) -> EvaluateBase:
"""Initialize evaluation-setup function."""
return EvaluateSystemArchitecture(coresys)
return EvaluateContentTrust(coresys)
class EvaluateSystemArchitecture(EvaluateBase):
"""Evaluate if the current Supervisor architecture is supported."""
class EvaluateContentTrust(EvaluateBase):
"""Evaluate system content trust level."""
@property
def reason(self) -> UnsupportedReason:
"""Return a UnsupportedReason enum."""
return UnsupportedReason.SYSTEM_ARCHITECTURE
return UnsupportedReason.CONTENT_TRUST
@property
def on_failure(self) -> str:
"""Return a string that is printed when self.evaluate is True."""
return "System architecture is no longer supported. Move to a supported system architecture."
return "System run with disabled trusted content security."
@property
def states(self) -> list[CoreState]:
"""Return a list of valid states when this evaluation can run."""
return [CoreState.INITIALIZE]
return [CoreState.INITIALIZE, CoreState.SETUP, CoreState.RUNNING]
async def evaluate(self):
async def evaluate(self) -> bool:
"""Run evaluation."""
return self.sys_host.info.sys_arch.supervisor in {
"i386",
"armhf",
"armv7",
}
return not self.sys_security.content_trust

View File

@@ -8,7 +8,7 @@ from ..const import UnsupportedReason
from .base import EvaluateBase
EXPECTED_LOGGING = "journald"
EXPECTED_STORAGE = ("overlay2", "overlayfs")
EXPECTED_STORAGE = "overlay2"
_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -41,18 +41,14 @@ class EvaluateDockerConfiguration(EvaluateBase):
storage_driver = self.sys_docker.info.storage
logging_driver = self.sys_docker.info.logging
is_unsupported = False
if storage_driver not in EXPECTED_STORAGE:
is_unsupported = True
if storage_driver != EXPECTED_STORAGE:
_LOGGER.warning(
"Docker storage driver %s is not supported!", storage_driver
)
if logging_driver != EXPECTED_LOGGING:
is_unsupported = True
_LOGGER.warning(
"Docker logging driver %s is not supported!", logging_driver
)
return is_unsupported
return storage_driver != EXPECTED_STORAGE or logging_driver != EXPECTED_LOGGING

View File

@@ -5,6 +5,8 @@ from ...coresys import CoreSys
from ..const import UnsupportedReason
from .base import EvaluateBase
SUPPORTED_OS = ["Debian GNU/Linux 12 (bookworm)"]
def setup(coresys: CoreSys) -> EvaluateBase:
"""Initialize evaluation-setup function."""
@@ -31,4 +33,6 @@ class EvaluateOperatingSystem(EvaluateBase):
async def evaluate(self) -> bool:
"""Run evaluation."""
return not self.sys_os.available
if self.sys_os.available:
return False
return self.sys_host.info.operating_system not in SUPPORTED_OS

View File

@@ -0,0 +1,72 @@
"""Evaluation class for Content Trust."""
import errno
import logging
from pathlib import Path
from ...const import CoreState
from ...coresys import CoreSys
from ...exceptions import CodeNotaryError, CodeNotaryUntrusted
from ...utils.codenotary import calc_checksum_path_sourcecode
from ..const import ContextType, IssueType, UnhealthyReason, UnsupportedReason
from .base import EvaluateBase
_SUPERVISOR_SOURCE = Path("/usr/src/supervisor/supervisor")
_LOGGER: logging.Logger = logging.getLogger(__name__)
def setup(coresys: CoreSys) -> EvaluateBase:
"""Initialize evaluation-setup function."""
return EvaluateSourceMods(coresys)
class EvaluateSourceMods(EvaluateBase):
"""Evaluate supervisor source modifications."""
@property
def reason(self) -> UnsupportedReason:
"""Return a UnsupportedReason enum."""
return UnsupportedReason.SOURCE_MODS
@property
def on_failure(self) -> str:
"""Return a string that is printed when self.evaluate is True."""
return "System detect unauthorized source code modifications."
@property
def states(self) -> list[CoreState]:
"""Return a list of valid states when this evaluation can run."""
return [CoreState.RUNNING]
async def evaluate(self) -> bool:
"""Run evaluation."""
if not self.sys_security.content_trust:
_LOGGER.warning("Disabled content-trust, skipping evaluation")
return False
# Calculate checksum of the source code
try:
checksum = await self.sys_run_in_executor(
calc_checksum_path_sourcecode, _SUPERVISOR_SOURCE
)
except OSError as err:
if err.errno == errno.EBADMSG:
self.sys_resolution.add_unhealthy_reason(
UnhealthyReason.OSERROR_BAD_MESSAGE
)
self.sys_resolution.create_issue(
IssueType.CORRUPT_FILESYSTEM, ContextType.SYSTEM
)
_LOGGER.error("Can't calculate checksum of source code: %s", err)
return False
# Validate checksum
try:
await self.sys_security.verify_own_content(checksum)
except CodeNotaryUntrusted:
return True
except CodeNotaryError:
pass
return False

View File

@@ -0,0 +1,67 @@
"""Helpers to check and fix issues with free space."""
from datetime import timedelta
import logging
from ...coresys import CoreSys
from ...exceptions import ResolutionFixupError, ResolutionFixupJobError
from ...jobs.const import JobCondition, JobThrottle
from ...jobs.decorator import Job
from ...security.const import ContentTrustResult
from ..const import ContextType, IssueType, SuggestionType
from .base import FixupBase
_LOGGER: logging.Logger = logging.getLogger(__name__)
def setup(coresys: CoreSys) -> FixupBase:
"""Check setup function."""
return FixupSystemExecuteIntegrity(coresys)
class FixupSystemExecuteIntegrity(FixupBase):
"""Storage class for fixup."""
@Job(
name="fixup_system_execute_integrity_process",
conditions=[JobCondition.INTERNET_SYSTEM],
on_condition=ResolutionFixupJobError,
throttle_period=timedelta(hours=8),
throttle=JobThrottle.THROTTLE,
)
async def process_fixup(self, reference: str | None = None) -> None:
"""Initialize the fixup class."""
result = await self.sys_security.integrity_check()
if ContentTrustResult.FAILED in (result.core, result.supervisor):
raise ResolutionFixupError()
for plugin in result.plugins:
if plugin != ContentTrustResult.FAILED:
continue
raise ResolutionFixupError()
for addon in result.addons:
if addon != ContentTrustResult.FAILED:
continue
raise ResolutionFixupError()
@property
def suggestion(self) -> SuggestionType:
"""Return a SuggestionType enum."""
return SuggestionType.EXECUTE_INTEGRITY
@property
def context(self) -> ContextType:
"""Return a ContextType enum."""
return ContextType.SYSTEM
@property
def issues(self) -> list[IssueType]:
"""Return a IssueType enum list."""
return [IssueType.TRUST]
@property
def auto(self) -> bool:
"""Return if a fixup can be apply as auto fix."""
return True

View File

@@ -0,0 +1,24 @@
"""Security constants."""
from enum import StrEnum
import attr
class ContentTrustResult(StrEnum):
"""Content trust result enum."""
PASS = "pass"
ERROR = "error"
FAILED = "failed"
UNTESTED = "untested"
@attr.s
class IntegrityResult:
"""Result of a full integrity check."""
supervisor: ContentTrustResult = attr.ib(default=ContentTrustResult.UNTESTED)
core: ContentTrustResult = attr.ib(default=ContentTrustResult.UNTESTED)
plugins: dict[str, ContentTrustResult] = attr.ib(default={})
addons: dict[str, ContentTrustResult] = attr.ib(default={})

View File

@@ -4,12 +4,27 @@ from __future__ import annotations
import logging
from ..const import ATTR_FORCE_SECURITY, ATTR_PWNED, FILE_HASSIO_SECURITY
from ..const import (
ATTR_CONTENT_TRUST,
ATTR_FORCE_SECURITY,
ATTR_PWNED,
FILE_HASSIO_SECURITY,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import PwnedError
from ..exceptions import (
CodeNotaryError,
CodeNotaryUntrusted,
PwnedError,
SecurityJobError,
)
from ..jobs.const import JobConcurrency
from ..jobs.decorator import Job, JobCondition
from ..resolution.const import ContextType, IssueType, SuggestionType
from ..utils.codenotary import cas_validate
from ..utils.common import FileConfiguration
from ..utils.pwned import check_pwned_password
from ..validate import SCHEMA_SECURITY_CONFIG
from .const import ContentTrustResult, IntegrityResult
_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -22,6 +37,16 @@ class Security(FileConfiguration, CoreSysAttributes):
super().__init__(FILE_HASSIO_SECURITY, SCHEMA_SECURITY_CONFIG)
self.coresys = coresys
@property
def content_trust(self) -> bool:
"""Return if content trust is enabled/disabled."""
return self._data[ATTR_CONTENT_TRUST]
@content_trust.setter
def content_trust(self, value: bool) -> None:
"""Set content trust is enabled/disabled."""
self._data[ATTR_CONTENT_TRUST] = value
@property
def force(self) -> bool:
"""Return if force security is enabled/disabled."""
@@ -42,6 +67,30 @@ class Security(FileConfiguration, CoreSysAttributes):
"""Set pwned is enabled/disabled."""
self._data[ATTR_PWNED] = value
async def verify_content(self, signer: str, checksum: str) -> None:
"""Verify content on CAS."""
if not self.content_trust:
_LOGGER.warning("Disabled content-trust, skip validation")
return
try:
await cas_validate(signer, checksum)
except CodeNotaryUntrusted:
raise
except CodeNotaryError:
if self.force:
raise
self.sys_resolution.create_issue(
IssueType.TRUST,
ContextType.SYSTEM,
suggestions=[SuggestionType.EXECUTE_INTEGRITY],
)
return
async def verify_own_content(self, checksum: str) -> None:
"""Verify content from HA org."""
return await self.verify_content("notary@home-assistant.io", checksum)
async def verify_secret(self, pwned_hash: str) -> None:
"""Verify pwned state of a secret."""
if not self.pwned:
@@ -54,3 +103,73 @@ class Security(FileConfiguration, CoreSysAttributes):
if self.force:
raise
return
@Job(
name="security_manager_integrity_check",
conditions=[JobCondition.INTERNET_SYSTEM],
on_condition=SecurityJobError,
concurrency=JobConcurrency.REJECT,
)
async def integrity_check(self) -> IntegrityResult:
"""Run a full system integrity check of the platform.
We only allow installing trusted content.
This is an out-of-band manual check.
"""
result: IntegrityResult = IntegrityResult()
if not self.content_trust:
_LOGGER.warning(
"Skipping integrity check, content_trust is globally disabled"
)
return result
# Supervisor
try:
await self.sys_supervisor.check_trust()
result.supervisor = ContentTrustResult.PASS
except CodeNotaryUntrusted:
result.supervisor = ContentTrustResult.ERROR
self.sys_resolution.create_issue(IssueType.TRUST, ContextType.SUPERVISOR)
except CodeNotaryError:
result.supervisor = ContentTrustResult.FAILED
# Core
try:
await self.sys_homeassistant.core.check_trust()
result.core = ContentTrustResult.PASS
except CodeNotaryUntrusted:
result.core = ContentTrustResult.ERROR
self.sys_resolution.create_issue(IssueType.TRUST, ContextType.CORE)
except CodeNotaryError:
result.core = ContentTrustResult.FAILED
# Plugins
for plugin in self.sys_plugins.all_plugins:
try:
await plugin.check_trust()
result.plugins[plugin.slug] = ContentTrustResult.PASS
except CodeNotaryUntrusted:
result.plugins[plugin.slug] = ContentTrustResult.ERROR
self.sys_resolution.create_issue(
IssueType.TRUST, ContextType.PLUGIN, reference=plugin.slug
)
except CodeNotaryError:
result.plugins[plugin.slug] = ContentTrustResult.FAILED
# Add-ons
for addon in self.sys_addons.installed:
if not addon.signed:
result.addons[addon.slug] = ContentTrustResult.UNTESTED
continue
try:
await addon.check_trust()
result.addons[addon.slug] = ContentTrustResult.PASS
except CodeNotaryUntrusted:
result.addons[addon.slug] = ContentTrustResult.ERROR
self.sys_resolution.create_issue(
IssueType.TRUST, ContextType.ADDON, reference=addon.slug
)
except CodeNotaryError:
result.addons[addon.slug] = ContentTrustResult.FAILED
return result
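A hedged consumer sketch for integrity_check(); the helper below is illustrative and assumes a Security instance in scope:
# Hypothetical caller summarizing an IntegrityResult.
async def integrity_ok(security: Security) -> bool:
    result = await security.integrity_check()
    states = [result.supervisor, result.core]
    states += list(result.plugins.values()) + list(result.addons.values())
    # ERROR means untrusted content; FAILED means the check itself could not run.
    return not any(
        s in (ContentTrustResult.ERROR, ContentTrustResult.FAILED) for s in states
    )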

View File

@@ -183,22 +183,19 @@ class GitRepo(CoreSysAttributes):
raise StoreGitError() from err
try:
repo = self.repo
branch = self.repo.active_branch.name
def _fetch_and_check() -> tuple[str, bool]:
"""Fetch from origin and check if changed."""
# This property access is I/O bound
branch = repo.active_branch.name
repo.remotes.origin.fetch(
**{"update-shallow": True, "depth": 1} # type: ignore[arg-type]
# Download data
await self.sys_run_in_executor(
ft.partial(
self.repo.remotes.origin.fetch,
**{"update-shallow": True, "depth": 1}, # type: ignore
)
changed = repo.commit(branch) != repo.commit(f"origin/{branch}")
return branch, changed
)
# Download data and check for changes
branch, changed = await self.sys_run_in_executor(_fetch_and_check)
if changed:
if changed := self.repo.commit(branch) != self.repo.commit(
f"origin/{branch}"
):
# Jump on top of that
await self.sys_run_in_executor(
ft.partial(self.repo.git.reset, f"origin/{branch}", hard=True)
@@ -227,7 +224,6 @@ class GitRepo(CoreSysAttributes):
git.CommandError,
ValueError,
AssertionError,
AttributeError,
UnicodeDecodeError,
) as err:
_LOGGER.error("Can't update %s repo: %s.", self.url, err)

View File

@@ -25,16 +25,19 @@ from .coresys import CoreSys, CoreSysAttributes
from .docker.stats import DockerStats
from .docker.supervisor import DockerSupervisor
from .exceptions import (
CodeNotaryError,
CodeNotaryUntrusted,
DockerError,
HostAppArmorError,
SupervisorAppArmorError,
SupervisorError,
SupervisorJobError,
SupervisorUnknownError,
SupervisorUpdateError,
)
from .jobs.const import JobCondition, JobThrottle
from .jobs.decorator import Job
from .resolution.const import ContextType, IssueType, UnhealthyReason
from .utils.codenotary import calc_checksum
from .utils.sentry import async_capture_exception
_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -147,6 +150,20 @@ class Supervisor(CoreSysAttributes):
_LOGGER.error,
) from err
# Validate
try:
await self.sys_security.verify_own_content(calc_checksum(data))
except CodeNotaryUntrusted as err:
raise SupervisorAppArmorError(
"Content-Trust is broken for the AppArmor profile fetch!",
_LOGGER.critical,
) from err
except CodeNotaryError as err:
raise SupervisorAppArmorError(
f"CodeNotary error while processing AppArmor fetch: {err!s}",
_LOGGER.error,
) from err
# Load
temp_dir: TemporaryDirectory | None = None
@@ -256,12 +273,19 @@ class Supervisor(CoreSysAttributes):
"""
return self.instance.logs()
def check_trust(self) -> Awaitable[None]:
"""Calculate Supervisor docker content trust.
Return Coroutine.
"""
return self.instance.check_trust()
async def stats(self) -> DockerStats:
"""Return stats of Supervisor."""
try:
return await self.instance.stats()
except DockerError as err:
raise SupervisorUnknownError() from err
raise SupervisorError() from err
async def repair(self):
"""Repair local Supervisor data."""

View File

@@ -31,8 +31,14 @@ from .const import (
UpdateChannel,
)
from .coresys import CoreSys, CoreSysAttributes
from .exceptions import UpdaterError, UpdaterJobError
from .exceptions import (
CodeNotaryError,
CodeNotaryUntrusted,
UpdaterError,
UpdaterJobError,
)
from .jobs.decorator import Job, JobCondition
from .utils.codenotary import calc_checksum
from .utils.common import FileConfiguration
from .validate import SCHEMA_UPDATER_CONFIG
@@ -242,10 +248,9 @@ class Updater(FileConfiguration, CoreSysAttributes):
@Job(
name="updater_fetch_data",
conditions=[
JobCondition.ARCHITECTURE_SUPPORTED,
JobCondition.INTERNET_SYSTEM,
JobCondition.HOME_ASSISTANT_CORE_SUPPORTED,
JobCondition.OS_SUPPORTED,
JobCondition.HOME_ASSISTANT_CORE_SUPPORTED,
],
on_condition=UpdaterJobError,
throttle_period=timedelta(seconds=30),
@@ -284,6 +289,19 @@ class Updater(FileConfiguration, CoreSysAttributes):
self.sys_bus.remove_listener(self._connectivity_listener)
self._connectivity_listener = None
# Validate
try:
await self.sys_security.verify_own_content(calc_checksum(data))
except CodeNotaryUntrusted as err:
raise UpdaterError(
"Content-Trust is broken for the version file fetch!", _LOGGER.critical
) from err
except CodeNotaryError as err:
raise UpdaterError(
f"CodeNotary error while processing version fetch: {err!s}",
_LOGGER.error,
) from err
# Parse data
try:
data = json.loads(data)

View File

@@ -0,0 +1,109 @@
"""Small wrapper for CodeNotary."""
from __future__ import annotations
import asyncio
import hashlib
import json
import logging
from pathlib import Path
import shlex
from typing import Final
from dirhash import dirhash
from ..exceptions import CodeNotaryBackendError, CodeNotaryError, CodeNotaryUntrusted
from . import clean_env
_LOGGER: logging.Logger = logging.getLogger(__name__)
_CAS_CMD: str = (
"cas authenticate --signerID {signer} --silent --output json --hash {sum}"
)
_CACHE: set[tuple[str, str]] = set()
_ATTR_ERROR: Final = "error"
_ATTR_STATUS: Final = "status"
_FALLBACK_ERROR: Final = "Unknown CodeNotary backend issue"
def calc_checksum(data: str | bytes) -> str:
"""Generate checksum for CodeNotary."""
if isinstance(data, str):
return hashlib.sha256(data.encode()).hexdigest()
return hashlib.sha256(data).hexdigest()
def calc_checksum_path_sourcecode(folder: Path) -> str:
"""Calculate checksum for a path source code.
Need catch OSError.
"""
return dirhash(folder.as_posix(), "sha256", match=["*.py"])
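For illustration, the two helpers hash different inputs (hashlib and Path are already imported in this module):
# calc_checksum hashes str/bytes content directly.
assert calc_checksum("hello") == hashlib.sha256(b"hello").hexdigest()
# calc_checksum_path_sourcecode hashes a tree of *.py files; the caller handles OSError.
# checksum = calc_checksum_path_sourcecode(Path("/usr/src/supervisor/supervisor"))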
# pylint: disable=unreachable
async def cas_validate(
signer: str,
checksum: str,
) -> None:
"""Validate data against CodeNotary."""
return
if (checksum, signer) in _CACHE:
return
# Generate command for request
command = shlex.split(_CAS_CMD.format(signer=signer, sum=checksum))
# Request notary authorization
_LOGGER.debug("Send cas command: %s", command)
try:
proc = await asyncio.create_subprocess_exec(
*command,
stdin=asyncio.subprocess.DEVNULL,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=clean_env(),
)
async with asyncio.timeout(15):
data, error = await proc.communicate()
except TimeoutError:
raise CodeNotaryBackendError(
"Timeout while processing CodeNotary", _LOGGER.warning
) from None
except OSError as err:
raise CodeNotaryError(
f"CodeNotary fatal error: {err!s}", _LOGGER.critical
) from err
# Check if Notarized
if proc.returncode != 0 and not data:
if error:
try:
error = error.decode("utf-8")
except UnicodeDecodeError as err:
raise CodeNotaryBackendError(_FALLBACK_ERROR, _LOGGER.warning) from err
if "not notarized" in error:
raise CodeNotaryUntrusted()
else:
error = _FALLBACK_ERROR
raise CodeNotaryBackendError(error, _LOGGER.warning)
# Parse data
try:
data_json = json.loads(data)
_LOGGER.debug("CodeNotary response with: %s", data_json)
except (json.JSONDecodeError, UnicodeDecodeError) as err:
raise CodeNotaryError(
f"Can't parse CodeNotary output: {data!s} - {err!s}", _LOGGER.error
) from err
if _ATTR_ERROR in data_json:
raise CodeNotaryBackendError(data_json[_ATTR_ERROR], _LOGGER.warning)
if data_json[_ATTR_STATUS] == 0:
_CACHE.add((checksum, signer))
else:
raise CodeNotaryUntrusted()
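Note the bare return at the top of cas_validate: validation is short-circuited and the subprocess flow below it is currently unreachable. If it were active, a caller might look like this sketch, mirroring Security.verify_own_content from this compare:
# Hypothetical caller; the signer value matches the one used by verify_own_content.
async def verify(data: bytes) -> None:
    try:
        await cas_validate("notary@home-assistant.io", calc_checksum(data))
    except CodeNotaryUntrusted:
        raise  # content is positively untrusted
    except CodeNotaryError:
        pass  # backend trouble is treated as a soft failure here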

View File

@@ -7,7 +7,13 @@ from collections.abc import Awaitable, Callable
import logging
from typing import Any, Protocol, cast
from dbus_fast import ErrorType, InvalidIntrospectionError, Message, MessageType
from dbus_fast import (
ErrorType,
InvalidIntrospectionError,
Message,
MessageType,
Variant,
)
from dbus_fast.aio.message_bus import MessageBus
from dbus_fast.aio.proxy_object import ProxyInterface, ProxyObject
from dbus_fast.errors import DBusError as DBusFastDBusError
@@ -259,7 +265,7 @@ class DBus:
"""
async def sync_property_change(
prop_interface: str, changed: dict[str, Any], invalidated: list[str]
prop_interface: str, changed: dict[str, Variant], invalidated: list[str]
) -> None:
"""Sync property changes to cache."""
if interface != prop_interface:

View File

@@ -5,20 +5,12 @@ from collections.abc import AsyncGenerator
from datetime import UTC, datetime
from functools import wraps
import json
import re
from aiohttp import ClientResponse
from supervisor.exceptions import MalformedBinaryEntryError
from supervisor.host.const import LogFormatter
_RE_ANSI_CSI_COLORS_PATTERN = re.compile(r"\x1B\[[0-9;]*m")
def _strip_ansi_colors(message: str) -> str:
"""Remove ANSI color codes from a message string."""
return _RE_ANSI_CSI_COLORS_PATTERN.sub("", message)
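For illustration, the regex strips CSI color sequences while leaving the text intact:
assert _strip_ansi_colors("\x1b[31merror\x1b[0m: boom") == "error: boom"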
def formatter(required_fields: list[str]):
"""Decorate journal entry formatters with list of required fields.
@@ -39,9 +31,9 @@ def formatter(required_fields: list[str]):
@formatter(["MESSAGE"])
def journal_plain_formatter(entries: dict[str, str], no_colors: bool = False) -> str:
def journal_plain_formatter(entries: dict[str, str]) -> str:
"""Format parsed journal entries as a plain message."""
return _strip_ansi_colors(entries["MESSAGE"]) if no_colors else entries["MESSAGE"]
return entries["MESSAGE"]
@formatter(
@@ -53,7 +45,7 @@ def journal_plain_formatter(entries: dict[str, str], no_colors: bool = False) ->
"MESSAGE",
]
)
def journal_verbose_formatter(entries: dict[str, str], no_colors: bool = False) -> str:
def journal_verbose_formatter(entries: dict[str, str]) -> str:
"""Format parsed journal entries to a journalctl-like format."""
ts = datetime.fromtimestamp(
int(entries["__REALTIME_TIMESTAMP"]) / 1e6, UTC
@@ -66,24 +58,14 @@ def journal_verbose_formatter(entries: dict[str, str], no_colors: bool = False)
else entries.get("SYSLOG_IDENTIFIER", "_UNKNOWN_")
)
message = (
_strip_ansi_colors(entries.get("MESSAGE", ""))
if no_colors
else entries.get("MESSAGE", "")
)
return f"{ts} {entries.get('_HOSTNAME', '')} {identifier}: {message}"
return f"{ts} {entries.get('_HOSTNAME', '')} {identifier}: {entries.get('MESSAGE', '')}"
async def journal_logs_reader(
journal_logs: ClientResponse,
log_formatter: LogFormatter = LogFormatter.PLAIN,
no_colors: bool = False,
journal_logs: ClientResponse, log_formatter: LogFormatter = LogFormatter.PLAIN
) -> AsyncGenerator[tuple[str | None, str]]:
"""Read logs from systemd journal line by line, formatted using the given formatter.
Optionally strip ANSI color codes from the entries' messages.
Returns a generator of (cursor, formatted_entry) tuples.
"""
match log_formatter:
@@ -102,10 +84,7 @@ async def journal_logs_reader(
# at EOF (likely race between at_eof and EOF check in readuntil)
if line == b"\n" or not line:
if entries:
yield (
entries.get("__CURSOR"),
formatter_(entries, no_colors=no_colors),
)
yield entries.get("__CURSOR"), formatter_(entries)
entries = {}
continue
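A hedged consumption sketch, using the no_colors variant of the signature shown on one side of this hunk:
# Hypothetical consumer: stream formatted lines, remembering the last cursor for resume.
async def collect_logs(resp: ClientResponse) -> tuple[str | None, list[str]]:
    cursor: str | None = None
    lines: list[str] = []
    async for cursor, line in journal_logs_reader(
        resp, LogFormatter.VERBOSE, no_colors=True
    ):
        lines.append(line)
    return cursor, lines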

View File

@@ -12,6 +12,7 @@ from .const import (
ATTR_AUTO_UPDATE,
ATTR_CHANNEL,
ATTR_CLI,
ATTR_CONTENT_TRUST,
ATTR_COUNTRY,
ATTR_DEBUG,
ATTR_DEBUG_BLOCK,
@@ -228,6 +229,7 @@ SCHEMA_INGRESS_CONFIG = vol.Schema(
# pylint: disable=no-value-for-parameter
SCHEMA_SECURITY_CONFIG = vol.Schema(
{
vol.Optional(ATTR_CONTENT_TRUST, default=True): vol.Boolean(),
vol.Optional(ATTR_PWNED, default=True): vol.Boolean(),
vol.Optional(ATTR_FORCE_SECURITY, default=False): vol.Boolean(),
},

View File

@@ -3,34 +3,24 @@
import asyncio
from datetime import timedelta
import errno
from http import HTTPStatus
from pathlib import Path
from typing import Any
from unittest.mock import MagicMock, PropertyMock, call, patch
from unittest.mock import MagicMock, PropertyMock, patch
import aiodocker
from awesomeversion import AwesomeVersion
from docker.errors import APIError, DockerException, NotFound
from docker.errors import DockerException, ImageNotFound, NotFound
import pytest
from securetar import SecureTarFile
from supervisor.addons.addon import Addon
from supervisor.addons.const import AddonBackupMode
from supervisor.addons.model import AddonModel
from supervisor.config import CoreConfig
from supervisor.const import AddonBoot, AddonState, BusEvent
from supervisor.coresys import CoreSys
from supervisor.docker.addon import DockerAddon
from supervisor.docker.const import ContainerState
from supervisor.docker.manager import CommandReturn, DockerAPI
from supervisor.docker.manager import CommandReturn
from supervisor.docker.monitor import DockerContainerStateEvent
from supervisor.exceptions import (
AddonPrePostBackupCommandReturnedError,
AddonsJobError,
AddonUnknownError,
AudioUpdateError,
HassioError,
)
from supervisor.exceptions import AddonsError, AddonsJobError, AudioUpdateError
from supervisor.hardware.helper import HwHelper
from supervisor.ingress import Ingress
from supervisor.store.repository import Repository
@@ -509,26 +499,31 @@ async def test_backup_with_pre_post_command(
@pytest.mark.parametrize(
("container_get_side_effect", "exec_run_side_effect", "exc_type_raised"),
"get_error,exception_on_exec",
[
(NotFound("missing"), [(1, None)], AddonUnknownError),
(DockerException(), [(1, None)], AddonUnknownError),
(None, DockerException(), AddonUnknownError),
(None, [(1, None)], AddonPrePostBackupCommandReturnedError),
(NotFound("missing"), False),
(DockerException(), False),
(None, True),
(None, False),
],
)
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_backup_with_pre_command_error(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
container_get_side_effect: DockerException | None,
exec_run_side_effect: DockerException | list[tuple[int, Any]],
exc_type_raised: type[HassioError],
get_error: DockerException | None,
exception_on_exec: bool,
tmp_supervisor_data,
path_extern,
) -> None:
"""Test backing up an addon with error running pre command."""
coresys.docker.containers.get.side_effect = container_get_side_effect
container.exec_run.side_effect = exec_run_side_effect
if get_error:
coresys.docker.containers.get.side_effect = get_error
if exception_on_exec:
container.exec_run.side_effect = DockerException()
else:
container.exec_run.return_value = (1, None)
install_addon_ssh.path_data.mkdir()
await install_addon_ssh.load()
@@ -537,7 +532,7 @@ async def test_backup_with_pre_command_error(
with (
patch.object(DockerAddon, "is_running", return_value=True),
patch.object(Addon, "backup_pre", new=PropertyMock(return_value="backup_pre")),
pytest.raises(exc_type_raised),
pytest.raises(AddonsError),
):
assert await install_addon_ssh.backup(tarfile) is None
@@ -866,14 +861,16 @@ async def test_addon_loads_wrong_image(
container.remove.assert_called_with(force=True, v=True)
# one for removing the addon, one for removing the addon builder
assert coresys.docker.images.delete.call_count == 2
assert coresys.docker.images.remove.call_count == 2
assert coresys.docker.images.delete.call_args_list[0] == call(
"local/aarch64-addon-ssh:latest", force=True
)
assert coresys.docker.images.delete.call_args_list[1] == call(
"local/aarch64-addon-ssh:9.2.1", force=True
)
assert coresys.docker.images.remove.call_args_list[0].kwargs == {
"image": "local/aarch64-addon-ssh:latest",
"force": True,
}
assert coresys.docker.images.remove.call_args_list[1].kwargs == {
"image": "local/aarch64-addon-ssh:9.2.1",
"force": True,
}
mock_run_command.assert_called_once()
assert mock_run_command.call_args.args[0] == "docker.io/library/docker"
assert mock_run_command.call_args.kwargs["version"] == "1.0.0-cli"
@@ -897,9 +894,7 @@ async def test_addon_loads_missing_image(
mock_amd64_arch_supported,
):
"""Test addon corrects a missing image on load."""
coresys.docker.images.inspect.side_effect = aiodocker.DockerError(
HTTPStatus.NOT_FOUND, {"message": "missing"}
)
coresys.docker.images.get.side_effect = ImageNotFound("missing")
with (
patch("pathlib.Path.is_file", return_value=True),
@@ -931,51 +926,41 @@ async def test_addon_loads_missing_image(
assert install_addon_ssh.image == "local/amd64-addon-ssh"
@pytest.mark.parametrize(
"pull_image_exc",
[APIError("error"), aiodocker.DockerError(400, {"message": "error"})],
)
@pytest.mark.usefixtures("container", "mock_amd64_arch_supported")
async def test_addon_load_succeeds_with_docker_errors(
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
caplog: pytest.LogCaptureFixture,
pull_image_exc: Exception,
mock_amd64_arch_supported,
):
"""Docker errors while building/pulling an image during load should not raise and fail setup."""
# Build env invalid failure
coresys.docker.images.inspect.side_effect = aiodocker.DockerError(
HTTPStatus.NOT_FOUND, {"message": "missing"}
)
coresys.docker.images.get.side_effect = ImageNotFound("missing")
caplog.clear()
await install_addon_ssh.load()
assert "Cannot build addon 'local_ssh' because dockerfile is missing" in caplog.text
assert "Invalid build environment" in caplog.text
# Image build failure
coresys.docker.images.build.side_effect = DockerException()
caplog.clear()
with (
patch("pathlib.Path.is_file", return_value=True),
patch.object(
CoreConfig, "local_to_extern_path", return_value="/addon/path/on/host"
),
patch.object(
DockerAPI,
"run_command",
return_value=MagicMock(exit_code=1, output=b"error"),
type(coresys.config),
"local_to_extern_path",
return_value="/addon/path/on/host",
),
):
await install_addon_ssh.load()
assert (
"Can't build local/amd64-addon-ssh:9.2.1: Docker build failed for local/amd64-addon-ssh:9.2.1 (exit code 1). Build output:\nerror"
in caplog.text
)
assert "Can't build local/amd64-addon-ssh:9.2.1" in caplog.text
# Image pull failure
install_addon_ssh.data["image"] = "test/amd64-addon-ssh"
coresys.docker.images.build.reset_mock(side_effect=True)
coresys.docker.pull_image.side_effect = DockerException()
caplog.clear()
with patch.object(DockerAPI, "pull_image", side_effect=pull_image_exc):
await install_addon_ssh.load()
assert "Can't install test/amd64-addon-ssh:9.2.1:" in caplog.text
await install_addon_ssh.load()
assert "Unknown error with test/amd64-addon-ssh:9.2.1" in caplog.text
async def test_addon_manual_only_boot(coresys: CoreSys, install_addon_example: Addon):


@@ -1,18 +1,12 @@
"""Test addon build."""
import base64
import json
from pathlib import Path
from unittest.mock import PropertyMock, patch
from awesomeversion import AwesomeVersion
import pytest
from supervisor.addons.addon import Addon
from supervisor.addons.build import AddonBuild
from supervisor.coresys import CoreSys
from supervisor.docker.const import DOCKER_HUB
from supervisor.exceptions import AddonBuildDockerfileMissingError
from tests.common import is_in_list
@@ -35,7 +29,7 @@ async def test_platform_set(coresys: CoreSys, install_addon_ssh: Addon):
),
):
args = await coresys.run_in_executor(
build.get_docker_args, AwesomeVersion("latest"), "test-image:latest", None
build.get_docker_args, AwesomeVersion("latest"), "test-image:latest"
)
assert is_in_list(["--platform", "linux/amd64"], args["command"])
@@ -59,7 +53,7 @@ async def test_dockerfile_evaluation(coresys: CoreSys, install_addon_ssh: Addon)
),
):
args = await coresys.run_in_executor(
build.get_docker_args, AwesomeVersion("latest"), "test-image:latest", None
build.get_docker_args, AwesomeVersion("latest"), "test-image:latest"
)
assert is_in_list(["--file", "Dockerfile"], args["command"])
@@ -87,7 +81,7 @@ async def test_dockerfile_evaluation_arch(coresys: CoreSys, install_addon_ssh: A
),
):
args = await coresys.run_in_executor(
build.get_docker_args, AwesomeVersion("latest"), "test-image:latest", None
build.get_docker_args, AwesomeVersion("latest"), "test-image:latest"
)
assert is_in_list(["--file", "Dockerfile.aarch64"], args["command"])
@@ -108,11 +102,11 @@ async def test_build_valid(coresys: CoreSys, install_addon_ssh: Addon):
type(coresys.arch), "default", new=PropertyMock(return_value="aarch64")
),
):
assert (await build.is_valid()) is None
assert await build.is_valid()
async def test_build_invalid(coresys: CoreSys, install_addon_ssh: Addon):
"""Test build not supported because Dockerfile missing for specified architecture."""
"""Test platform set in docker args."""
build = await AddonBuild(coresys, install_addon_ssh).load_config()
with (
patch.object(
@@ -121,161 +115,5 @@ async def test_build_invalid(coresys: CoreSys, install_addon_ssh: Addon):
patch.object(
type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
),
pytest.raises(AddonBuildDockerfileMissingError),
):
await build.is_valid()
async def test_docker_config_no_registries(coresys: CoreSys, install_addon_ssh: Addon):
"""Test docker config generation when no registries configured."""
build = await AddonBuild(coresys, install_addon_ssh).load_config()
# No registries configured by default
assert build.get_docker_config_json() is None
async def test_docker_config_no_matching_registry(
coresys: CoreSys, install_addon_ssh: Addon
):
"""Test docker config generation when registry doesn't match base image."""
build = await AddonBuild(coresys, install_addon_ssh).load_config()
# Configure a registry that doesn't match the base image
# pylint: disable-next=protected-access
coresys.docker.config._data["registries"] = {
"some.other.registry": {"username": "user", "password": "pass"}
}
with (
patch.object(
type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
),
patch.object(
type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
),
):
# Base image is ghcr.io/home-assistant/... which doesn't match
assert build.get_docker_config_json() is None
async def test_docker_config_matching_registry(
coresys: CoreSys, install_addon_ssh: Addon
):
"""Test docker config generation when registry matches base image."""
build = await AddonBuild(coresys, install_addon_ssh).load_config()
# Configure ghcr.io registry which matches the default base image
# pylint: disable-next=protected-access
coresys.docker.config._data["registries"] = {
"ghcr.io": {"username": "testuser", "password": "testpass"}
}
with (
patch.object(
type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
),
patch.object(
type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
),
):
config_json = build.get_docker_config_json()
assert config_json is not None
config = json.loads(config_json)
assert "auths" in config
assert "ghcr.io" in config["auths"]
# Verify base64-encoded credentials
expected_auth = base64.b64encode(b"testuser:testpass").decode()
assert config["auths"]["ghcr.io"]["auth"] == expected_auth
async def test_docker_config_docker_hub(coresys: CoreSys, install_addon_ssh: Addon):
"""Test docker config generation for Docker Hub registry."""
build = await AddonBuild(coresys, install_addon_ssh).load_config()
# Configure Docker Hub registry
# pylint: disable-next=protected-access
coresys.docker.config._data["registries"] = {
DOCKER_HUB: {"username": "hubuser", "password": "hubpass"}
}
# Mock base_image to return a Docker Hub image (no registry prefix)
with patch.object(
type(build),
"base_image",
new=PropertyMock(return_value="library/alpine:latest"),
):
config_json = build.get_docker_config_json()
assert config_json is not None
config = json.loads(config_json)
# Docker Hub uses special URL as key
assert "https://index.docker.io/v1/" in config["auths"]
expected_auth = base64.b64encode(b"hubuser:hubpass").decode()
assert config["auths"]["https://index.docker.io/v1/"]["auth"] == expected_auth
async def test_docker_args_with_config_path(coresys: CoreSys, install_addon_ssh: Addon):
"""Test docker args include config volume when path provided."""
build = await AddonBuild(coresys, install_addon_ssh).load_config()
with (
patch.object(
type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
),
patch.object(
type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
),
patch.object(
type(coresys.config),
"local_to_extern_path",
side_effect=lambda p: f"/extern{p}",
),
):
config_path = Path("/data/supervisor/tmp/config.json")
args = await coresys.run_in_executor(
build.get_docker_args,
AwesomeVersion("latest"),
"test-image:latest",
config_path,
)
# Check that config is mounted
assert "/extern/data/supervisor/tmp/config.json" in args["volumes"]
assert (
args["volumes"]["/extern/data/supervisor/tmp/config.json"]["bind"]
== "/root/.docker/config.json"
)
assert args["volumes"]["/extern/data/supervisor/tmp/config.json"]["mode"] == "ro"
async def test_docker_args_without_config_path(
coresys: CoreSys, install_addon_ssh: Addon
):
"""Test docker args don't include config volume when no path provided."""
build = await AddonBuild(coresys, install_addon_ssh).load_config()
with (
patch.object(
type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
),
patch.object(
type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
),
patch.object(
type(coresys.config),
"local_to_extern_path",
return_value="/addon/path/on/host",
),
):
args = await coresys.run_in_executor(
build.get_docker_args, AwesomeVersion("latest"), "test-image:latest", None
)
# Only docker socket and addon path should be mounted
assert len(args["volumes"]) == 2
# Verify no docker config mount
for bind in args["volumes"].values():
assert bind["bind"] != "/root/.docker/config.json"
assert not await build.is_valid()
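
The two docker-args tests removed above pin down the volume layout get_docker_args used to produce. With a config path passed, the expected args["volumes"] mapping looked roughly like this (host paths illustrative, reconstructed from the removed assertions):

# Shape of args["volumes"] when a docker config path is provided:
volumes = {
    "/extern/data/supervisor/tmp/config.json": {
        "bind": "/root/.docker/config.json",
        "mode": "ro",
    },
    # ...plus the docker socket and the addon source path,
    # which are mounted whether or not a config path is given.
}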


@@ -4,13 +4,13 @@ import asyncio
from collections.abc import AsyncGenerator, Generator
from copy import deepcopy
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, call, patch
from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch
from awesomeversion import AwesomeVersion
import pytest
from supervisor.addons.addon import Addon
from supervisor.arch import CpuArchManager
from supervisor.arch import CpuArch
from supervisor.config import CoreConfig
from supervisor.const import AddonBoot, AddonStartup, AddonState, BusEvent
from supervisor.coresys import CoreSys
@@ -54,9 +54,7 @@ async def fixture_mock_arch_disk() -> AsyncGenerator[None]:
"""Mock supported arch and disk space."""
with (
patch("shutil.disk_usage", return_value=(42, 42, 2 * (1024.0**3))),
patch.object(
CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
),
patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
):
yield
@@ -516,13 +514,19 @@ async def test_shared_image_kept_on_uninstall(
latest = f"{install_addon_example.image}:latest"
await coresys.addons.uninstall("local_example2")
coresys.docker.images.delete.assert_not_called()
coresys.docker.images.remove.assert_not_called()
assert not coresys.addons.get("local_example2", local_only=True)
await coresys.addons.uninstall("local_example")
assert coresys.docker.images.delete.call_count == 2
assert coresys.docker.images.delete.call_args_list[0] == call(latest, force=True)
assert coresys.docker.images.delete.call_args_list[1] == call(image, force=True)
assert coresys.docker.images.remove.call_count == 2
assert coresys.docker.images.remove.call_args_list[0].kwargs == {
"image": latest,
"force": True,
}
assert coresys.docker.images.remove.call_args_list[1].kwargs == {
"image": image,
"force": True,
}
assert not coresys.addons.get("local_example", local_only=True)
@@ -550,17 +554,19 @@ async def test_shared_image_kept_on_update(
assert example_2.version == "1.2.0"
assert install_addon_example_image.version == "1.2.0"
image_new = {"Id": "image_new", "RepoTags": ["image_new:latest"]}
image_old = {"Id": "image_old", "RepoTags": ["image_old:latest"]}
docker.images.inspect.side_effect = [image_new, image_old]
image_new = MagicMock()
image_new.id = "image_new"
image_old = MagicMock()
image_old.id = "image_old"
docker.images.get.side_effect = [image_new, image_old]
docker.images.list.return_value = [image_new, image_old]
with patch.object(DockerAPI, "pull_image", return_value=image_new):
await coresys.addons.update("local_example2")
docker.images.delete.assert_not_called()
docker.images.remove.assert_not_called()
assert example_2.version == "1.3.0"
docker.images.inspect.side_effect = [image_new]
docker.images.get.side_effect = [image_new]
await coresys.addons.update("local_example_image")
docker.images.delete.assert_called_once_with("image_old", force=True)
docker.images.remove.assert_called_once_with("image_old", force=True)
assert install_addon_example_image.version == "1.3.0"


@@ -1 +1,95 @@
"""Test for API calls."""
from unittest.mock import AsyncMock, MagicMock
from aiohttp.test_utils import TestClient
from supervisor.coresys import CoreSys
from supervisor.host.const import LogFormat
DEFAULT_LOG_RANGE = "entries=:-99:100"
DEFAULT_LOG_RANGE_FOLLOW = "entries=:-99:18446744073709551615"
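
Both constants use the systemd journal-gatewayd Range header format, entries=[cursor][[:num_skip]:num_entries]: an empty cursor plus num_skip -99 starts 99 entries back from the end, num_entries 100 then caps the response at the last 100 lines, and 18446744073709551615 is UINT64_MAX, i.e. effectively unlimited, for the follow endpoints. A tiny helper makes the construction explicit (hypothetical, for illustration only):

def entries_range(num_skip: int, num_entries: int, cursor: str = "") -> str:
    """Build a journal-gatewayd Range header value, e.g. "entries=:-99:100"."""
    return f"entries={cursor}:{num_skip}:{num_entries}"

assert entries_range(-99, 100) == "entries=:-99:100"
assert entries_range(-99, 2**64 - 1) == "entries=:-99:18446744073709551615"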
async def common_test_api_advanced_logs(
path_prefix: str,
syslog_identifier: str,
api_client: TestClient,
journald_logs: MagicMock,
coresys: CoreSys,
os_available: None,
):
"""Template for tests of endpoints using advanced logs."""
resp = await api_client.get(f"{path_prefix}/logs")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={"SYSLOG_IDENTIFIER": syslog_identifier},
range_header=DEFAULT_LOG_RANGE,
accept=LogFormat.JOURNAL,
)
journald_logs.reset_mock()
resp = await api_client.get(f"{path_prefix}/logs/follow")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={"SYSLOG_IDENTIFIER": syslog_identifier, "follow": ""},
range_header=DEFAULT_LOG_RANGE_FOLLOW,
accept=LogFormat.JOURNAL,
)
journald_logs.reset_mock()
mock_response = MagicMock()
mock_response.text = AsyncMock(
return_value='{"CONTAINER_LOG_EPOCH": "12345"}\n{"CONTAINER_LOG_EPOCH": "12345"}\n'
)
journald_logs.return_value.__aenter__.return_value = mock_response
resp = await api_client.get(f"{path_prefix}/logs/latest")
assert resp.status == 200
assert journald_logs.call_count == 2
# Check the first call for getting epoch
epoch_call = journald_logs.call_args_list[0]
assert epoch_call[1]["params"] == {"CONTAINER_NAME": syslog_identifier}
assert epoch_call[1]["range_header"] == "entries=:-1:2"
# Check the second call for getting logs with the epoch
logs_call = journald_logs.call_args_list[1]
assert logs_call[1]["params"]["SYSLOG_IDENTIFIER"] == syslog_identifier
assert logs_call[1]["params"]["CONTAINER_LOG_EPOCH"] == "12345"
assert logs_call[1]["range_header"] == "entries=:0:18446744073709551615"
journald_logs.reset_mock()
resp = await api_client.get(f"{path_prefix}/logs/boots/0")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={"SYSLOG_IDENTIFIER": syslog_identifier, "_BOOT_ID": "ccc"},
range_header=DEFAULT_LOG_RANGE,
accept=LogFormat.JOURNAL,
)
journald_logs.reset_mock()
resp = await api_client.get(f"{path_prefix}/logs/boots/0/follow")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={
"SYSLOG_IDENTIFIER": syslog_identifier,
"_BOOT_ID": "ccc",
"follow": "",
},
range_header=DEFAULT_LOG_RANGE_FOLLOW,
accept=LogFormat.JOURNAL,
)
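
Callers pass only the endpoint prefix and the addon's syslog identifier plus the shared fixtures; the addon-logs test later in this diff uses the helper like so (repeated here as a usage sketch):

async def test_api_addon_logs(
    api_client, journald_logs, coresys, os_available, install_addon_ssh
):
    """Test addon logs."""
    await common_test_api_advanced_logs(
        "/addons/local_ssh",
        "addon_local_ssh",
        api_client,
        journald_logs,
        coresys,
        os_available,
    )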


@@ -1,149 +0,0 @@
"""Fixtures for API tests."""
from collections.abc import Awaitable, Callable
from unittest.mock import ANY, AsyncMock, MagicMock
from aiohttp.test_utils import TestClient
import pytest
from supervisor.coresys import CoreSys
from supervisor.host.const import LogFormat, LogFormatter
DEFAULT_LOG_RANGE = "entries=:-99:100"
DEFAULT_LOG_RANGE_FOLLOW = "entries=:-99:18446744073709551615"
async def _common_test_api_advanced_logs(
path_prefix: str,
syslog_identifier: str,
api_client: TestClient,
journald_logs: MagicMock,
coresys: CoreSys,
os_available: None,
journal_logs_reader: MagicMock,
):
"""Template for tests of endpoints using advanced logs."""
resp = await api_client.get(f"{path_prefix}/logs")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={"SYSLOG_IDENTIFIER": syslog_identifier},
range_header=DEFAULT_LOG_RANGE,
accept=LogFormat.JOURNAL,
)
journal_logs_reader.assert_called_with(ANY, LogFormatter.PLAIN, False)
journald_logs.reset_mock()
journal_logs_reader.reset_mock()
resp = await api_client.get(f"{path_prefix}/logs?no_colors")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={"SYSLOG_IDENTIFIER": syslog_identifier},
range_header=DEFAULT_LOG_RANGE,
accept=LogFormat.JOURNAL,
)
journal_logs_reader.assert_called_with(ANY, LogFormatter.PLAIN, True)
journald_logs.reset_mock()
journal_logs_reader.reset_mock()
resp = await api_client.get(f"{path_prefix}/logs/follow")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={"SYSLOG_IDENTIFIER": syslog_identifier, "follow": ""},
range_header=DEFAULT_LOG_RANGE_FOLLOW,
accept=LogFormat.JOURNAL,
)
journal_logs_reader.assert_called_with(ANY, LogFormatter.PLAIN, False)
journald_logs.reset_mock()
journal_logs_reader.reset_mock()
mock_response = MagicMock()
mock_response.text = AsyncMock(
return_value='{"CONTAINER_LOG_EPOCH": "12345"}\n{"CONTAINER_LOG_EPOCH": "12345"}\n'
)
journald_logs.return_value.__aenter__.return_value = mock_response
resp = await api_client.get(f"{path_prefix}/logs/latest")
assert resp.status == 200
assert journald_logs.call_count == 2
# Check the first call for getting epoch
epoch_call = journald_logs.call_args_list[0]
assert epoch_call[1]["params"] == {"CONTAINER_NAME": syslog_identifier}
assert epoch_call[1]["range_header"] == "entries=:-1:2"
# Check the second call for getting logs with the epoch
logs_call = journald_logs.call_args_list[1]
assert logs_call[1]["params"]["SYSLOG_IDENTIFIER"] == syslog_identifier
assert logs_call[1]["params"]["CONTAINER_LOG_EPOCH"] == "12345"
assert logs_call[1]["range_header"] == "entries=:0:18446744073709551615"
journal_logs_reader.assert_called_with(ANY, LogFormatter.PLAIN, True)
journald_logs.reset_mock()
journal_logs_reader.reset_mock()
resp = await api_client.get(f"{path_prefix}/logs/boots/0")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={"SYSLOG_IDENTIFIER": syslog_identifier, "_BOOT_ID": "ccc"},
range_header=DEFAULT_LOG_RANGE,
accept=LogFormat.JOURNAL,
)
journald_logs.reset_mock()
resp = await api_client.get(f"{path_prefix}/logs/boots/0/follow")
assert resp.status == 200
assert resp.content_type == "text/plain"
journald_logs.assert_called_once_with(
params={
"SYSLOG_IDENTIFIER": syslog_identifier,
"_BOOT_ID": "ccc",
"follow": "",
},
range_header=DEFAULT_LOG_RANGE_FOLLOW,
accept=LogFormat.JOURNAL,
)
@pytest.fixture
async def advanced_logs_tester(
api_client: TestClient,
journald_logs: MagicMock,
coresys: CoreSys,
os_available,
journal_logs_reader: MagicMock,
) -> Callable[[str, str], Awaitable[None]]:
"""Fixture that returns a function to test advanced logs endpoints.
This allows tests to avoid explicitly passing all the required fixtures.
Usage:
async def test_my_logs(advanced_logs_tester):
await advanced_logs_tester("/path/prefix", "syslog_identifier")
"""
async def test_logs(path_prefix: str, syslog_identifier: str):
await _common_test_api_advanced_logs(
path_prefix,
syslog_identifier,
api_client,
journald_logs,
coresys,
os_available,
journal_logs_reader,
)
return test_logs


@@ -5,12 +5,11 @@ from unittest.mock import MagicMock, PropertyMock, patch
from aiohttp import ClientResponse
from aiohttp.test_utils import TestClient
from docker.errors import DockerException
import pytest
from supervisor.addons.addon import Addon
from supervisor.addons.build import AddonBuild
from supervisor.arch import CpuArchManager
from supervisor.arch import CpuArch
from supervisor.const import AddonState
from supervisor.coresys import CoreSys
from supervisor.docker.addon import DockerAddon
@@ -21,6 +20,7 @@ from supervisor.exceptions import HassioError
from supervisor.store.repository import Repository
from ..const import TEST_ADDON_SLUG
from . import common_test_api_advanced_logs
def _create_test_event(name: str, state: ContainerState) -> DockerContainerStateEvent:
@@ -72,11 +72,21 @@ async def test_addons_info_not_installed(
async def test_api_addon_logs(
advanced_logs_tester,
api_client: TestClient,
journald_logs: MagicMock,
coresys: CoreSys,
os_available,
install_addon_ssh: Addon,
):
"""Test addon logs."""
await advanced_logs_tester("/addons/local_ssh", "addon_local_ssh")
await common_test_api_advanced_logs(
"/addons/local_ssh",
"addon_local_ssh",
api_client,
journald_logs,
coresys,
os_available,
)
async def test_api_addon_logs_not_installed(api_client: TestClient):
@@ -237,9 +247,7 @@ async def test_api_addon_rebuild_healthcheck(
patch.object(AddonBuild, "is_valid", return_value=True),
patch.object(DockerAddon, "is_running", return_value=False),
patch.object(Addon, "need_build", new=PropertyMock(return_value=True)),
patch.object(
CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
),
patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
patch.object(DockerAddon, "run", new=container_events_task),
patch.object(
coresys.docker,
@@ -311,9 +319,7 @@ async def test_api_addon_rebuild_force(
patch.object(
Addon, "need_build", new=PropertyMock(return_value=False)
), # Image-based
patch.object(
CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
),
patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
):
resp = await api_client.post("/addons/local_ssh/rebuild")
@@ -331,9 +337,7 @@ async def test_api_addon_rebuild_force(
patch.object(
Addon, "need_build", new=PropertyMock(return_value=False)
), # Image-based
patch.object(
CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
),
patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
patch.object(DockerAddon, "run", new=container_events_task),
patch.object(
coresys.docker,
@@ -478,11 +482,6 @@ async def test_addon_options_boot_mode_manual_only_invalid(
body["message"]
== "Addon local_example boot option is set to manual_only so it cannot be changed"
)
assert body["error_key"] == "addon_boot_config_cannot_change_error"
assert body["extra_fields"] == {
"addon": "local_example",
"boot_config": "manual_only",
}
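
The removed assertions document the structured error contract of the options endpoint; the JSON error body they describe has this shape (values copied from the assertions above):

# Structured API error body for a rejected boot-mode change:
error_body = {
    "message": (
        "Addon local_example boot option is set to manual_only "
        "so it cannot be changed"
    ),
    "error_key": "addon_boot_config_cannot_change_error",
    "extra_fields": {"addon": "local_example", "boot_config": "manual_only"},
}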
async def get_message(resp: ClientResponse, json_expected: bool) -> str:
@@ -551,154 +550,3 @@ async def test_addon_not_installed(
resp = await api_client.request(method, url)
assert resp.status == 400
assert await get_message(resp, json_expected) == "Addon is not installed"
async def test_addon_set_options(api_client: TestClient, install_addon_example: Addon):
"""Test setting options for an addon."""
resp = await api_client.post(
"/addons/local_example/options", json={"options": {"message": "test"}}
)
assert resp.status == 200
assert install_addon_example.options == {"message": "test"}
async def test_addon_reset_options(
api_client: TestClient, install_addon_example: Addon
):
"""Test resetting options for an addon to defaults.
Fixes SUPERVISOR-171F.
"""
# First set some custom options
install_addon_example.options = {"message": "custom"}
assert install_addon_example.persist["options"] == {"message": "custom"}
# Reset to defaults by sending null
resp = await api_client.post(
"/addons/local_example/options", json={"options": None}
)
assert resp.status == 200
# Persisted options should be empty (meaning defaults will be used)
assert install_addon_example.persist["options"] == {}
async def test_addon_set_options_error(
api_client: TestClient, install_addon_example: Addon
):
"""Test setting options for an addon."""
resp = await api_client.post(
"/addons/local_example/options", json={"options": {"message": True}}
)
assert resp.status == 400
body = await resp.json()
assert (
body["message"]
== "Add-on local_example has invalid options: not a valid value. Got {'message': True}"
)
assert body["error_key"] == "addon_configuration_invalid_error"
assert body["extra_fields"] == {
"addon": "local_example",
"validation_error": "not a valid value. Got {'message': True}",
}
async def test_addon_start_options_error(
api_client: TestClient,
install_addon_example: Addon,
caplog: pytest.LogCaptureFixture,
):
"""Test error writing options when trying to start addon."""
install_addon_example.options = {"message": "hello"}
# Simulate OS error trying to write the file
with patch("supervisor.utils.json.atomic_write", side_effect=OSError("fail")):
resp = await api_client.post("/addons/local_example/start")
assert resp.status == 500
body = await resp.json()
assert (
body["message"]
== "An unknown error occurred with addon local_example. Check supervisor logs for details (check with 'ha supervisor logs')"
)
assert body["error_key"] == "addon_unknown_error"
assert body["extra_fields"] == {
"addon": "local_example",
"logs_command": "ha supervisor logs",
}
assert "Add-on local_example can't write options" in caplog.text
# Simulate an update with a breaking change for options schema creating failure on start
caplog.clear()
install_addon_example.data["schema"] = {"message": "bool"}
resp = await api_client.post("/addons/local_example/start")
assert resp.status == 400
body = await resp.json()
assert (
body["message"]
== "Add-on local_example has invalid options: expected boolean. Got {'message': 'hello'}"
)
assert body["error_key"] == "addon_configuration_invalid_error"
assert body["extra_fields"] == {
"addon": "local_example",
"validation_error": "expected boolean. Got {'message': 'hello'}",
}
assert (
"Add-on local_example has invalid options: expected boolean. Got {'message': 'hello'}"
in caplog.text
)
@pytest.mark.parametrize(("method", "action"), [("get", "stats"), ("post", "stdin")])
@pytest.mark.usefixtures("install_addon_example")
async def test_addon_not_running_error(
api_client: TestClient, method: str, action: str
):
"""Test addon not running error for endpoints that require that."""
with patch.object(Addon, "with_stdin", new=PropertyMock(return_value=True)):
resp = await api_client.request(method, f"/addons/local_example/{action}")
assert resp.status == 400
body = await resp.json()
assert body["message"] == "Add-on local_example is not running"
assert body["error_key"] == "addon_not_running_error"
assert body["extra_fields"] == {"addon": "local_example"}
@pytest.mark.usefixtures("install_addon_example")
async def test_addon_write_stdin_not_supported_error(api_client: TestClient):
"""Test error when trying to write stdin to addon that does not support it."""
resp = await api_client.post("/addons/local_example/stdin")
assert resp.status == 400
body = await resp.json()
assert body["message"] == "Add-on local_example does not support writing to stdin"
assert body["error_key"] == "addon_not_supported_write_stdin_error"
assert body["extra_fields"] == {"addon": "local_example"}
@pytest.mark.usefixtures("install_addon_ssh")
async def test_addon_rebuild_fails_error(api_client: TestClient, coresys: CoreSys):
"""Test error when build fails during rebuild for addon."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
coresys.docker.containers.run.side_effect = DockerException("fail")
with (
patch.object(
CpuArchManager, "supported", new=PropertyMock(return_value=["aarch64"])
),
patch.object(
CpuArchManager, "default", new=PropertyMock(return_value="aarch64")
),
patch.object(AddonBuild, "get_docker_args", return_value={}),
):
resp = await api_client.post("/addons/local_ssh/rebuild")
assert resp.status == 500
body = await resp.json()
assert (
body["message"]
== "An unknown error occurred while trying to build the image for addon local_ssh. Check supervisor logs for details (check with 'ha supervisor logs')"
)
assert body["error_key"] == "addon_build_failed_unknown_error"
assert body["extra_fields"] == {
"addon": "local_ssh",
"logs_command": "ha supervisor logs",
}
