mirror of https://github.com/home-assistant/supervisor.git
synced 2025-12-12 10:58:41 +00:00

Compare commits
50 Commits
improve-pr ... container-
| SHA1 |
|---|
| dc44e117a9 |
| 4df0db9df4 |
| ed2275a8cf |
| c29a82c47d |
| 0599238217 |
| b30be21df4 |
| 7d2bfe8fa6 |
| 27c53048f6 |
| 88ab5e9196 |
| b7a7475d47 |
| 5fe6b934e2 |
| a2d301ed27 |
| cdef1831ba |
| b79130816b |
| 923bc2ba87 |
| 0f6b211151 |
| 054c6d0365 |
| d920bde7e4 |
| 9862499751 |
| 287a58e004 |
| 2993a23711 |
| 3cae17cb79 |
| cd4e7f2530 |
| 5d02b09a0d |
| 6f12d2cb6f |
| f0db82d715 |
| 4d9e2838fe |
| 382f0e8aef |
| 3b3db2a9bc |
| 7895bc9007 |
| 81b7e54b18 |
| d203f20b7f |
| fea8159ccf |
| aeb8e59da4 |
| bee0a4482e |
| 37cc078144 |
| 20f993e891 |
| d220fa801f |
| abeee95eb1 |
| 50d31202ae |
| bac072a985 |
| 2fc6a7dcab |
| fa490210cd |
| ba82eb0620 |
| 11e3fa0bb7 |
| 9466111d56 |
| 5ec3bea0dd |
| 72159a0ae2 |
| 0a7b26187d |
| 2dc1f9224e |
@@ -1,6 +1,7 @@
 # General files
 .git
 .github
 .gitkeep
 .devcontainer
 .vscode
89  .github/workflows/builder.yml  vendored

@@ -53,10 +53,10 @@ jobs:
       version: ${{ steps.version.outputs.version }}
       channel: ${{ steps.version.outputs.channel }}
       publish: ${{ steps.version.outputs.publish }}
-      requirements: ${{ steps.requirements.outputs.changed }}
+      build_wheels: ${{ steps.requirements.outputs.build_wheels }}
     steps:
       - name: Checkout the repository
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           fetch-depth: 0

@@ -72,20 +72,25 @@ jobs:
       - name: Get changed files
         id: changed_files
-        if: steps.version.outputs.publish == 'false'
+        if: github.event_name != 'release'
         uses: masesgroup/retrieve-changed-files@491e80760c0e28d36ca6240a27b1ccb8e1402c13 # v3.0.0

       - name: Check if requirements files changed
         id: requirements
         run: |
           if [[ "${{ steps.changed_files.outputs.all }}" =~ (requirements.txt|build.yaml) ]]; then
             echo "changed=true" >> "$GITHUB_OUTPUT"
+          fi
+          # No wheels build necessary for releases
+          if [[ "${{ github.event_name }}" == "release" ]]; then
+            echo "build_wheels=false" >> "$GITHUB_OUTPUT"
+          elif [[ "${{ steps.changed_files.outputs.all }}" =~ (requirements\.txt|build\.yaml|\.github/workflows/builder\.yml) ]]; then
+            echo "build_wheels=true" >> "$GITHUB_OUTPUT"
+          else
+            echo "build_wheels=false" >> "$GITHUB_OUTPUT"
           fi

   build:
     name: Build ${{ matrix.arch }} supervisor
     needs: init
-    runs-on: ubuntu-latest
+    runs-on: ${{ matrix.runs-on }}
     permissions:
       contents: read
       id-token: write

@@ -93,34 +98,66 @@ jobs:
     strategy:
       matrix:
         arch: ${{ fromJson(needs.init.outputs.architectures) }}
+        include:
+          - runs-on: ubuntu-24.04
+          - runs-on: ubuntu-24.04-arm
+            arch: aarch64
+    env:
+      WHEELS_ABI: cp313
+      WHEELS_TAG: musllinux_1_2
+      WHEELS_APK_DEPS: "libffi-dev;openssl-dev;yaml-dev"
+      WHEELS_SKIP_BINARY: aiohttp
     steps:
       - name: Checkout the repository
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           fetch-depth: 0

-      - name: Write env-file
-        if: needs.init.outputs.requirements == 'true'
+      - name: Write env-file for wheels build
+        if: needs.init.outputs.build_wheels == 'true'
         run: |
           (
             # Fix out of memory issues with rust
             echo "CARGO_NET_GIT_FETCH_WITH_CLI=true"
           ) > .env_file

       # home-assistant/wheels doesn't support sha pinning
-      - name: Build wheels
-        if: needs.init.outputs.requirements == 'true'
-        uses: home-assistant/wheels@2025.11.0
+      - name: Build and publish wheels
+        if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'true'
+        uses: home-assistant/wheels@e5742a69d69f0e274e2689c998900c7d19652c21 # 2025.12.0
         with:
-          abi: cp313
-          tag: musllinux_1_2
-          arch: ${{ matrix.arch }}
           wheels-key: ${{ secrets.WHEELS_KEY }}
-          apk: "libffi-dev;openssl-dev;yaml-dev"
-          skip-binary: aiohttp
+          abi: ${{ env.WHEELS_ABI }}
+          tag: ${{ env.WHEELS_TAG }}
+          arch: ${{ matrix.arch }}
+          apk: ${{ env.WHEELS_APK_DEPS }}
+          skip-binary: ${{ env.WHEELS_SKIP_BINARY }}
           env-file: true
           requirements: "requirements.txt"

+      - name: Build local wheels
+        if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'false'
+        uses: home-assistant/wheels@e5742a69d69f0e274e2689c998900c7d19652c21 # 2025.12.0
+        with:
+          wheels-host: ""
+          wheels-user: ""
+          wheels-key: ""
+          local-wheels-repo-path: "wheels/"
+          abi: ${{ env.WHEELS_ABI }}
+          tag: ${{ env.WHEELS_TAG }}
+          arch: ${{ matrix.arch }}
+          apk: ${{ env.WHEELS_APK_DEPS }}
+          skip-binary: ${{ env.WHEELS_SKIP_BINARY }}
+          env-file: true
+          requirements: "requirements.txt"

+      - name: Upload local wheels artifact
+        if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'false'
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+        with:
+          name: wheels-${{ matrix.arch }}
+          path: wheels
+          retention-days: 1

       - name: Set version
         if: needs.init.outputs.publish == 'true'
         uses: home-assistant/actions/helpers/version@master

@@ -165,8 +202,9 @@ jobs:
       # home-assistant/builder doesn't support sha pinning
       - name: Build supervisor
-        uses: home-assistant/builder@2025.09.0
+        uses: home-assistant/builder@2025.11.0
         with:
           image: ${{ matrix.arch }}
           args: |
             $BUILD_ARGS \
             --${{ matrix.arch }} \

@@ -181,7 +219,7 @@ jobs:
     steps:
       - name: Checkout the repository
         if: needs.init.outputs.publish == 'true'
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

       - name: Initialize git
         if: needs.init.outputs.publish == 'true'

@@ -206,12 +244,19 @@ jobs:
     timeout-minutes: 60
     steps:
       - name: Checkout the repository
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

+      - name: Download local wheels artifact
+        if: needs.init.outputs.build_wheels == 'true' && needs.init.outputs.publish == 'false'
+        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+        with:
+          name: wheels-amd64
+          path: wheels

       # home-assistant/builder doesn't support sha pinning
       - name: Build the Supervisor
         if: needs.init.outputs.publish != 'true'
-        uses: home-assistant/builder@2025.09.0
+        uses: home-assistant/builder@2025.11.0
         with:
           args: |
             --test \
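The init job above decides `build_wheels` in plain shell against the changed-files list, with a release short-circuit. As a hedged sketch, the same decision in Python (function and variable names here are hypothetical, not part of the repository):

```python
import re

# Wheels are rebuilt only for non-release events that touch the
# requirements, the build config, or the builder workflow itself.
WHEELS_TRIGGERS = re.compile(
    r"(requirements\.txt|build\.yaml|\.github/workflows/builder\.yml)"
)


def should_build_wheels(event_name: str, changed_files: list[str]) -> bool:
    """Mirror the shell if/elif/else that writes build_wheels to GITHUB_OUTPUT."""
    if event_name == "release":
        # No wheels build necessary for releases
        return False
    return any(WHEELS_TRIGGERS.search(path) for path in changed_files)


print(should_build_wheels("pull_request", ["requirements.txt"]))  # True
print(should_build_wheels("release", ["requirements.txt"]))       # False
```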
22  .github/workflows/ci.yaml  vendored

@@ -26,7 +26,7 @@ jobs:
     name: Prepare Python dependencies
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Set up Python
         id: python
         uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0

@@ -68,7 +68,7 @@ jobs:
     needs: prepare
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
         uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python

@@ -111,7 +111,7 @@ jobs:
     needs: prepare
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
         uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python

@@ -154,7 +154,7 @@ jobs:
     needs: prepare
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Register hadolint problem matcher
         run: |
           echo "::add-matcher::.github/workflows/matchers/hadolint.json"

@@ -169,7 +169,7 @@ jobs:
     needs: prepare
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
         uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python

@@ -213,7 +213,7 @@ jobs:
     needs: prepare
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
         uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python

@@ -257,7 +257,7 @@ jobs:
     needs: prepare
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
         uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python

@@ -293,7 +293,7 @@ jobs:
     needs: prepare
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
         uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python

@@ -339,7 +339,7 @@ jobs:
     name: Run tests Python ${{ needs.prepare.outputs.python-version }}
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
         uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python

@@ -398,7 +398,7 @@ jobs:
     needs: ["pytest", "prepare"]
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
         uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python

@@ -428,4 +428,4 @@ jobs:
           coverage report
           coverage xml
       - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
+        uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
2  .github/workflows/release-drafter.yml  vendored

@@ -11,7 +11,7 @@ jobs:
     name: Release Drafter
     steps:
       - name: Checkout the repository
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           fetch-depth: 0
2  .github/workflows/sentry.yaml  vendored

@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Sentry Release
         uses: getsentry/action-release@128c5058bbbe93c8e02147fe0a9c713f166259a6 # v3.4.0
         env:
2  .github/workflows/stale.yml  vendored

@@ -9,7 +9,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
+      - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          days-before-stale: 30
6  .github/workflows/update_frontend.yml  vendored

@@ -14,7 +14,7 @@ jobs:
     latest_version: ${{ steps.latest_frontend_version.outputs.latest_tag }}
     steps:
       - name: Checkout code
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Get latest frontend release
         id: latest_frontend_version
         uses: abatilo/release-info-action@32cb932219f1cee3fc4f4a298fd65ead5d35b661 # v1.3.3

@@ -49,7 +49,7 @@ jobs:
     if: needs.check-version.outputs.skip != 'true'
     steps:
       - name: Checkout code
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Clear www folder
         run: |
           rm -rf supervisor/api/panel/*

@@ -68,7 +68,7 @@ jobs:
         run: |
           rm -f supervisor/api/panel/home_assistant_frontend_supervisor-*.tar.gz
       - name: Create PR
-        uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412 # v7.0.9
+        uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
         with:
           commit-message: "Update frontend to version ${{ needs.check-version.outputs.latest_version }}"
           branch: autoupdate-frontend
5  .gitignore  vendored

@@ -24,6 +24,9 @@ var/
 .installed.cfg
 *.egg

+# Local wheels
+wheels/**/*.whl

 # PyInstaller
 # Usually these files are written by a python script from a template
 # before PyInstaller builds the exe, so as to inject date/other infos into it.

@@ -102,4 +105,4 @@ ENV/
 /.dmypy.json

 # Mac
-.DS_Store
+.DS_Store
22  Dockerfile

@@ -8,9 +8,7 @@ ENV \
     UV_SYSTEM_PYTHON=true

 ARG \
-    COSIGN_VERSION \
-    BUILD_ARCH \
-    QEMU_CPU
+    COSIGN_VERSION

 # Install base
 WORKDIR /usr/src

@@ -32,15 +30,19 @@ RUN \
     && pip3 install uv==0.8.9

 # Install requirements
-COPY requirements.txt .
 RUN \
-    if [ "${BUILD_ARCH}" = "i386" ]; then \
-        setarch="linux32"; \
+    --mount=type=bind,source=./requirements.txt,target=/usr/src/requirements.txt \
+    --mount=type=bind,source=./wheels,target=/usr/src/wheels \
+    if ls /usr/src/wheels/musllinux/* >/dev/null 2>&1; then \
+        LOCAL_WHEELS=/usr/src/wheels/musllinux; \
+        echo "Using local wheels from: $LOCAL_WHEELS"; \
     else \
-        setarch=""; \
-    fi \
-    && ${setarch} uv pip install --compile-bytecode --no-cache --no-build -r requirements.txt \
-    && rm -f requirements.txt
+        LOCAL_WHEELS=; \
+        echo "No local wheels found"; \
+    fi && \
+    uv pip install --compile-bytecode --no-cache --no-build \
+        -r requirements.txt \
+        ${LOCAL_WHEELS:+--find-links $LOCAL_WHEELS}

 # Install Home Assistant Supervisor
 COPY . supervisor
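The new RUN step bind-mounts the requirements and a `wheels/` directory, and only passes `--find-links` when local wheels exist — that is what the shell expansion `${LOCAL_WHEELS:+--find-links $LOCAL_WHEELS}` does. A hedged Python sketch of the same command construction (helper name and paths are illustrative only):

```python
from pathlib import Path


def uv_install_args(wheels_dir: Path) -> list[str]:
    """Build the install command, preferring local musllinux wheels when present."""
    args = [
        "uv", "pip", "install",
        "--compile-bytecode", "--no-cache", "--no-build",
        "-r", "requirements.txt",
    ]
    # Equivalent of the shell ${LOCAL_WHEELS:+--find-links $LOCAL_WHEELS} expansion:
    # the flag is only appended if the directory has any content.
    if wheels_dir.is_dir() and any(wheels_dir.iterdir()):
        args += ["--find-links", str(wheels_dir)]
    return args


print(uv_install_args(Path("wheels/musllinux")))
```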
@@ -321,8 +321,6 @@ lint.ignore = [
     "PLW2901", # Outer {outer_kind} variable {name} overwritten by inner {inner_kind} target
     "UP006", # keep type annotation style as is
     "UP007", # keep type annotation style as is
-    # Ignored due to performance: https://github.com/charliermarsh/ruff/issues/2923
-    "UP038", # Use `X | Y` in `isinstance` call instead of `(X, Y)`

     # May conflict with the formatter, https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules
     "W191",
@@ -1,11 +1,11 @@
-aiodns==3.5.0
+aiodns==3.6.1
 aiodocker==0.24.0
 aiohttp==3.13.2
 atomicwrites-homeassistant==1.4.1
 attrs==25.4.0
 awesomeversion==25.8.0
-backports.zstd==1.1.0
-blockbuster==1.5.25
+backports.zstd==1.2.0
+blockbuster==1.5.26
 brotli==1.2.0
 ciso8601==2.3.3
 colorlog==6.10.1

@@ -19,13 +19,13 @@ faust-cchardet==2.1.19
 gitpython==3.1.45
 jinja2==3.1.6
 log-rate-limit==1.4.2
-orjson==3.11.4
+orjson==3.11.5
 pulsectl==24.12.0
 pyudev==0.24.4
 PyYAML==6.0.3
 requests==2.32.5
-securetar==2025.2.1
-sentry-sdk==2.46.0
+securetar==2025.12.0
+sentry-sdk==2.47.0
 setuptools==80.9.0
 voluptuous==0.15.2
 dbus-fast==3.1.2
@@ -1,16 +1,16 @@
 astroid==4.0.2
-coverage==7.12.0
-mypy==1.18.2
+coverage==7.13.0
+mypy==1.19.0
 pre-commit==4.5.0
-pylint==4.0.3
+pylint==4.0.4
 pytest-aiohttp==1.1.0
 pytest-asyncio==1.3.0
 pytest-cov==7.0.0
 pytest-timeout==2.4.0
-pytest==9.0.1
-ruff==0.14.6
+pytest==9.0.2
+ruff==0.14.8
 time-machine==3.1.0
-types-docker==7.1.0.20251127
+types-docker==7.1.0.20251202
 types-pyyaml==6.0.12.20250915
 types-requests==2.32.4.20250913
-urllib3==2.5.0
+urllib3==2.6.1
@@ -66,13 +66,22 @@ from ..docker.const import ContainerState
 from ..docker.monitor import DockerContainerStateEvent
 from ..docker.stats import DockerStats
 from ..exceptions import (
-    AddonConfigurationError,
+    AddonBackupMetadataInvalidError,
+    AddonBuildFailedUnknownError,
+    AddonConfigurationInvalidError,
+    AddonNotRunningError,
     AddonNotSupportedError,
+    AddonNotSupportedWriteStdinError,
+    AddonPrePostBackupCommandReturnedError,
     AddonsError,
     AddonsJobError,
+    AddonUnknownError,
+    BackupRestoreUnknownError,
     ConfigurationFileError,
+    DockerBuildError,
     DockerError,
     HostAppArmorError,
+    StoreAddonNotFoundError,
 )
 from ..hardware.data import Device
 from ..homeassistant.const import WSEvent

@@ -235,7 +244,7 @@ class Addon(AddonModel):
             await self.instance.check_image(self.version, default_image, self.arch)
         except DockerError:
             _LOGGER.info("No %s addon Docker image %s found", self.slug, self.image)
-            with suppress(DockerError):
+            with suppress(DockerError, AddonNotSupportedError):
                 await self.instance.install(self.version, default_image, arch=self.arch)

         self.persist[ATTR_IMAGE] = default_image

@@ -718,18 +727,16 @@ class Addon(AddonModel):
             options = self.schema.validate(self.options)
             await self.sys_run_in_executor(write_json_file, self.path_options, options)
         except vol.Invalid as ex:
-            _LOGGER.error(
-                "Add-on %s has invalid options: %s",
-                self.slug,
-                humanize_error(self.options, ex),
-            )
-        except ConfigurationFileError:
+            raise AddonConfigurationInvalidError(
+                _LOGGER.error,
+                addon=self.slug,
+                validation_error=humanize_error(self.options, ex),
+            ) from None
+        except ConfigurationFileError as err:
             _LOGGER.error("Add-on %s can't write options", self.slug)
-        else:
-            _LOGGER.debug("Add-on %s write options: %s", self.slug, options)
-            return
+            raise AddonUnknownError(addon=self.slug) from err

-        raise AddonConfigurationError()
+        _LOGGER.debug("Add-on %s write options: %s", self.slug, options)

     @Job(
         name="addon_unload",

@@ -772,7 +779,7 @@ class Addon(AddonModel):
     async def install(self) -> None:
         """Install and setup this addon."""
         if not self.addon_store:
-            raise AddonsError("Missing from store, cannot install!")
+            raise StoreAddonNotFoundError(addon=self.slug)

         await self.sys_addons.data.install(self.addon_store)

@@ -793,9 +800,17 @@ class Addon(AddonModel):
             await self.instance.install(
                 self.latest_version, self.addon_store.image, arch=self.arch
             )
-        except DockerError as err:
+        except AddonsError:
             await self.sys_addons.data.uninstall(self)
-            raise AddonsError() from err
+            raise
+        except DockerBuildError as err:
+            _LOGGER.error("Could not build image for addon %s: %s", self.slug, err)
+            await self.sys_addons.data.uninstall(self)
+            raise AddonBuildFailedUnknownError(addon=self.slug) from err
+        except DockerError as err:
+            _LOGGER.error("Could not pull image to update addon %s: %s", self.slug, err)
+            await self.sys_addons.data.uninstall(self)
+            raise AddonUnknownError(addon=self.slug) from err

         # Finish initialization and set up listeners
         await self.load()

@@ -819,7 +834,8 @@ class Addon(AddonModel):
         try:
             await self.instance.remove(remove_image=remove_image)
         except DockerError as err:
-            raise AddonsError() from err
+            _LOGGER.error("Could not remove image for addon %s: %s", self.slug, err)
+            raise AddonUnknownError(addon=self.slug) from err

         self.state = AddonState.UNKNOWN

@@ -884,7 +900,7 @@ class Addon(AddonModel):
         if it was running. Else nothing is returned.
         """
         if not self.addon_store:
-            raise AddonsError("Missing from store, cannot update!")
+            raise StoreAddonNotFoundError(addon=self.slug)

         old_image = self.image
         # Cache data to prevent races with other updates to global

@@ -892,8 +908,12 @@ class Addon(AddonModel):
         try:
             await self.instance.update(store.version, store.image, arch=self.arch)
+        except DockerBuildError as err:
+            _LOGGER.error("Could not build image for addon %s: %s", self.slug, err)
+            raise AddonBuildFailedUnknownError(addon=self.slug) from err
         except DockerError as err:
-            raise AddonsError() from err
+            _LOGGER.error("Could not pull image to update addon %s: %s", self.slug, err)
+            raise AddonUnknownError(addon=self.slug) from err

         # Stop the addon if running
         if (last_state := self.state) in {AddonState.STARTED, AddonState.STARTUP}:

@@ -935,12 +955,23 @@ class Addon(AddonModel):
         """
         last_state: AddonState = self.state
         try:
-            # remove docker container but not addon config
+            # remove docker container and image but not addon config
+            try:
                 await self.instance.remove()
-            await self.instance.install(self.version)
             except DockerError as err:
-                raise AddonsError() from err
+                _LOGGER.error("Could not remove image for addon %s: %s", self.slug, err)
+                raise AddonUnknownError(addon=self.slug) from err

+            try:
+                await self.instance.install(self.version)
+            except DockerBuildError as err:
+                _LOGGER.error("Could not build image for addon %s: %s", self.slug, err)
+                raise AddonBuildFailedUnknownError(addon=self.slug) from err
+            except DockerError as err:
+                _LOGGER.error(
+                    "Could not pull image to update addon %s: %s", self.slug, err
+                )
+                raise AddonUnknownError(addon=self.slug) from err

             if self.addon_store:
                 await self.sys_addons.data.update(self.addon_store)

@@ -1111,8 +1142,9 @@ class Addon(AddonModel):
         try:
             await self.instance.run()
         except DockerError as err:
+            _LOGGER.error("Could not start container for addon %s: %s", self.slug, err)
             self.state = AddonState.ERROR
-            raise AddonsError() from err
+            raise AddonUnknownError(addon=self.slug) from err

         return self.sys_create_task(self._wait_for_startup())

@@ -1127,8 +1159,9 @@ class Addon(AddonModel):
         try:
             await self.instance.stop()
         except DockerError as err:
+            _LOGGER.error("Could not stop container for addon %s: %s", self.slug, err)
             self.state = AddonState.ERROR
-            raise AddonsError() from err
+            raise AddonUnknownError(addon=self.slug) from err

     @Job(
         name="addon_restart",

@@ -1161,9 +1194,15 @@ class Addon(AddonModel):
     async def stats(self) -> DockerStats:
         """Return stats of container."""
         try:
+            if not await self.is_running():
+                raise AddonNotRunningError(_LOGGER.warning, addon=self.slug)

             return await self.instance.stats()
         except DockerError as err:
-            raise AddonsError() from err
+            _LOGGER.error(
+                "Could not get stats of container for addon %s: %s", self.slug, err
+            )
+            raise AddonUnknownError(addon=self.slug) from err

     @Job(
         name="addon_write_stdin",

@@ -1173,14 +1212,18 @@ class Addon(AddonModel):
     async def write_stdin(self, data) -> None:
         """Write data to add-on stdin."""
         if not self.with_stdin:
-            raise AddonNotSupportedError(
-                f"Add-on {self.slug} does not support writing to stdin!", _LOGGER.error
-            )
+            raise AddonNotSupportedWriteStdinError(_LOGGER.error, addon=self.slug)

         try:
-            return await self.instance.write_stdin(data)
+            if not await self.is_running():
+                raise AddonNotRunningError(_LOGGER.warning, addon=self.slug)
+
+            await self.instance.write_stdin(data)
         except DockerError as err:
-            raise AddonsError() from err
+            _LOGGER.error(
+                "Could not write stdin to container for addon %s: %s", self.slug, err
+            )
+            raise AddonUnknownError(addon=self.slug) from err

     async def _backup_command(self, command: str) -> None:
         try:

@@ -1189,15 +1232,14 @@ class Addon(AddonModel):
             _LOGGER.debug(
                 "Pre-/Post backup command failed with: %s", command_return.output
             )
-            raise AddonsError(
-                f"Pre-/Post backup command returned error code: {command_return.exit_code}",
-                _LOGGER.error,
+            raise AddonPrePostBackupCommandReturnedError(
+                _LOGGER.error, addon=self.slug, exit_code=command_return.exit_code
             )
         except DockerError as err:
-            raise AddonsError(
-                f"Failed running pre-/post backup command {command}: {str(err)}",
-                _LOGGER.error,
-            ) from err
+            _LOGGER.error(
+                "Failed running pre-/post backup command %s: %s", command, err
+            )
+            raise AddonUnknownError(addon=self.slug) from err

     @Job(
         name="addon_begin_backup",

@@ -1286,15 +1328,14 @@ class Addon(AddonModel):
             try:
                 self.instance.export_image(temp_path.joinpath("image.tar"))
             except DockerError as err:
-                raise AddonsError() from err
+                raise BackupRestoreUnknownError() from err

             # Store local configs/state
             try:
                 write_json_file(temp_path.joinpath("addon.json"), metadata)
             except ConfigurationFileError as err:
-                raise AddonsError(
-                    f"Can't save meta for {self.slug}", _LOGGER.error
-                ) from err
+                _LOGGER.error("Can't save meta for %s: %s", self.slug, err)
+                raise BackupRestoreUnknownError() from err

             # Store AppArmor Profile
             if apparmor_profile:

@@ -1304,9 +1345,7 @@ class Addon(AddonModel):
                         apparmor_profile, profile_backup_file
                     )
                 except HostAppArmorError as err:
-                    raise AddonsError(
-                        "Can't backup AppArmor profile", _LOGGER.error
-                    ) from err
+                    raise BackupRestoreUnknownError() from err

             # Write tarfile
             with tar_file as backup:

@@ -1360,7 +1399,8 @@ class Addon(AddonModel):
             )
             _LOGGER.info("Finish backup for addon %s", self.slug)
         except (tarfile.TarError, OSError, AddFileError) as err:
-            raise AddonsError(f"Can't write tarfile: {err}", _LOGGER.error) from err
+            _LOGGER.error("Can't write backup tarfile for addon %s: %s", self.slug, err)
+            raise BackupRestoreUnknownError() from err
         finally:
             if was_running:
                 wait_for_start = await self.end_backup()

@@ -1402,28 +1442,24 @@ class Addon(AddonModel):
         try:
             tmp, data = await self.sys_run_in_executor(_extract_tarfile)
         except tarfile.TarError as err:
-            raise AddonsError(
-                f"Can't read tarfile {tar_file}: {err}", _LOGGER.error
-            ) from err
+            _LOGGER.error("Can't extract backup tarfile for %s: %s", self.slug, err)
+            raise BackupRestoreUnknownError() from err
         except ConfigurationFileError as err:
-            raise AddonsError() from err
+            raise AddonUnknownError(addon=self.slug) from err

         try:
             # Validate
             try:
                 data = SCHEMA_ADDON_BACKUP(data)
             except vol.Invalid as err:
-                raise AddonsError(
-                    f"Can't validate {self.slug}, backup data: {humanize_error(data, err)}",
+                raise AddonBackupMetadataInvalidError(
                     _LOGGER.error,
+                    addon=self.slug,
+                    validation_error=humanize_error(data, err),
                 ) from err

-            # If available
-            if not self._available(data[ATTR_SYSTEM]):
-                raise AddonNotSupportedError(
-                    f"Add-on {self.slug} is not available for this platform",
-                    _LOGGER.error,
-                )
+            # Validate availability. Raises if not
+            self._validate_availability(data[ATTR_SYSTEM], logger=_LOGGER.error)

             # Restore local add-on information
             _LOGGER.info("Restore config for addon %s", self.slug)

@@ -1482,9 +1518,10 @@ class Addon(AddonModel):
             try:
                 await self.sys_run_in_executor(_restore_data)
             except shutil.Error as err:
-                raise AddonsError(
-                    f"Can't restore origin data: {err}", _LOGGER.error
-                ) from err
+                _LOGGER.error(
+                    "Can't restore origin data for %s: %s", self.slug, err
+                )
+                raise BackupRestoreUnknownError() from err

             # Restore AppArmor
             profile_file = Path(tmp.name, "apparmor.txt")

@@ -1495,10 +1532,11 @@ class Addon(AddonModel):
                     )
                 except HostAppArmorError as err:
                     _LOGGER.error(
-                        "Can't restore AppArmor profile for add-on %s",
+                        "Can't restore AppArmor profile for add-on %s: %s",
                         self.slug,
+                        err,
                     )
-                    raise AddonsError() from err
+                    raise BackupRestoreUnknownError() from err

         finally:
             # Is add-on loaded
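The refactor above replaces generic `AddonsError(...)` raises with specific exception types (`AddonUnknownError`, `AddonBuildFailedUnknownError`, `BackupRestoreUnknownError`, ...) that take an optional logger callback as their first positional argument plus keyword context. The real classes live in supervisor's `exceptions` module; the following is only a hedged sketch of that assumed shape, not the actual implementation:

```python
import logging
from collections.abc import Callable

_LOGGER = logging.getLogger(__name__)


class HassioError(Exception):
    """Sketch of a base error carrying a formatted message (assumed shape)."""

    message_template = "Unknown error"

    def __init__(
        self, logger: Callable[..., None] | None = None, **extra_fields: str
    ) -> None:
        message = self.message_template.format(**extra_fields)
        if logger is not None:
            logger(message)  # log at the raise site, e.g. _LOGGER.error
        super().__init__(message)


class AddonUnknownError(HassioError):
    """Mirrors the `raise AddonUnknownError(addon=self.slug) from err` calls."""

    message_template = "An unknown error occurred with addon {addon}"


def stop_addon(slug: str) -> None:
    try:
        raise OSError("docker stop failed")  # stand-in for a DockerError
    except OSError as err:
        raise AddonUnknownError(_LOGGER.error, addon=slug) from err
```

The design point of the diff is that callers (and the API layer) can now match on precise error types instead of parsing free-form `AddonsError` messages, while the logger argument keeps the log statement at the raise site.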
@@ -5,6 +5,7 @@ from __future__ import annotations

 import base64
+from functools import cached_property
 import json
 import logging
 from pathlib import Path
 from typing import TYPE_CHECKING, Any

@@ -20,17 +21,25 @@ from ..const import (
     FILE_SUFFIX_CONFIGURATION,
     META_ADDON,
     SOCKET_DOCKER,
+    CpuArch,
 )
 from ..coresys import CoreSys, CoreSysAttributes
-from ..docker.const import DOCKER_HUB
+from ..docker.const import DOCKER_HUB, DOCKER_HUB_LEGACY
 from ..docker.interface import MAP_ARCH
-from ..exceptions import ConfigurationFileError, HassioArchNotFound
+from ..exceptions import (
+    AddonBuildArchitectureNotSupportedError,
+    AddonBuildDockerfileMissingError,
+    ConfigurationFileError,
+    HassioArchNotFound,
+)
 from ..utils.common import FileConfiguration, find_one_filetype
 from .validate import SCHEMA_BUILD_CONFIG

 if TYPE_CHECKING:
     from .manager import AnyAddon

 _LOGGER: logging.Logger = logging.getLogger(__name__)


 class AddonBuild(FileConfiguration, CoreSysAttributes):
     """Handle build options for add-ons."""

@@ -67,7 +76,7 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
         raise RuntimeError()

     @cached_property
-    def arch(self) -> str:
+    def arch(self) -> CpuArch:
         """Return arch of the add-on."""
         return self.sys_arch.match([self.addon.arch])

@@ -111,7 +120,7 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
             return self.addon.path_location.joinpath(f"Dockerfile.{self.arch}")
         return self.addon.path_location.joinpath("Dockerfile")

-    async def is_valid(self) -> bool:
+    async def is_valid(self) -> None:
         """Return true if the build env is valid."""

         def build_is_valid() -> bool:

@@ -123,9 +132,17 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
             )

         try:
-            return await self.sys_run_in_executor(build_is_valid)
+            if not await self.sys_run_in_executor(build_is_valid):
+                raise AddonBuildDockerfileMissingError(
+                    _LOGGER.error, addon=self.addon.slug
+                )
         except HassioArchNotFound:
-            return False
+            raise AddonBuildArchitectureNotSupportedError(
+                _LOGGER.error,
+                addon=self.addon.slug,
+                addon_arch_list=self.addon.supported_arch,
+                system_arch_list=[arch.value for arch in self.sys_arch.supported],
+            ) from None

     def get_docker_config_json(self) -> str | None:
         """Generate Docker config.json content with registry credentials for base image.

@@ -154,8 +171,11 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
         # Use the actual registry URL for the key
         # Docker Hub uses "https://index.docker.io/v1/" as the key
+        # Support both docker.io (official) and hub.docker.com (legacy)
         registry_key = (
-            "https://index.docker.io/v1/" if registry == DOCKER_HUB else registry
+            "https://index.docker.io/v1/"
+            if registry in (DOCKER_HUB, DOCKER_HUB_LEGACY)
+            else registry
         )

         config = {"auths": {registry_key: {"auth": auth_string}}}
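Context for the `registry_key` change above: Docker clients look up Docker Hub credentials in `config.json` under the historical `https://index.docker.io/v1/` key, so both registry spellings must map to it. A hedged sketch of that mapping — the constant values here are assumptions for illustration, not quoted from the repository:

```python
import base64
import json

DOCKER_HUB = "docker.io"               # assumed value of the constant
DOCKER_HUB_LEGACY = "hub.docker.com"   # assumed value of the constant


def docker_config_json(registry: str, username: str, password: str) -> str:
    """Build a config.json payload with credentials for one registry."""
    auth = base64.b64encode(f"{username}:{password}".encode()).decode()
    # Docker Hub credentials live under the legacy index URL key,
    # regardless of which hostname the registry was configured with.
    key = (
        "https://index.docker.io/v1/"
        if registry in (DOCKER_HUB, DOCKER_HUB_LEGACY)
        else registry
    )
    return json.dumps({"auths": {key: {"auth": auth}}})


print(docker_config_json("hub.docker.com", "user", "secret"))
```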
@@ -87,6 +87,7 @@ from ..const import (
     AddonBootConfig,
     AddonStage,
     AddonStartup,
+    CpuArch,
 )
 from ..coresys import CoreSys
 from ..docker.const import Capabilities

@@ -315,12 +316,12 @@ class AddonModel(JobGroup, ABC):

     @property
     def panel_title(self) -> str:
-        """Return panel icon for Ingress frame."""
+        """Return panel title for Ingress frame."""
         return self.data.get(ATTR_PANEL_TITLE, self.name)

     @property
-    def panel_admin(self) -> str:
-        """Return panel icon for Ingress frame."""
+    def panel_admin(self) -> bool:
+        """Return if panel is only available for admin users."""
         return self.data[ATTR_PANEL_ADMIN]

     @property

@@ -488,7 +489,7 @@ class AddonModel(JobGroup, ABC):
         return self.data[ATTR_DEVICETREE]

     @property
-    def with_tmpfs(self) -> str | None:
+    def with_tmpfs(self) -> bool:
         """Return if tmp is in memory of add-on."""
         return self.data[ATTR_TMPFS]

@@ -508,7 +509,7 @@ class AddonModel(JobGroup, ABC):
         return self.data[ATTR_VIDEO]

     @property
-    def homeassistant_version(self) -> str | None:
+    def homeassistant_version(self) -> AwesomeVersion | None:
         """Return min Home Assistant version they needed by Add-on."""
         return self.data.get(ATTR_HOMEASSISTANT)

@@ -548,7 +549,7 @@ class AddonModel(JobGroup, ABC):
         return self.data.get(ATTR_MACHINE, [])

     @property
-    def arch(self) -> str:
+    def arch(self) -> CpuArch:
         """Return architecture to use for the addon's image."""
         if ATTR_IMAGE in self.data:
             return self.sys_arch.match(self.data[ATTR_ARCH])
@@ -75,7 +75,7 @@ class AddonOptions(CoreSysAttributes):
         """Create a schema for add-on options."""
         return vol.Schema(vol.All(dict, self))

-    def __call__(self, struct):
+    def __call__(self, struct: dict[str, Any]) -> dict[str, Any]:
         """Create schema validator for add-ons options."""
         options = {}

@@ -193,9 +193,7 @@ class AddonOptions(CoreSysAttributes):
             f"Fatal error for option '{key}' with type '{typ}' in {self._name} ({self._slug})"
         ) from None

-    def _nested_validate_list(
-        self, typ: Any, data_list: list[Any], key: str
-    ) -> list[Any]:
+    def _nested_validate_list(self, typ: Any, data_list: Any, key: str) -> list[Any]:
         """Validate nested items."""
         options = []

@@ -213,7 +211,7 @@ class AddonOptions(CoreSysAttributes):
         return options

     def _nested_validate_dict(
-        self, typ: dict[Any, Any], data_dict: dict[Any, Any], key: str
+        self, typ: dict[Any, Any], data_dict: Any, key: str
     ) -> dict[Any, Any]:
         """Validate nested items."""
         options = {}

@@ -264,7 +262,7 @@ class UiOptions(CoreSysAttributes):

     def __init__(self, coresys: CoreSys) -> None:
         """Initialize UI option render."""
-        self.coresys = coresys
+        self.coresys: CoreSys = coresys

     def __call__(self, raw_schema: dict[str, Any]) -> list[dict[str, Any]]:
         """Generate UI schema."""

@@ -279,10 +277,10 @@ class UiOptions(CoreSysAttributes):
     def _ui_schema_element(
         self,
         ui_schema: list[dict[str, Any]],
-        value: str,
+        value: str | list[Any] | dict[str, Any],
         key: str,
         multiple: bool = False,
-    ):
+    ) -> None:
         if isinstance(value, list):
             # nested value list
             assert not multiple
@@ -100,6 +100,9 @@ from ..const import (
 from ..coresys import CoreSysAttributes
 from ..docker.stats import DockerStats
 from ..exceptions import (
+    AddonBootConfigCannotChangeError,
+    AddonConfigurationInvalidError,
+    AddonNotSupportedWriteStdinError,
     APIAddonNotInstalled,
     APIError,
     APIForbidden,

@@ -125,6 +128,7 @@ SCHEMA_OPTIONS = vol.Schema(
         vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(str),
         vol.Optional(ATTR_INGRESS_PANEL): vol.Boolean(),
         vol.Optional(ATTR_WATCHDOG): vol.Boolean(),
+        vol.Optional(ATTR_OPTIONS): vol.Maybe(dict),
     }
 )

@@ -300,19 +304,24 @@ class APIAddons(CoreSysAttributes):
         # Update secrets for validation
         await self.sys_homeassistant.secrets.reload()

-        # Extend schema with add-on specific validation
-        addon_schema = SCHEMA_OPTIONS.extend(
-            {vol.Optional(ATTR_OPTIONS): vol.Maybe(addon.schema)}
-        )
-
         # Validate/Process Body
-        body = await api_validate(addon_schema, request)
+        body = await api_validate(SCHEMA_OPTIONS, request)
         if ATTR_OPTIONS in body:
-            addon.options = body[ATTR_OPTIONS]
+            # None resets options to defaults, otherwise validate the options
+            if body[ATTR_OPTIONS] is None:
+                addon.options = None
+            else:
+                try:
+                    addon.options = addon.schema(body[ATTR_OPTIONS])
+                except vol.Invalid as ex:
+                    raise AddonConfigurationInvalidError(
+                        addon=addon.slug,
+                        validation_error=humanize_error(body[ATTR_OPTIONS], ex),
+                    ) from None
         if ATTR_BOOT in body:
             if addon.boot_config == AddonBootConfig.MANUAL_ONLY:
-                raise APIError(
-                    f"Addon {addon.slug} boot option is set to {addon.boot_config} so it cannot be changed"
+                raise AddonBootConfigCannotChangeError(
+                    addon=addon.slug, boot_config=addon.boot_config.value
                 )
             addon.boot = body[ATTR_BOOT]
         if ATTR_AUTO_UPDATE in body:

@@ -385,7 +394,7 @@ class APIAddons(CoreSysAttributes):
         return data

     @api_process
-    async def options_config(self, request: web.Request) -> None:
+    async def options_config(self, request: web.Request) -> dict[str, Any]:
         """Validate user options for add-on."""
         slug: str = request.match_info["addon"]
         if slug != "self":

@@ -430,11 +439,11 @@ class APIAddons(CoreSysAttributes):
         }

     @api_process
-    async def uninstall(self, request: web.Request) -> Awaitable[None]:
+    async def uninstall(self, request: web.Request) -> None:
         """Uninstall add-on."""
         addon = self.get_addon_for_request(request)
         body: dict[str, Any] = await api_validate(SCHEMA_UNINSTALL, request)
-        return await asyncio.shield(
+        await asyncio.shield(
             self.sys_addons.uninstall(
                 addon.slug, remove_config=body[ATTR_REMOVE_CONFIG]
             )

@@ -476,7 +485,7 @@ class APIAddons(CoreSysAttributes):
         """Write to stdin of add-on."""
         addon = self.get_addon_for_request(request)
         if not addon.with_stdin:
-            raise APIError(f"STDIN not supported the {addon.slug} add-on")
+            raise AddonNotSupportedWriteStdinError(_LOGGER.error, addon=addon.slug)

         data = await request.read()
         await asyncio.shield(addon.write_stdin(data))
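The options handler above now distinguishes a `None` body (reset to defaults) from a dict that must pass the add-on's voluptuous schema, with `humanize_error` turning the raised `vol.Invalid` into a readable message. A reduced, self-contained sketch of that flow (the schema and helper names here are illustrative, not the supervisor's):

```python
import voluptuous as vol
from voluptuous.humanize import humanize_error

# Stand-in for an add-on's generated options schema.
schema = vol.Schema({vol.Required("port"): int})


def apply_options(options: dict | None) -> dict | None:
    """Validate submitted options; None resets the add-on to its defaults."""
    if options is None:
        return None
    try:
        return schema(options)
    except vol.Invalid as ex:
        # The API wraps this in AddonConfigurationInvalidError
        raise ValueError(humanize_error(options, ex)) from None


print(apply_options({"port": 8123}))  # {'port': 8123}
print(apply_options(None))            # None
```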
@@ -15,7 +15,7 @@ import voluptuous as vol
 from ..addons.addon import Addon
 from ..const import ATTR_NAME, ATTR_PASSWORD, ATTR_USERNAME, REQUEST_FROM
 from ..coresys import CoreSysAttributes
-from ..exceptions import APIForbidden
+from ..exceptions import APIForbidden, AuthInvalidNonStringValueError
 from .const import (
     ATTR_GROUP_IDS,
     ATTR_IS_ACTIVE,

@@ -69,7 +69,9 @@ class APIAuth(CoreSysAttributes):
         try:
             _ = username.encode and password.encode  # type: ignore
         except AttributeError:
-            raise HTTPUnauthorized(headers=REALM_HEADER) from None
+            raise AuthInvalidNonStringValueError(
+                _LOGGER.error, headers=REALM_HEADER
+            ) from None

         return self.sys_auth.check_login(
             addon, cast(str, username), cast(str, password)
@@ -211,7 +211,7 @@ class APIBackups(CoreSysAttributes):
         await self.sys_backups.save_data()

     @api_process
-    async def reload(self, _):
+    async def reload(self, _: web.Request) -> bool:
         """Reload backup list."""
         await asyncio.shield(self.sys_backups.reload())
         return True

@@ -421,7 +421,7 @@ class APIBackups(CoreSysAttributes):
         await self.sys_backups.remove(backup, locations=locations)

     @api_process
-    async def download(self, request: web.Request):
+    async def download(self, request: web.Request) -> web.StreamResponse:
         """Download a backup file."""
         backup = self._extract_slug(request)
         # Query will give us '' for /backups, convert value to None

@@ -451,7 +451,7 @@ class APIBackups(CoreSysAttributes):
         return response

     @api_process
-    async def upload(self, request: web.Request):
+    async def upload(self, request: web.Request) -> dict[str, str] | bool:
         """Upload a backup file."""
         location: LOCATION_TYPE = None
         locations: list[LOCATION_TYPE] | None = None
@@ -46,7 +46,7 @@ SCHEMA_OPTIONS = vol.Schema(

 SCHEMA_MIGRATE_DOCKER_STORAGE_DRIVER = vol.Schema(
     {
-        vol.Required(ATTR_STORAGE_DRIVER): vol.In(["overlayfs", "overlay2"]),
+        vol.Required(ATTR_STORAGE_DRIVER): vol.In(["overlayfs"]),
     }
 )

@@ -55,7 +55,7 @@ class APIDocker(CoreSysAttributes):
     """Handle RESTful API for Docker configuration."""

     @api_process
-    async def info(self, request: web.Request):
+    async def info(self, request: web.Request) -> dict[str, Any]:
         """Get docker info."""
         data_registries = {}
         for hostname, registry in self.sys_docker.config.registries.items():

@@ -113,7 +113,7 @@ class APIDocker(CoreSysAttributes):
         return {ATTR_REGISTRIES: data_registries}

     @api_process
-    async def create_registry(self, request: web.Request):
+    async def create_registry(self, request: web.Request) -> None:
         """Create a new docker registry."""
         body = await api_validate(SCHEMA_DOCKER_REGISTRY, request)

@@ -123,7 +123,7 @@ class APIDocker(CoreSysAttributes):
         await self.sys_docker.config.save_data()

     @api_process
-    async def remove_registry(self, request: web.Request):
+    async def remove_registry(self, request: web.Request) -> None:
         """Delete a docker registry."""
         hostname = request.match_info.get(ATTR_HOSTNAME)
         if hostname not in self.sys_docker.config.registries:
@@ -18,6 +18,7 @@ from ..const import (
     ATTR_BLK_WRITE,
     ATTR_BOOT,
     ATTR_CPU_PERCENT,
+    ATTR_DUPLICATE_LOG_FILE,
     ATTR_IMAGE,
     ATTR_IP_ADDRESS,
     ATTR_JOB_ID,

@@ -55,6 +56,7 @@ SCHEMA_OPTIONS = vol.Schema(
         vol.Optional(ATTR_AUDIO_OUTPUT): vol.Maybe(str),
         vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(str),
         vol.Optional(ATTR_BACKUPS_EXCLUDE_DATABASE): vol.Boolean(),
+        vol.Optional(ATTR_DUPLICATE_LOG_FILE): vol.Boolean(),
     }
 )

@@ -112,6 +114,7 @@ class APIHomeAssistant(CoreSysAttributes):
             ATTR_AUDIO_INPUT: self.sys_homeassistant.audio_input,
             ATTR_AUDIO_OUTPUT: self.sys_homeassistant.audio_output,
             ATTR_BACKUPS_EXCLUDE_DATABASE: self.sys_homeassistant.backups_exclude_database,
+            ATTR_DUPLICATE_LOG_FILE: self.sys_homeassistant.duplicate_log_file,
         }

     @api_process

@@ -151,10 +154,13 @@ class APIHomeAssistant(CoreSysAttributes):
                 ATTR_BACKUPS_EXCLUDE_DATABASE
             ]

+        if ATTR_DUPLICATE_LOG_FILE in body:
+            self.sys_homeassistant.duplicate_log_file = body[ATTR_DUPLICATE_LOG_FILE]

         await self.sys_homeassistant.save_data()

     @api_process
-    async def stats(self, request: web.Request) -> dict[Any, str]:
+    async def stats(self, request: web.Request) -> dict[str, Any]:
         """Return resource information."""
         stats = await self.sys_homeassistant.core.stats()
         if not stats:

@@ -191,7 +197,7 @@ class APIHomeAssistant(CoreSysAttributes):
         return await update_task

     @api_process
-    async def stop(self, request: web.Request) -> Awaitable[None]:
+    async def stop(self, request: web.Request) -> None:
         """Stop Home Assistant."""
         body = await api_validate(SCHEMA_STOP, request)
         await self._check_offline_migration(force=body[ATTR_FORCE])
@@ -1,6 +1,7 @@
 """Init file for Supervisor host RESTful API."""

 import asyncio
+from collections.abc import Awaitable
 from contextlib import suppress
 import json
 import logging

@@ -99,7 +100,7 @@ class APIHost(CoreSysAttributes):
         )

     @api_process
-    async def info(self, request):
+    async def info(self, request: web.Request) -> dict[str, Any]:
         """Return host information."""
         return {
             ATTR_AGENT_VERSION: self.sys_dbus.agent.version,

@@ -128,7 +129,7 @@ class APIHost(CoreSysAttributes):
         }

     @api_process
-    async def options(self, request):
+    async def options(self, request: web.Request) -> None:
         """Edit host settings."""
         body = await api_validate(SCHEMA_OPTIONS, request)

@@ -139,7 +140,7 @@ class APIHost(CoreSysAttributes):
         )

     @api_process
-    async def reboot(self, request):
+    async def reboot(self, request: web.Request) -> None:
         """Reboot host."""
         body = await api_validate(SCHEMA_SHUTDOWN, request)
         await self._check_ha_offline_migration(force=body[ATTR_FORCE])

@@ -147,7 +148,7 @@ class APIHost(CoreSysAttributes):
         return await asyncio.shield(self.sys_host.control.reboot())

     @api_process
-    async def shutdown(self, request):
+    async def shutdown(self, request: web.Request) -> None:
         """Poweroff host."""
         body = await api_validate(SCHEMA_SHUTDOWN, request)
         await self._check_ha_offline_migration(force=body[ATTR_FORCE])

@@ -155,12 +156,12 @@ class APIHost(CoreSysAttributes):
         return await asyncio.shield(self.sys_host.control.shutdown())

     @api_process
-    def reload(self, request):
+    def reload(self, request: web.Request) -> Awaitable[None]:
         """Reload host data."""
         return asyncio.shield(self.sys_host.reload())

     @api_process
-    async def services(self, request):
+    async def services(self, request: web.Request) -> dict[str, Any]:
         """Return list of available services."""
         services = []
         for unit in self.sys_host.services:

@@ -175,7 +176,7 @@ class APIHost(CoreSysAttributes):
         return {ATTR_SERVICES: services}

     @api_process
-    async def list_boots(self, _: web.Request):
+    async def list_boots(self, _: web.Request) -> dict[str, Any]:
         """Return a list of boot IDs."""
         boot_ids = await self.sys_host.logs.get_boot_ids()
         return {

@@ -186,7 +187,7 @@ class APIHost(CoreSysAttributes):
         }

     @api_process
-    async def list_identifiers(self, _: web.Request):
+    async def list_identifiers(self, _: web.Request) -> dict[str, list[str]]:
         """Return a list of syslog identifiers."""
         return {ATTR_IDENTIFIERS: await self.sys_host.logs.get_identifiers()}

@@ -332,7 +333,7 @@ class APIHost(CoreSysAttributes):
         )

     @api_process
-    async def disk_usage(self, request: web.Request) -> dict:
+    async def disk_usage(self, request: web.Request) -> dict[str, Any]:
         """Return a breakdown of storage usage for the system."""

         max_depth = request.query.get(ATTR_MAX_DEPTH, 1)
@@ -1,12 +1,12 @@
 """Handle security part of this API."""

-from collections.abc import Callable
+from collections.abc import Awaitable, Callable
 import logging
 import re
 from typing import Final
 from urllib.parse import unquote

-from aiohttp.web import Request, Response, middleware
+from aiohttp.web import Request, StreamResponse, middleware
 from aiohttp.web_exceptions import HTTPBadRequest, HTTPForbidden, HTTPUnauthorized
 from awesomeversion import AwesomeVersion

@@ -89,7 +89,7 @@ CORE_ONLY_PATHS: Final = re.compile(
 )

 # Policy role add-on API access
-ADDONS_ROLE_ACCESS: dict[str, re.Pattern] = {
+ADDONS_ROLE_ACCESS: dict[str, re.Pattern[str]] = {
     ROLE_DEFAULT: re.compile(
         r"^(?:"
         r"|/.+/info"

@@ -180,7 +182,9 @@ class SecurityMiddleware(CoreSysAttributes):
         return unquoted

     @middleware
-    async def block_bad_requests(self, request: Request, handler: Callable) -> Response:
+    async def block_bad_requests(
+        self, request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
+    ) -> StreamResponse:
         """Process request and tblock commonly known exploit attempts."""
         if FILTERS.search(self._recursive_unquote(request.path)):
             _LOGGER.warning(

@@ -198,7 +200,9 @@ class SecurityMiddleware(CoreSysAttributes):
         return await handler(request)

     @middleware
-    async def system_validation(self, request: Request, handler: Callable) -> Response:
+    async def system_validation(
+        self, request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
+    ) -> StreamResponse:
         """Check if core is ready to response."""
         if self.sys_core.state not in VALID_API_STATES:
             return api_return_error(

@@ -208,7 +212,9 @@ class SecurityMiddleware(CoreSysAttributes):
         return await handler(request)

     @middleware
-    async def token_validation(self, request: Request, handler: Callable) -> Response:
+    async def token_validation(
+        self, request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
+    ) -> StreamResponse:
         """Check security access of this layer."""
         request_from: CoreSysAttributes | None = None
         supervisor_token = extract_supervisor_token(request)

@@ -279,7 +285,9 @@ class SecurityMiddleware(CoreSysAttributes):
         raise HTTPForbidden()

     @middleware
-    async def core_proxy(self, request: Request, handler: Callable) -> Response:
+    async def core_proxy(
+        self, request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
+    ) -> StreamResponse:
         """Validate user from Core API proxy."""
         if (
             request[REQUEST_FROM] != self.sys_homeassistant
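The middleware signatures above move from the loose `Callable` / `Response` pair to the precise aiohttp types: a handler is a callable taking a `Request` and awaiting to a `StreamResponse`. A minimal self-contained middleware with that typing (illustrative example, not supervisor code):

```python
from collections.abc import Awaitable, Callable

from aiohttp import web
from aiohttp.web import Request, StreamResponse, middleware


@middleware
async def reject_dotdot(
    request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
) -> StreamResponse:
    """Block obvious path-traversal attempts before they reach a handler."""
    if ".." in request.path:
        return web.Response(status=400, text="bad request")
    return await handler(request)


app = web.Application(middlewares=[reject_dotdot])
```

`StreamResponse` is the base class of `Response`, so the widened return type also covers streaming handlers such as backup downloads.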
@@ -1,5 +1,9 @@
 """Init file for Supervisor network RESTful API."""

+from typing import Any
+
+from aiohttp import web
+
 from ..const import (
     ATTR_AVAILABLE,
     ATTR_PROVIDERS,

@@ -25,7 +29,7 @@ class APIServices(CoreSysAttributes):
         return service

     @api_process
-    async def list_services(self, request):
+    async def list_services(self, request: web.Request) -> dict[str, Any]:
         """Show register services."""
         services = []
         for service in self.sys_services.list_services:

@@ -40,7 +44,7 @@ class APIServices(CoreSysAttributes):
         return {ATTR_SERVICES: services}

     @api_process
-    async def set_service(self, request):
+    async def set_service(self, request: web.Request) -> None:
         """Write data into a service."""
         service = self._extract_service(request)
         body = await api_validate(service.schema, request)

@@ -50,7 +54,7 @@ class APIServices(CoreSysAttributes):
         await service.set_service_data(addon, body)

     @api_process
-    async def get_service(self, request):
+    async def get_service(self, request: web.Request) -> dict[str, Any]:
         """Read data into a service."""
         service = self._extract_service(request)

@@ -62,7 +66,7 @@ class APIServices(CoreSysAttributes):
         return service.get_service_data()

     @api_process
-    async def del_service(self, request):
+    async def del_service(self, request: web.Request) -> None:
         """Delete data into a service."""
         service = self._extract_service(request)
         addon = request[REQUEST_FROM]
@@ -53,7 +53,7 @@ from ..const import (
     REQUEST_FROM,
 )
 from ..coresys import CoreSysAttributes
-from ..exceptions import APIError, APIForbidden, APINotFound
+from ..exceptions import APIError, APIForbidden, APINotFound, StoreAddonNotFoundError
 from ..store.addon import AddonStore
 from ..store.repository import Repository
 from ..store.validate import validate_repository

@@ -104,7 +104,7 @@ class APIStore(CoreSysAttributes):
         addon_slug: str = request.match_info["addon"]

         if not (addon := self.sys_addons.get(addon_slug)):
-            raise APINotFound(f"Addon {addon_slug} does not exist")
+            raise StoreAddonNotFoundError(addon=addon_slug)

         if installed and not addon.is_installed:
             raise APIError(f"Addon {addon_slug} is not installed")

@@ -112,7 +112,7 @@ class APIStore(CoreSysAttributes):
         if not installed and addon.is_installed:
             addon = cast(Addon, addon)
             if not addon.addon_store:
-                raise APINotFound(f"Addon {addon_slug} does not exist in the store")
+                raise StoreAddonNotFoundError(addon=addon_slug)
             return addon.addon_store

         return addon

@@ -349,13 +349,13 @@ class APIStore(CoreSysAttributes):
         return self._generate_repository_information(repository)

     @api_process
-    async def add_repository(self, request: web.Request):
+    async def add_repository(self, request: web.Request) -> None:
         """Add repository to the store."""
         body = await api_validate(SCHEMA_ADD_REPOSITORY, request)
         await asyncio.shield(self.sys_store.add_repository(body[ATTR_REPOSITORY]))

     @api_process
-    async def remove_repository(self, request: web.Request):
+    async def remove_repository(self, request: web.Request) -> None:
         """Remove repository from the store."""
         repository: Repository = self._extract_repository(request)
         await asyncio.shield(self.sys_store.remove_repository(repository))
@@ -80,7 +80,7 @@ class APISupervisor(CoreSysAttributes):
"""Handle RESTful API for Supervisor functions."""

@api_process
async def ping(self, request):
async def ping(self, request: web.Request) -> bool:
"""Return ok for signal that the API is ready."""
return True

@@ -1,7 +1,7 @@
"""Init file for Supervisor util for RESTful API."""

import asyncio
from collections.abc import Callable
from collections.abc import Callable, Mapping
import json
from typing import Any, cast

@@ -26,7 +26,7 @@ from ..const import (
RESULT_OK,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import APIError, BackupFileNotFoundError, DockerAPIError, HassioError
from ..exceptions import APIError, DockerAPIError, HassioError
from ..jobs import JobSchedulerOptions, SupervisorJob
from ..utils import check_exception_chain, get_message_from_exception_chain
from ..utils.json import json_dumps, json_loads as json_loads_util
@@ -67,10 +67,10 @@ def api_process(method):
"""Return API information."""
try:
answer = await method(*args, **kwargs)
except BackupFileNotFoundError as err:
return api_return_error(err, status=404)
except APIError as err:
return api_return_error(err, status=err.status, job_id=err.job_id)
return api_return_error(
err, status=err.status, job_id=err.job_id, headers=err.headers
)
except HassioError as err:
return api_return_error(err)

@@ -139,6 +139,7 @@ def api_return_error(
error_type: str | None = None,
status: int = 400,
*,
headers: Mapping[str, str] | None = None,
job_id: str | None = None,
) -> web.Response:
"""Return an API error message."""
@@ -151,10 +152,15 @@ def api_return_error(

match error_type:
case const.CONTENT_TYPE_TEXT:
return web.Response(body=message, content_type=error_type, status=status)
return web.Response(
body=message, content_type=error_type, status=status, headers=headers
)
case const.CONTENT_TYPE_BINARY:
return web.Response(
body=message.encode(), content_type=error_type, status=status
body=message.encode(),
content_type=error_type,
status=status,
headers=headers,
)
case _:
result: dict[str, Any] = {
@@ -172,6 +178,7 @@ def api_return_error(
result,
status=status,
dumps=json_dumps,
headers=headers,
)

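What this enables, in sketch form: an APIError can now carry HTTP response headers that api_process forwards to the client. The subclass below is hypothetical (the real APIError constructor is not shown in this diff); only the err.headers attribute and the api_return_error(headers=...) path are confirmed by the hunks above.

from collections.abc import Mapping

class RateLimitedError(APIError):
    """Hypothetical APIError subclass carrying a Retry-After header."""

    status = 429

    def __init__(self, retry_after: int) -> None:
        super().__init__(f"Rate limited, retry in {retry_after}s")
        # api_process reads err.headers and passes it through to
        # api_return_error, which attaches it to the web.Response.
        self.headers: Mapping[str, str] = {"Retry-After": str(retry_after)}
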
@@ -4,6 +4,7 @@ import logging
from pathlib import Path
import platform

from .const import CpuArch
from .coresys import CoreSys, CoreSysAttributes
from .exceptions import ConfigurationFileError, HassioArchNotFound
from .utils.json import read_json_file
@@ -12,38 +13,40 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)

ARCH_JSON: Path = Path(__file__).parent.joinpath("data/arch.json")

MAP_CPU = {
"armv7": "armv7",
"armv6": "armhf",
"armv8": "aarch64",
"aarch64": "aarch64",
"i686": "i386",
"x86_64": "amd64",
MAP_CPU: dict[str, CpuArch] = {
"armv7": CpuArch.ARMV7,
"armv6": CpuArch.ARMHF,
"armv8": CpuArch.AARCH64,
"aarch64": CpuArch.AARCH64,
"i686": CpuArch.I386,
"x86_64": CpuArch.AMD64,
}


class CpuArch(CoreSysAttributes):
class CpuArchManager(CoreSysAttributes):
"""Manage available architectures."""

def __init__(self, coresys: CoreSys) -> None:
"""Initialize CPU Architecture handler."""
self.coresys = coresys
self._supported_arch: list[str] = []
self._supported_set: set[str] = set()
self._default_arch: str
self._supported_arch: list[CpuArch] = []
self._supported_set: set[CpuArch] = set()
self._default_arch: CpuArch

@property
def default(self) -> str:
def default(self) -> CpuArch:
"""Return system default arch."""
return self._default_arch

@property
def supervisor(self) -> str:
def supervisor(self) -> CpuArch:
"""Return supervisor arch."""
return self.sys_supervisor.arch or self._default_arch
if self.sys_supervisor.arch:
return CpuArch(self.sys_supervisor.arch)
return self._default_arch

@property
def supported(self) -> list[str]:
def supported(self) -> list[CpuArch]:
"""Return support arch by CPU/Machine."""
return self._supported_arch

@@ -65,7 +68,7 @@ class CpuArch(CoreSysAttributes):
return

# Use configs from arch.json
self._supported_arch.extend(arch_data[self.sys_machine])
self._supported_arch.extend(CpuArch(a) for a in arch_data[self.sys_machine])
self._default_arch = self.supported[0]

# Make sure native support is in supported list
@@ -78,14 +81,14 @@ class CpuArch(CoreSysAttributes):
"""Return True if there is a supported arch by this platform."""
return not self._supported_set.isdisjoint(arch_list)

def match(self, arch_list: list[str]) -> str:
def match(self, arch_list: list[str]) -> CpuArch:
"""Return best match for this CPU/Platform."""
for self_arch in self.supported:
if self_arch in arch_list:
return self_arch
raise HassioArchNotFound()

def detect_cpu(self) -> str:
def detect_cpu(self) -> CpuArch:
"""Return the arch type of local CPU."""
cpu = platform.machine()
for check, value in MAP_CPU.items():
@@ -96,9 +99,10 @@ class CpuArch(CoreSysAttributes):
"Unknown CPU architecture %s, falling back to Supervisor architecture.",
cpu,
)
return self.sys_supervisor.arch
return CpuArch(self.sys_supervisor.arch)
_LOGGER.warning(
"Unknown CPU architecture %s, assuming CPU architecture equals Supervisor architecture.",
cpu,
)
return cpu
# Return the cpu string as-is, wrapped in CpuArch (may fail if invalid)
return CpuArch(cpu)

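The hunk above imports CpuArch from .const; a minimal sketch of what that enum presumably looks like, with member names and values inferred from the new MAP_CPU (the authoritative definition lives elsewhere in the tree):

from enum import StrEnum

class CpuArch(StrEnum):
    """Supported CPU architectures (sketch inferred from MAP_CPU)."""

    ARMV7 = "armv7"
    ARMHF = "armhf"
    AARCH64 = "aarch64"
    I386 = "i386"
    AMD64 = "amd64"

# CpuArch("aarch64") returns CpuArch.AARCH64, while CpuArch("mips") raises
# ValueError -- which is what the "may fail if invalid" comment in
# detect_cpu is warning about.
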
@@ -9,8 +9,10 @@ from .addons.addon import Addon
from .const import ATTR_PASSWORD, ATTR_TYPE, ATTR_USERNAME, FILE_HASSIO_AUTH
from .coresys import CoreSys, CoreSysAttributes
from .exceptions import (
AuthError,
AuthHomeAssistantAPIValidationError,
AuthInvalidNonStringValueError,
AuthListUsersError,
AuthListUsersNoneResponseError,
AuthPasswordResetError,
HomeAssistantAPIError,
HomeAssistantWSError,
@@ -83,10 +85,8 @@ class Auth(FileConfiguration, CoreSysAttributes):
self, addon: Addon, username: str | None, password: str | None
) -> bool:
"""Check username login."""
if password is None:
raise AuthError("None as password is not supported!", _LOGGER.error)
if username is None:
raise AuthError("None as username is not supported!", _LOGGER.error)
if username is None or password is None:
raise AuthInvalidNonStringValueError(_LOGGER.error)

_LOGGER.info("Auth request from '%s' for '%s'", addon.slug, username)

@@ -137,7 +137,7 @@ class Auth(FileConfiguration, CoreSysAttributes):
finally:
self._running.pop(username, None)

raise AuthError()
raise AuthHomeAssistantAPIValidationError()

async def change_password(self, username: str, password: str) -> None:
"""Change user password login."""
@@ -155,7 +155,7 @@ class Auth(FileConfiguration, CoreSysAttributes):
except HomeAssistantAPIError as err:
_LOGGER.error("Can't request password reset on Home Assistant: %s", err)

raise AuthPasswordResetError()
raise AuthPasswordResetError(user=username)

async def list_users(self) -> list[dict[str, Any]]:
"""List users on the Home Assistant instance."""
@@ -166,15 +166,12 @@ class Auth(FileConfiguration, CoreSysAttributes):
{ATTR_TYPE: "config/auth/list"}
)
except HomeAssistantWSError as err:
raise AuthListUsersError(
f"Can't request listing users on Home Assistant: {err}", _LOGGER.error
) from err
_LOGGER.error("Can't request listing users on Home Assistant: %s", err)
raise AuthListUsersError() from err

if users is not None:
return users
raise AuthListUsersError(
"Can't request listing users on Home Assistant!", _LOGGER.error
)
raise AuthListUsersNoneResponseError(_LOGGER.error)

@staticmethod
def _rehash(value: str, salt2: str = "") -> str:

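Call sites like AuthInvalidNonStringValueError(_LOGGER.error) suggest the new exception types pre-bake their message and accept only the logger callable, mirroring the two-argument AuthError(...) form they replace. A hedged sketch of one such class (the real definitions are not shown in this diff; the default message here is an assumption):

class AuthInvalidNonStringValueError(AuthError):
    """Raised when username or password is None instead of a string (sketch)."""

    def __init__(self, logger=None) -> None:
        # Assumes the base accepts (message, logger), as the replaced
        # AuthError("...", _LOGGER.error) calls above imply.
        super().__init__("None as username or password is not supported!", logger)
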
@@ -60,7 +60,6 @@ from ..utils.dt import parse_datetime, utcnow
from ..utils.json import json_bytes
from ..utils.sentinel import DEFAULT
from .const import BUF_SIZE, LOCATION_CLOUD_BACKUP, BackupType
from .utils import password_to_key
from .validate import SCHEMA_BACKUP

IGNORED_COMPARISON_FIELDS = {ATTR_PROTECTED, ATTR_CRYPTO, ATTR_DOCKER}
@@ -101,7 +100,7 @@ class Backup(JobGroup):
self._data: dict[str, Any] = data or {ATTR_SLUG: slug}
self._tmp: TemporaryDirectory | None = None
self._outer_secure_tarfile: SecureTarFile | None = None
self._key: bytes | None = None
self._password: str | None = None
self._locations: dict[str | None, BackupLocation] = {
location: BackupLocation(
path=tar_file,
@@ -327,7 +326,7 @@ class Backup(JobGroup):

# Set password
if password:
self._init_password(password)
self._password = password
self._data[ATTR_PROTECTED] = True
self._data[ATTR_CRYPTO] = CRYPTO_AES128
self._locations[self.location].protected = True
@@ -337,14 +336,7 @@ class Backup(JobGroup):

def set_password(self, password: str | None) -> None:
"""Set the password for an existing backup."""
if password:
self._init_password(password)
else:
self._key = None

def _init_password(self, password: str) -> None:
"""Create key from password."""
self._key = password_to_key(password)
self._password = password

async def validate_backup(self, location: str | None) -> None:
"""Validate backup.
@@ -374,9 +366,9 @@ class Backup(JobGroup):
with SecureTarFile(
ending, # Not used
gzip=self.compressed,
key=self._key,
mode="r",
fileobj=test_tar_file,
password=self._password,
):
# If we can read the tar file, the password is correct
return
@@ -592,7 +584,7 @@ class Backup(JobGroup):
addon_file = self._outer_secure_tarfile.create_inner_tar(
f"./{tar_name}",
gzip=self.compressed,
key=self._key,
password=self._password,
)
# Take backup
try:
@@ -628,9 +620,6 @@ class Backup(JobGroup):
if start_task := await self._addon_save(addon):
start_tasks.append(start_task)
except BackupError as err:
err = BackupError(
f"Can't backup add-on {addon.slug}: {str(err)}", _LOGGER.error
)
self.sys_jobs.current.capture_error(err)

return start_tasks
@@ -646,9 +635,9 @@ class Backup(JobGroup):
addon_file = SecureTarFile(
Path(self._tmp.name, tar_name),
"r",
key=self._key,
gzip=self.compressed,
bufsize=BUF_SIZE,
password=self._password,
)

# If exists inside backup
@@ -744,7 +733,7 @@ class Backup(JobGroup):
with outer_secure_tarfile.create_inner_tar(
f"./{tar_name}",
gzip=self.compressed,
key=self._key,
password=self._password,
) as tar_file:
atomic_contents_add(
tar_file,
@@ -805,9 +794,9 @@ class Backup(JobGroup):
with SecureTarFile(
tar_name,
"r",
key=self._key,
gzip=self.compressed,
bufsize=BUF_SIZE,
password=self._password,
) as tar_file:
tar_file.extractall(
path=origin_dir, members=tar_file, filter="fully_trusted"
@@ -868,7 +857,7 @@ class Backup(JobGroup):
homeassistant_file = self._outer_secure_tarfile.create_inner_tar(
f"./{tar_name}",
gzip=self.compressed,
key=self._key,
password=self._password,
)

await self.sys_homeassistant.backup(homeassistant_file, exclude_database)
@@ -891,7 +880,11 @@ class Backup(JobGroup):
self._tmp.name, f"homeassistant.tar{'.gz' if self.compressed else ''}"
)
homeassistant_file = SecureTarFile(
tar_name, "r", key=self._key, gzip=self.compressed, bufsize=BUF_SIZE
tar_name,
"r",
gzip=self.compressed,
bufsize=BUF_SIZE,
password=self._password,
)

await self.sys_homeassistant.restore(

@@ -6,21 +6,6 @@ import re
RE_DIGITS = re.compile(r"\d+")


def password_to_key(password: str) -> bytes:
"""Generate a AES Key from password."""
key: bytes = password.encode()
for _ in range(100):
key = hashlib.sha256(key).digest()
return key[:16]


def key_to_iv(key: bytes) -> bytes:
"""Generate an iv from Key."""
for _ in range(100):
key = hashlib.sha256(key).digest()
return key[:16]


def create_slug(name: str, date_str: str) -> str:
"""Generate a hash from repository."""
key = f"{date_str} - {name}".lower().encode()

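For reference, the deleted password_to_key helper derived the AES key by iterating SHA-256 and truncating; a self-contained round-trip of that scheme (pure standard library, mirroring the removed lines above) shows why SecureTarFile can now take password= and derive the key itself:

import hashlib

def password_to_key(password: str) -> bytes:
    """100 rounds of SHA-256 over the UTF-8 password, truncated to 16 bytes."""
    key: bytes = password.encode()
    for _ in range(100):
        key = hashlib.sha256(key).digest()
    return key[:16]

# Deterministic: the same password always yields the same 128-bit key.
assert password_to_key("hunter2") == password_to_key("hunter2")
assert len(password_to_key("hunter2")) == 16
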
@@ -13,7 +13,7 @@ from colorlog import ColoredFormatter

from .addons.manager import AddonManager
from .api import RestAPI
from .arch import CpuArch
from .arch import CpuArchManager
from .auth import Auth
from .backups.manager import BackupManager
from .bus import Bus
@@ -71,7 +71,7 @@ async def initialize_coresys() -> CoreSys:
coresys.jobs = await JobManager(coresys).load_config()
coresys.core = await Core(coresys).post_init()
coresys.plugins = await PluginManager(coresys).load_config()
coresys.arch = CpuArch(coresys)
coresys.arch = CpuArchManager(coresys)
coresys.auth = await Auth(coresys).load_config()
coresys.updater = await Updater(coresys).load_config()
coresys.api = RestAPI(coresys)

@@ -179,6 +179,7 @@ ATTR_DOCKER = "docker"
ATTR_DOCKER_API = "docker_api"
ATTR_DOCUMENTATION = "documentation"
ATTR_DOMAINS = "domains"
ATTR_DUPLICATE_LOG_FILE = "duplicate_log_file"
ATTR_ENABLE = "enable"
ATTR_ENABLE_IPV6 = "enable_ipv6"
ATTR_ENABLED = "enabled"

@@ -29,7 +29,7 @@ from .const import (
if TYPE_CHECKING:
from .addons.manager import AddonManager
from .api import RestAPI
from .arch import CpuArch
from .arch import CpuArchManager
from .auth import Auth
from .backups.manager import BackupManager
from .bus import Bus
@@ -78,7 +78,7 @@ class CoreSys:
# Internal objects pointers
self._docker: DockerAPI | None = None
self._core: Core | None = None
self._arch: CpuArch | None = None
self._arch: CpuArchManager | None = None
self._auth: Auth | None = None
self._homeassistant: HomeAssistant | None = None
self._supervisor: Supervisor | None = None
@@ -266,17 +266,17 @@ class CoreSys:
self._plugins = value

@property
def arch(self) -> CpuArch:
"""Return CpuArch object."""
def arch(self) -> CpuArchManager:
"""Return CpuArchManager object."""
if self._arch is None:
raise RuntimeError("CpuArch not set!")
raise RuntimeError("CpuArchManager not set!")
return self._arch

@arch.setter
def arch(self, value: CpuArch) -> None:
"""Set a CpuArch object."""
def arch(self, value: CpuArchManager) -> None:
"""Set a CpuArchManager object."""
if self._arch:
raise RuntimeError("CpuArch already set!")
raise RuntimeError("CpuArchManager already set!")
self._arch = value

@property
@@ -733,8 +733,8 @@ class CoreSysAttributes:
return self.coresys.plugins

@property
def sys_arch(self) -> CpuArch:
"""Return CpuArch object."""
def sys_arch(self) -> CpuArchManager:
"""Return CpuArchManager object."""
return self.coresys.arch

@property

@@ -250,7 +250,7 @@ class ConnectionType(StrEnum):
WIRELESS = "802-11-wireless"


class ConnectionStateType(IntEnum):
class ConnectionState(IntEnum):
"""Connection states.

https://networkmanager.dev/docs/api/latest/nm-dbus-types.html#NMActiveConnectionState

@@ -90,8 +90,8 @@ class Ip4Properties(IpProperties):
class Ip6Properties(IpProperties):
"""IPv6 properties object for Network Manager."""

addr_gen_mode: int
ip6_privacy: int
addr_gen_mode: int | None
ip6_privacy: int | None
dns: list[bytes] | None

@@ -16,8 +16,8 @@ from ..const import (
DBUS_IFACE_CONNECTION_ACTIVE,
DBUS_NAME_NM,
DBUS_OBJECT_BASE,
ConnectionState,
ConnectionStateFlags,
ConnectionStateType,
)
from ..interface import DBusInterfaceProxy, dbus_property
from ..utils import dbus_connected
@@ -67,9 +67,9 @@ class NetworkConnection(DBusInterfaceProxy):

@property
@dbus_property
def state(self) -> ConnectionStateType:
def state(self) -> ConnectionState:
"""Return the state of the connection."""
return ConnectionStateType(self.properties[DBUS_ATTR_STATE])
return ConnectionState(self.properties[DBUS_ATTR_STATE])

@property
def state_flags(self) -> set[ConnectionStateFlags]:

@@ -16,7 +16,11 @@ from ....host.const import (
InterfaceType,
MulticastDnsMode,
)
from ...const import MulticastDnsValue
from ...const import (
InterfaceAddrGenMode as NMInterfaceAddrGenMode,
InterfaceIp6Privacy as NMInterfaceIp6Privacy,
MulticastDnsValue,
)
from .. import NetworkManager
from . import (
CONF_ATTR_802_ETHERNET,
@@ -118,24 +122,41 @@ def _get_ipv6_connection_settings(
ipv6[CONF_ATTR_IPV6_METHOD] = Variant("s", "auto")
if ipv6setting:
if ipv6setting.addr_gen_mode == InterfaceAddrGenMode.EUI64:
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 0)
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
"i", NMInterfaceAddrGenMode.EUI64.value
)
elif (
not support_addr_gen_mode_defaults
or ipv6setting.addr_gen_mode == InterfaceAddrGenMode.STABLE_PRIVACY
):
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 1)
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
"i", NMInterfaceAddrGenMode.STABLE_PRIVACY.value
)
elif ipv6setting.addr_gen_mode == InterfaceAddrGenMode.DEFAULT_OR_EUI64:
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 2)
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
"i", NMInterfaceAddrGenMode.DEFAULT_OR_EUI64.value
)
else:
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 3)
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant(
"i", NMInterfaceAddrGenMode.DEFAULT.value
)

if ipv6setting.ip6_privacy == InterfaceIp6Privacy.DISABLED:
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 0)
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
"i", NMInterfaceIp6Privacy.DISABLED.value
)
elif ipv6setting.ip6_privacy == InterfaceIp6Privacy.ENABLED_PREFER_PUBLIC:
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 1)
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
"i", NMInterfaceIp6Privacy.ENABLED_PREFER_PUBLIC.value
)
elif ipv6setting.ip6_privacy == InterfaceIp6Privacy.ENABLED:
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 2)
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
"i", NMInterfaceIp6Privacy.ENABLED.value
)
else:
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", -1)
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant(
"i", NMInterfaceIp6Privacy.DEFAULT.value
)
elif ipv6setting.method == InterfaceMethod.DISABLED:
ipv6[CONF_ATTR_IPV6_METHOD] = Variant("s", "link-local")
elif ipv6setting.method == InterfaceMethod.STATIC:

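The hunk swaps hard-coded integers for named enum members. From the replaced literals, the assumed definitions in the dbus const module would be IntEnums along these lines (names per the import aliases above, values inferred from the old Variant("i", N) calls):

from enum import IntEnum

class InterfaceAddrGenMode(IntEnum):
    """NM ipv6.addr-gen-mode values (inferred from the replaced literals)."""

    EUI64 = 0
    STABLE_PRIVACY = 1
    DEFAULT_OR_EUI64 = 2
    DEFAULT = 3

class InterfaceIp6Privacy(IntEnum):
    """NM ipv6.ip6-privacy values (inferred from the replaced literals)."""

    DEFAULT = -1
    DISABLED = 0
    ENABLED_PREFER_PUBLIC = 1
    ENABLED = 2
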
@@ -2,20 +2,21 @@

from __future__ import annotations

from collections.abc import Awaitable
from contextlib import suppress
from ipaddress import IPv4Address
import logging
import os
from pathlib import Path
from socket import SocketIO
import tempfile
from typing import TYPE_CHECKING, cast
from typing import TYPE_CHECKING, Literal, cast

import aiodocker
from attr import evolve
from awesomeversion import AwesomeVersion
import docker
import docker.errors
from docker.types import Mount
import requests

from ..addons.build import AddonBuild
@@ -34,6 +35,7 @@ from ..coresys import CoreSys
from ..exceptions import (
CoreDNSError,
DBusError,
DockerBuildError,
DockerError,
DockerJobError,
DockerNotFound,
@@ -65,8 +67,11 @@ from .const import (
PATH_SHARE,
PATH_SSL,
Capabilities,
DockerMount,
MountBindOptions,
MountType,
PropagationMode,
Ulimit,
)
from .interface import DockerInterface

@@ -269,7 +274,7 @@ class DockerAddon(DockerInterface):
}

@property
def network_mode(self) -> str | None:
def network_mode(self) -> Literal["host"] | None:
"""Return network mode for add-on."""
if self.addon.host_network:
return "host"
@@ -308,28 +313,28 @@ class DockerAddon(DockerInterface):
return None

@property
def ulimits(self) -> list[docker.types.Ulimit] | None:
def ulimits(self) -> list[Ulimit] | None:
"""Generate ulimits for add-on."""
limits: list[docker.types.Ulimit] = []
limits: list[Ulimit] = []

# Need schedule functions
if self.addon.with_realtime:
limits.append(docker.types.Ulimit(name="rtprio", soft=90, hard=99))
limits.append(Ulimit(name="rtprio", soft=90, hard=99))

# Set available memory for memlock to 128MB
mem = 128 * 1024 * 1024
limits.append(docker.types.Ulimit(name="memlock", soft=mem, hard=mem))
limits.append(Ulimit(name="memlock", soft=mem, hard=mem))

# Add configurable ulimits from add-on config
for name, config in self.addon.ulimits.items():
if isinstance(config, int):
# Simple format: both soft and hard limits are the same
limits.append(docker.types.Ulimit(name=name, soft=config, hard=config))
limits.append(Ulimit(name=name, soft=config, hard=config))
elif isinstance(config, dict):
# Detailed format: both soft and hard limits are mandatory
soft = config["soft"]
hard = config["hard"]
limits.append(docker.types.Ulimit(name=name, soft=soft, hard=hard))
limits.append(Ulimit(name=name, soft=soft, hard=hard))

# Return None if no ulimits are present
if limits:
@@ -348,7 +353,7 @@ class DockerAddon(DockerInterface):
return None

@property
def mounts(self) -> list[Mount]:
def mounts(self) -> list[DockerMount]:
"""Return mounts for container."""
addon_mapping = self.addon.map_volumes

@@ -358,8 +363,8 @@ class DockerAddon(DockerInterface):

mounts = [
MOUNT_DEV,
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.addon.path_extern_data.as_posix(),
target=target_data_path or PATH_PRIVATE_DATA.as_posix(),
read_only=False,
@@ -369,8 +374,8 @@ class DockerAddon(DockerInterface):
# setup config mappings
if MappingType.CONFIG in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target=addon_mapping[MappingType.CONFIG].path
or PATH_HOMEASSISTANT_CONFIG_LEGACY.as_posix(),
@@ -382,8 +387,8 @@ class DockerAddon(DockerInterface):
# Map addon's public config folder if not using deprecated config option
if self.addon.addon_config_used:
mounts.append(
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.addon.path_extern_config.as_posix(),
target=addon_mapping[MappingType.ADDON_CONFIG].path
or PATH_PUBLIC_CONFIG.as_posix(),
@@ -394,8 +399,8 @@ class DockerAddon(DockerInterface):
# Map Home Assistant config in new way
if MappingType.HOMEASSISTANT_CONFIG in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target=addon_mapping[MappingType.HOMEASSISTANT_CONFIG].path
or PATH_HOMEASSISTANT_CONFIG.as_posix(),
@@ -407,8 +412,8 @@ class DockerAddon(DockerInterface):

if MappingType.ALL_ADDON_CONFIGS in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_addon_configs.as_posix(),
target=addon_mapping[MappingType.ALL_ADDON_CONFIGS].path
or PATH_ALL_ADDON_CONFIGS.as_posix(),
@@ -418,8 +423,8 @@ class DockerAddon(DockerInterface):

if MappingType.SSL in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_ssl.as_posix(),
target=addon_mapping[MappingType.SSL].path or PATH_SSL.as_posix(),
read_only=addon_mapping[MappingType.SSL].read_only,
@@ -428,8 +433,8 @@ class DockerAddon(DockerInterface):

if MappingType.ADDONS in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_addons_local.as_posix(),
target=addon_mapping[MappingType.ADDONS].path
or PATH_LOCAL_ADDONS.as_posix(),
@@ -439,8 +444,8 @@ class DockerAddon(DockerInterface):

if MappingType.BACKUP in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_backup.as_posix(),
target=addon_mapping[MappingType.BACKUP].path
or PATH_BACKUP.as_posix(),
@@ -450,25 +455,25 @@ class DockerAddon(DockerInterface):

if MappingType.SHARE in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_share.as_posix(),
target=addon_mapping[MappingType.SHARE].path
or PATH_SHARE.as_posix(),
read_only=addon_mapping[MappingType.SHARE].read_only,
propagation=PropagationMode.RSLAVE,
bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
)
)

if MappingType.MEDIA in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_media.as_posix(),
target=addon_mapping[MappingType.MEDIA].path
or PATH_MEDIA.as_posix(),
read_only=addon_mapping[MappingType.MEDIA].read_only,
propagation=PropagationMode.RSLAVE,
bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
)
)

@@ -480,8 +485,8 @@ class DockerAddon(DockerInterface):
if not Path(gpio_path).exists():
continue
mounts.append(
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=gpio_path,
target=gpio_path,
read_only=False,
@@ -491,8 +496,8 @@ class DockerAddon(DockerInterface):
# DeviceTree support
if self.addon.with_devicetree:
mounts.append(
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source="/sys/firmware/devicetree/base",
target="/device-tree",
read_only=True,
@@ -506,8 +511,8 @@ class DockerAddon(DockerInterface):
# Kernel Modules support
if self.addon.with_kernel_modules:
mounts.append(
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source="/lib/modules",
target="/lib/modules",
read_only=True,
@@ -525,20 +530,20 @@ class DockerAddon(DockerInterface):
# Configuration Audio
if self.addon.with_audio:
mounts += [
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.addon.path_extern_pulse.as_posix(),
target="/etc/pulse/client.conf",
read_only=True,
),
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_plugins.audio.path_extern_pulse.as_posix(),
target="/run/audio",
read_only=True,
),
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_plugins.audio.path_extern_asound.as_posix(),
target="/etc/asound.conf",
read_only=True,
@@ -548,14 +553,14 @@ class DockerAddon(DockerInterface):
# System Journal access
if self.addon.with_journald:
mounts += [
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=SYSTEMD_JOURNAL_PERSISTENT.as_posix(),
target=SYSTEMD_JOURNAL_PERSISTENT.as_posix(),
read_only=True,
),
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=SYSTEMD_JOURNAL_VOLATILE.as_posix(),
target=SYSTEMD_JOURNAL_VOLATILE.as_posix(),
read_only=True,
@@ -681,13 +686,12 @@ class DockerAddon(DockerInterface):
async def _build(self, version: AwesomeVersion, image: str | None = None) -> None:
"""Build a Docker container."""
build_env = await AddonBuild(self.coresys, self.addon).load_config()
if not await build_env.is_valid():
_LOGGER.error("Invalid build environment, can't build this add-on!")
raise DockerError()
# Check if the build environment is valid, raises if not
await build_env.is_valid()

_LOGGER.info("Starting build for %s:%s", self.image, version)

def build_image():
def build_image() -> tuple[str, str]:
if build_env.squash:
_LOGGER.warning(
"Ignoring squash build option for %s as Docker BuildKit does not support it.",
@@ -704,7 +708,9 @@ class DockerAddon(DockerInterface):
# Remove dangling builder container if it exists by any chance
# E.g. because of an abrupt host shutdown/reboot during a build
with suppress(docker.errors.NotFound):
self.sys_docker.containers.get(builder_name).remove(force=True, v=True)
self.sys_docker.containers_legacy.get(builder_name).remove(
force=True, v=True
)

# Generate Docker config with registry credentials for base image if needed
docker_config_path: Path | None = None
@@ -760,8 +766,9 @@ class DockerAddon(DockerInterface):
requests.RequestException,
aiodocker.DockerError,
) as err:
_LOGGER.error("Can't build %s:%s: %s", self.image, version, err)
raise DockerError() from err
raise DockerBuildError(
f"Can't build {self.image}:{version}: {err!s}", _LOGGER.error
) from err

_LOGGER.info("Build %s:%s done", self.image, version)

@@ -819,12 +826,9 @@ class DockerAddon(DockerInterface):
on_condition=DockerJobError,
concurrency=JobConcurrency.GROUP_REJECT,
)
async def write_stdin(self, data: bytes) -> None:
def write_stdin(self, data: bytes) -> Awaitable[None]:
"""Write to add-on stdin."""
if not await self.is_running():
raise DockerError()

await self.sys_run_in_executor(self._write_stdin, data)
return self.sys_run_in_executor(self._write_stdin, data)

def _write_stdin(self, data: bytes) -> None:
"""Write to add-on stdin.
@@ -833,8 +837,11 @@ class DockerAddon(DockerInterface):
"""
try:
# Load needed docker objects
container = self.sys_docker.containers.get(self.name)
socket = container.attach_socket(params={"stdin": 1, "stream": 1})
container = self.sys_docker.containers_legacy.get(self.name)
# attach_socket returns SocketIO for local Docker connections (Unix socket)
socket = cast(
SocketIO, container.attach_socket(params={"stdin": 1, "stream": 1})
)
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't attach to %s stdin: %s", self.name, err)
raise DockerError() from err
@@ -893,7 +900,7 @@ class DockerAddon(DockerInterface):

try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
self.sys_docker.containers_legacy.get, self.name
)
except docker.errors.NotFound:
if self._hw_listener:

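One pattern change worth noting in the hunk above: write_stdin goes from async def to a plain method returning an Awaitable. A distilled sketch of the difference, with generic names rather than Supervisor API:

import asyncio
from collections.abc import Awaitable

async def write_v1(loop: asyncio.AbstractEventLoop, data: bytes) -> None:
    # Before: an extra coroutine frame whose only job is to await the executor.
    await loop.run_in_executor(None, print, data)

def write_v2(loop: asyncio.AbstractEventLoop, data: bytes) -> Awaitable[None]:
    # After: return the executor future directly; the work is scheduled as
    # soon as the method is called, and the caller awaits it with no wrapper.
    return loop.run_in_executor(None, print, data)
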
@@ -2,9 +2,6 @@

import logging

import docker
from docker.types import Mount

from ..const import DOCKER_CPU_RUNTIME_ALLOCATION
from ..coresys import CoreSysAttributes
from ..exceptions import DockerJobError
@@ -19,7 +16,9 @@ from .const import (
MOUNT_UDEV,
PATH_PRIVATE_DATA,
Capabilities,
DockerMount,
MountType,
Ulimit,
)
from .interface import DockerInterface

@@ -42,12 +41,12 @@ class DockerAudio(DockerInterface, CoreSysAttributes):
return AUDIO_DOCKER_NAME

@property
def mounts(self) -> list[Mount]:
def mounts(self) -> list[DockerMount]:
"""Return mounts for container."""
mounts = [
MOUNT_DEV,
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_audio.as_posix(),
target=PATH_PRIVATE_DATA.as_posix(),
read_only=False,
@@ -75,10 +74,10 @@ class DockerAudio(DockerInterface, CoreSysAttributes):
return [Capabilities.SYS_NICE, Capabilities.SYS_RESOURCE]

@property
def ulimits(self) -> list[docker.types.Ulimit]:
def ulimits(self) -> list[Ulimit]:
"""Generate ulimits for audio."""
# Pulseaudio by default tries to use real-time scheduling with priority of 5.
return [docker.types.Ulimit(name="rtprio", soft=10, hard=10)]
return [Ulimit(name="rtprio", soft=10, hard=10)]

@property
def cpu_rt_runtime(self) -> int | None:

@@ -3,23 +3,23 @@
from __future__ import annotations

from contextlib import suppress
from dataclasses import dataclass
from enum import Enum, StrEnum
from functools import total_ordering
from pathlib import PurePath
import re
from typing import cast

from docker.types import Mount
from typing import Any, cast

from ..const import MACHINE_ID

RE_RETRYING_DOWNLOAD_STATUS = re.compile(r"Retrying in \d+ seconds?")

# Docker Hub registry identifier
DOCKER_HUB = "hub.docker.com"
# Docker Hub registry identifier (official default)
# Docker's default registry is docker.io
DOCKER_HUB = "docker.io"

# Regex to match images with a registry host (e.g., ghcr.io/org/image)
IMAGE_WITH_HOST = re.compile(r"^((?:[a-z0-9]+(?:-[a-z0-9]+)*\.)+[a-z]{2,})\/.+")
# Legacy Docker Hub identifier for backward compatibility
DOCKER_HUB_LEGACY = "hub.docker.com"


class Capabilities(StrEnum):
@@ -132,33 +132,94 @@ class PullImageLayerStage(Enum):
return None


@dataclass(slots=True, frozen=True)
class MountBindOptions:
"""Bind options for docker mount."""

propagation: PropagationMode | None = None
read_only_non_recursive: bool | None = None

def to_dict(self) -> dict[str, Any]:
"""To dictionary representation."""
out: dict[str, Any] = {}
if self.propagation:
out["Propagation"] = self.propagation.value
if self.read_only_non_recursive is not None:
out["ReadOnlyNonRecursive"] = self.read_only_non_recursive
return out


@dataclass(slots=True, frozen=True)
class DockerMount:
"""A docker mount."""

type: MountType
source: str
target: str
read_only: bool
bind_options: MountBindOptions | None = None

def to_dict(self) -> dict[str, Any]:
"""To dictionary representation."""
out: dict[str, Any] = {
"Type": self.type.value,
"Source": self.source,
"Target": self.target,
"ReadOnly": self.read_only,
}
if self.bind_options:
out["BindOptions"] = self.bind_options.to_dict()
return out


@dataclass(slots=True, frozen=True)
class Ulimit:
"""A linux user limit."""

name: str
soft: int
hard: int

def to_dict(self) -> dict[str, str | int]:
"""To dictionary representation."""
return {
"Name": self.name,
"Soft": self.soft,
"Hard": self.hard,
}


ENV_DUPLICATE_LOG_FILE = "HA_DUPLICATE_LOG_FILE"
ENV_TIME = "TZ"
ENV_TOKEN = "SUPERVISOR_TOKEN"
ENV_TOKEN_OLD = "HASSIO_TOKEN"

LABEL_MANAGED = "supervisor_managed"

MOUNT_DBUS = Mount(
type=MountType.BIND.value, source="/run/dbus", target="/run/dbus", read_only=True
MOUNT_DBUS = DockerMount(
type=MountType.BIND, source="/run/dbus", target="/run/dbus", read_only=True
)
MOUNT_DEV = Mount(
type=MountType.BIND.value, source="/dev", target="/dev", read_only=True
MOUNT_DEV = DockerMount(
type=MountType.BIND,
source="/dev",
target="/dev",
read_only=True,
bind_options=MountBindOptions(read_only_non_recursive=True),
)
MOUNT_DEV.setdefault("BindOptions", {})["ReadOnlyNonRecursive"] = True
MOUNT_DOCKER = Mount(
type=MountType.BIND.value,
MOUNT_DOCKER = DockerMount(
type=MountType.BIND,
source="/run/docker.sock",
target="/run/docker.sock",
read_only=True,
)
MOUNT_MACHINE_ID = Mount(
type=MountType.BIND.value,
MOUNT_MACHINE_ID = DockerMount(
type=MountType.BIND,
source=MACHINE_ID.as_posix(),
target=MACHINE_ID.as_posix(),
read_only=True,
)
MOUNT_UDEV = Mount(
type=MountType.BIND.value, source="/run/udev", target="/run/udev", read_only=True
MOUNT_UDEV = DockerMount(
type=MountType.BIND, source="/run/udev", target="/run/udev", read_only=True
)

PATH_PRIVATE_DATA = PurePath("/data")

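The new dataclasses serialize straight into the Docker Engine API's HostConfig shape. For illustration, MOUNT_DEV above would produce the following (assuming MountType.BIND's value is "bind", per the Engine API):

mount = DockerMount(
    type=MountType.BIND,
    source="/dev",
    target="/dev",
    read_only=True,
    bind_options=MountBindOptions(read_only_non_recursive=True),
)
assert mount.to_dict() == {
    "Type": "bind",
    "Source": "/dev",
    "Target": "/dev",
    "ReadOnly": True,
    "BindOptions": {"ReadOnlyNonRecursive": True},
}
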
@@ -2,13 +2,11 @@

import logging

from docker.types import Mount

from ..coresys import CoreSysAttributes
from ..exceptions import DockerJobError
from ..jobs.const import JobConcurrency
from ..jobs.decorator import Job
from .const import ENV_TIME, MOUNT_DBUS, MountType
from .const import ENV_TIME, MOUNT_DBUS, DockerMount, MountType
from .interface import DockerInterface

_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -47,8 +45,8 @@ class DockerDNS(DockerInterface, CoreSysAttributes):
security_opt=self.security_opt,
environment={ENV_TIME: self.sys_timezone},
mounts=[
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_dns.as_posix(),
target="/config",
read_only=False,

@@ -5,7 +5,6 @@ import logging
import re

from awesomeversion import AwesomeVersion
from docker.types import Mount

from ..const import LABEL_MACHINE
from ..exceptions import DockerJobError
@@ -14,6 +13,7 @@ from ..homeassistant.const import LANDINGPAGE
from ..jobs.const import JobConcurrency
from ..jobs.decorator import Job
from .const import (
ENV_DUPLICATE_LOG_FILE,
ENV_TIME,
ENV_TOKEN,
ENV_TOKEN_OLD,
@@ -25,6 +25,8 @@ from .const import (
PATH_PUBLIC_CONFIG,
PATH_SHARE,
PATH_SSL,
DockerMount,
MountBindOptions,
MountType,
PropagationMode,
)
@@ -90,15 +92,15 @@ class DockerHomeAssistant(DockerInterface):
)

@property
def mounts(self) -> list[Mount]:
def mounts(self) -> list[DockerMount]:
"""Return mounts for container."""
mounts = [
MOUNT_DEV,
MOUNT_DBUS,
MOUNT_UDEV,
# HA config folder
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target=PATH_PUBLIC_CONFIG.as_posix(),
read_only=False,
@@ -110,41 +112,45 @@ class DockerHomeAssistant(DockerInterface):
mounts.extend(
[
# All other folders
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_ssl.as_posix(),
target=PATH_SSL.as_posix(),
read_only=True,
),
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_share.as_posix(),
target=PATH_SHARE.as_posix(),
read_only=False,
propagation=PropagationMode.RSLAVE.value,
bind_options=MountBindOptions(
propagation=PropagationMode.RSLAVE
),
),
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_media.as_posix(),
target=PATH_MEDIA.as_posix(),
read_only=False,
propagation=PropagationMode.RSLAVE.value,
bind_options=MountBindOptions(
propagation=PropagationMode.RSLAVE
),
),
# Configuration audio
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_homeassistant.path_extern_pulse.as_posix(),
target="/etc/pulse/client.conf",
read_only=True,
),
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_plugins.audio.path_extern_pulse.as_posix(),
target="/run/audio",
read_only=True,
),
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_plugins.audio.path_extern_asound.as_posix(),
target="/etc/asound.conf",
read_only=True,
@@ -174,6 +180,8 @@ class DockerHomeAssistant(DockerInterface):
}
if restore_job_id:
environment[ENV_RESTORE_JOB_ID] = restore_job_id
if self.sys_homeassistant.duplicate_log_file:
environment[ENV_DUPLICATE_LOG_FILE] = "1"
await self._run(
tag=(self.sys_homeassistant.version),
name=self.name,
@@ -213,20 +221,20 @@ class DockerHomeAssistant(DockerInterface):
init=True,
entrypoint=[],
mounts=[
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target="/config",
read_only=False,
),
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_ssl.as_posix(),
target="/ssl",
read_only=True,
),
Mount(
type=MountType.BIND.value,
DockerMount(
type=MountType.BIND,
source=self.sys_config.path_extern_share.as_posix(),
target="/share",
read_only=False,

@@ -45,14 +45,20 @@ from ..jobs.decorator import Job
from ..jobs.job_group import JobGroup
from ..resolution.const import ContextType, IssueType, SuggestionType
from ..utils.sentry import async_capture_exception
from .const import DOCKER_HUB, ContainerState, PullImageLayerStage, RestartPolicy
from .const import (
DOCKER_HUB,
DOCKER_HUB_LEGACY,
ContainerState,
PullImageLayerStage,
RestartPolicy,
)
from .manager import CommandReturn, PullLogEntry
from .monitor import DockerContainerStateEvent
from .stats import DockerStats

_LOGGER: logging.Logger = logging.getLogger(__name__)

MAP_ARCH: dict[CpuArch | str, str] = {
MAP_ARCH: dict[CpuArch, str] = {
CpuArch.ARMV7: "linux/arm/v7",
CpuArch.ARMHF: "linux/arm/v6",
CpuArch.AARCH64: "linux/arm64",
@@ -184,7 +190,8 @@ class DockerInterface(JobGroup, ABC):
stored = self.sys_docker.config.registries[registry]
credentials[ATTR_USERNAME] = stored[ATTR_USERNAME]
credentials[ATTR_PASSWORD] = stored[ATTR_PASSWORD]
if registry != DOCKER_HUB:
# Don't include registry for Docker Hub (both official and legacy)
if registry not in (DOCKER_HUB, DOCKER_HUB_LEGACY):
credentials[ATTR_REGISTRY] = registry

_LOGGER.debug(
@@ -366,7 +373,7 @@ class DockerInterface(JobGroup, ABC):
if not image:
raise ValueError("Cannot pull without an image!")

image_arch = str(arch) if arch else self.sys_arch.supervisor
image_arch = arch or self.sys_arch.supervisor
listener: EventListener | None = None

_LOGGER.info("Downloading docker image %s with tag %s.", image, version)
@@ -450,35 +457,34 @@ class DockerInterface(JobGroup, ABC):
return True
return False

async def is_running(self) -> bool:
"""Return True if Docker is running."""
async def _get_container(self) -> Container | None:
"""Get docker container, returns None if not found."""
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
return await self.sys_run_in_executor(
self.sys_docker.containers_legacy.get, self.name
)
except docker.errors.NotFound:
return False
return None
except docker.errors.DockerException as err:
raise DockerAPIError() from err
raise DockerAPIError(
f"Docker API error occurred while getting container information: {err!s}"
) from err
except requests.RequestException as err:
raise DockerRequestError() from err
raise DockerRequestError(
f"Error communicating with Docker to get container information: {err!s}"
) from err

return docker_container.status == "running"
async def is_running(self) -> bool:
"""Return True if Docker is running."""
if docker_container := await self._get_container():
return docker_container.status == "running"
return False

async def current_state(self) -> ContainerState:
"""Return current state of container."""
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
return ContainerState.UNKNOWN
except docker.errors.DockerException as err:
raise DockerAPIError() from err
except requests.RequestException as err:
raise DockerRequestError() from err

return _container_state_from_model(docker_container)
if docker_container := await self._get_container():
return _container_state_from_model(docker_container)
return ContainerState.UNKNOWN

@Job(name="docker_interface_attach", concurrency=JobConcurrency.GROUP_QUEUE)
async def attach(
@@ -487,7 +493,7 @@ class DockerInterface(JobGroup, ABC):
"""Attach to running Docker container."""
with suppress(docker.errors.DockerException, requests.RequestException):
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
self.sys_docker.containers_legacy.get, self.name
)
self._meta = docker_container.attrs
self.sys_docker.monitor.watch_container(docker_container)
@@ -513,7 +519,9 @@ class DockerInterface(JobGroup, ABC):

# Successful?
if not self._meta:
raise DockerError()
raise DockerError(
f"Could not get metadata on container or image for {self.name}"
)
_LOGGER.info("Attaching to %s with version %s", self.image, self.version)

@Job(
@@ -525,8 +533,11 @@ class DockerInterface(JobGroup, ABC):
"""Run Docker image."""
raise NotImplementedError()

async def _run(self, **kwargs) -> None:
"""Run Docker image with retry inf necessary."""
async def _run(self, *, name: str, **kwargs) -> None:
"""Run Docker image with retry if necessary."""
if not (image := self.image):
raise ValueError(f"Cannot determine image to use to run {self.name}!")

if await self.is_running():
return

@@ -535,16 +546,14 @@ class DockerInterface(JobGroup, ABC):

# Create & Run container
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.run, self.image, **kwargs
)
container_metadata = await self.sys_docker.run(image, name=name, **kwargs)
except DockerNotFound as err:
# If image is missing, capture the exception as this shouldn't happen
await async_capture_exception(err)
raise

# Store metadata
self._meta = docker_container.attrs
self._meta = container_metadata

@Job(
name="docker_interface_stop",
@@ -603,9 +612,7 @@ class DockerInterface(JobGroup, ABC):
expected_cpu_arch: CpuArch | None = None,
) -> None:
"""Check we have expected image with correct arch."""
expected_image_cpu_arch = (
str(expected_cpu_arch) if expected_cpu_arch else self.sys_arch.supervisor
)
arch = expected_cpu_arch or self.sys_arch.supervisor
image_name = f"{expected_image}:{version!s}"
if self.image == expected_image:
try:
@@ -623,7 +630,7 @@ class DockerInterface(JobGroup, ABC):
# If we have an image and its the right arch, all set
# It seems that newer Docker version return a variant for arm64 images.
# Make sure we match linux/arm64 and linux/arm64/v8.
expected_image_arch = MAP_ARCH[expected_image_cpu_arch]
expected_image_arch = MAP_ARCH[arch]
if image_arch.startswith(expected_image_arch):
return
_LOGGER.info(
@@ -636,7 +643,7 @@ class DockerInterface(JobGroup, ABC):
# We're missing the image we need. Stop and clean up what we have then pull the right one
with suppress(DockerError):
await self.remove()
await self.install(version, expected_image, arch=expected_image_cpu_arch)
await self.install(version, expected_image, arch=arch)

@Job(
name="docker_interface_update",
@@ -718,14 +725,8 @@ class DockerInterface(JobGroup, ABC):

async def is_failed(self) -> bool:
"""Return True if Docker is failing state."""
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
if not (docker_container := await self._get_container()):
return False
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err

# container is not running
if docker_container.status != "exited":

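The refactor above folds three copies of the same try/except into one _get_container helper that treats "not found" as a value rather than an error; call sites then reduce to a walrus check. The shape of the pattern, in a self-contained sketch with stand-in names:

CONTAINERS = {"good": "running"}

async def get_status(name: str) -> str | None:
    """Return the container status, or None when it does not exist."""
    try:
        return CONTAINERS[name]
    except KeyError:
        return None  # "missing" is an expected state, not an exception

async def is_running(name: str) -> bool:
    # Each former call site shrinks to a walrus check over the helper.
    if status := await get_status(name):
        return status == "running"
    return False
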
@@ -13,10 +13,12 @@ import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
import re
|
||||
from typing import Any, Final, Self, cast
|
||||
from typing import Any, Final, Literal, Self, cast
|
||||
|
||||
import aiodocker
|
||||
from aiodocker.containers import DockerContainers
|
||||
from aiodocker.images import DockerImages
|
||||
from aiodocker.types import JSONObject
|
||||
from aiohttp import ClientSession, ClientTimeout, UnixConnector
|
||||
import attr
|
||||
from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
|
||||
@@ -49,9 +51,19 @@ from ..exceptions import (
|
||||
)
|
||||
from ..utils.common import FileConfiguration
|
||||
from ..validate import SCHEMA_DOCKER_CONFIG
|
||||
from .const import DOCKER_HUB, IMAGE_WITH_HOST, LABEL_MANAGED
|
||||
from .const import (
|
||||
DOCKER_HUB,
|
||||
DOCKER_HUB_LEGACY,
|
||||
LABEL_MANAGED,
|
||||
Capabilities,
|
||||
DockerMount,
|
||||
MountType,
|
||||
RestartPolicy,
|
||||
Ulimit,
|
||||
)
|
||||
from .monitor import DockerMonitor
|
||||
from .network import DockerNetwork
|
||||
from .utils import get_registry_from_image
|
||||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -212,19 +224,25 @@ class DockerConfig(FileConfiguration):
|
||||
|
||||
Matches the image against configured registries and returns the registry
|
||||
name if found, or None if no matching credentials are configured.
|
||||
|
||||
Uses Docker's domain detection logic from:
|
||||
vendor/github.com/distribution/reference/normalize.go
|
||||
"""
|
||||
if not self.registries:
|
||||
return None
|
||||
|
||||
# Check if image uses a custom registry (e.g., ghcr.io/org/image)
|
||||
matcher = IMAGE_WITH_HOST.match(image)
|
||||
if matcher:
|
||||
registry = matcher.group(1)
|
||||
registry = get_registry_from_image(image)
|
||||
if registry:
|
||||
if registry in self.registries:
|
||||
return registry
|
||||
# If no registry prefix, check for Docker Hub credentials
|
||||
elif DOCKER_HUB in self.registries:
|
||||
return DOCKER_HUB
|
||||
else:
|
||||
# No registry prefix means Docker Hub
|
||||
# Support both docker.io (official) and hub.docker.com (legacy)
|
||||
if DOCKER_HUB in self.registries:
|
||||
return DOCKER_HUB
|
||||
if DOCKER_HUB_LEGACY in self.registries:
|
||||
return DOCKER_HUB_LEGACY
|
||||
|
||||
return None
|
||||
|
||||
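
Taken together, the lookup above resolves credentials like this; a minimal sketch assuming a `registries` dict keyed by registry host (`pick_registry` is a hypothetical stand-in for the method, not part of this diff):

from supervisor.docker.utils import get_registry_from_image

DOCKER_HUB = "docker.io"
DOCKER_HUB_LEGACY = "hub.docker.com"

def pick_registry(image: str, registries: dict[str, dict]) -> str | None:
    # A custom registry prefix (e.g. ghcr.io/org/image) wins if credentials exist
    if registry := get_registry_from_image(image):
        return registry if registry in registries else None
    # No prefix means Docker Hub; prefer docker.io, fall back to the legacy key
    for hub in (DOCKER_HUB, DOCKER_HUB_LEGACY):
        if hub in registries:
            return hub
    return None

creds = {"ghcr.io": {"username": "u", "password": "p"}, "hub.docker.com": {"username": "u", "password": "p"}}
pick_registry("ghcr.io/home-assistant/supervisor", creds)  # -> "ghcr.io"
pick_registry("nginx", creds)  # -> "hub.docker.com" (legacy Docker Hub key)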
@@ -290,8 +308,13 @@ class DockerAPI(CoreSysAttributes):
        return self.docker.images

    @property
    def containers(self) -> ContainerCollection:
    def containers(self) -> DockerContainers:
        """Return API containers."""
        return self.docker.containers

    @property
    def containers_legacy(self) -> ContainerCollection:
        """Return API containers from Dockerpy."""
        return self.dockerpy.containers

    @property
@@ -324,50 +347,137 @@ class DockerAPI(CoreSysAttributes):
        """Stop docker events monitor."""
        await self.monitor.unload()

    def run(
    def _create_container_config(
        self,
        image: str,
        *,
        tag: str = "latest",
        dns: bool = True,
        ipv4: IPv4Address | None = None,
        **kwargs: Any,
    ) -> Container:
        """Create a Docker container and run it.
        init: bool = False,
        hostname: str | None = None,
        detach: bool = True,
        security_opt: list[str] | None = None,
        restart_policy: dict[str, RestartPolicy] | None = None,
        extra_hosts: dict[str, IPv4Address] | None = None,
        environment: dict[str, str | None] | None = None,
        mounts: list[DockerMount] | None = None,
        ports: dict[str, str | int | None] | None = None,
        oom_score_adj: int | None = None,
        network_mode: Literal["host"] | None = None,
        privileged: bool = False,
        device_cgroup_rules: list[str] | None = None,
        tmpfs: dict[str, str] | None = None,
        entrypoint: list[str] | None = None,
        cap_add: list[Capabilities] | None = None,
        ulimits: list[Ulimit] | None = None,
        cpu_rt_runtime: int | None = None,
        stdin_open: bool = False,
        pid_mode: str | None = None,
        uts_mode: str | None = None,
    ) -> JSONObject:
        """Map kwargs to create container config.

        Need run inside executor.
        This only covers the docker options we currently use. It is not intended
        to be exhaustive as its dockerpy equivalent was. We'll add to it as we
        make use of new features.
"""
|
||||
name: str | None = kwargs.get("name")
|
||||
network_mode: str | None = kwargs.get("network_mode")
|
||||
hostname: str | None = kwargs.get("hostname")
|
||||
# Set up host dependent config for container
|
||||
host_config: dict[str, Any] = {
|
||||
"NetworkMode": network_mode if network_mode else "default",
|
||||
"Init": init,
|
||||
"Privileged": privileged,
|
||||
}
|
||||
if security_opt:
|
||||
host_config["SecurityOpt"] = security_opt
|
||||
if restart_policy:
|
||||
host_config["RestartPolicy"] = restart_policy
|
||||
if extra_hosts:
|
||||
host_config["ExtraHosts"] = [f"{k}:{v}" for k, v in extra_hosts.items()]
|
||||
if mounts:
|
||||
host_config["Mounts"] = [mount.to_dict() for mount in mounts]
|
||||
if oom_score_adj is not None:
|
||||
host_config["OomScoreAdj"] = oom_score_adj
|
||||
if device_cgroup_rules:
|
||||
host_config["DeviceCgroupRules"] = device_cgroup_rules
|
||||
if tmpfs:
|
||||
host_config["Tmpfs"] = tmpfs
|
||||
if cap_add:
|
||||
host_config["CapAdd"] = cap_add
|
||||
if cpu_rt_runtime is not None:
|
||||
host_config["CPURealtimeRuntime"] = cpu_rt_runtime
|
||||
if pid_mode:
|
||||
host_config["PidMode"] = pid_mode
|
||||
if uts_mode:
|
||||
host_config["UtsMode"] = uts_mode
|
||||
if ulimits:
|
||||
host_config["Ulimits"] = [limit.to_dict() for limit in ulimits]
|
||||
|
||||
if "labels" not in kwargs:
|
||||
kwargs["labels"] = {}
|
||||
elif isinstance(kwargs["labels"], list):
|
||||
kwargs["labels"] = dict.fromkeys(kwargs["labels"], "")
|
||||
# Full container config
|
||||
config: dict[str, Any] = {
|
||||
"Image": f"{image}:{tag}",
|
||||
"Labels": {LABEL_MANAGED: ""},
|
||||
"OpenStdin": stdin_open,
|
||||
"StdinOnce": not detach and stdin_open,
|
||||
"AttachStdin": not detach and stdin_open,
|
||||
"AttachStdout": not detach,
|
||||
"AttachStderr": not detach,
|
||||
"HostConfig": host_config,
|
||||
}
|
||||
if hostname:
|
||||
config["Hostname"] = hostname
|
||||
if environment:
|
||||
config["Env"] = [
|
||||
env if val is None else f"{env}={val}"
|
||||
for env, val in environment.items()
|
||||
]
|
||||
if entrypoint:
|
||||
config["Entrypoint"] = entrypoint
|
||||
|
||||
kwargs["labels"][LABEL_MANAGED] = ""
|
||||
|
||||
# Setup DNS
|
||||
# Set up networking
|
||||
if dns:
|
||||
kwargs["dns"] = [str(self.network.dns)]
|
||||
kwargs["dns_search"] = [DNS_SUFFIX]
|
||||
host_config["Dns"] = [str(self.network.dns)]
|
||||
host_config["DnsSearch"] = [DNS_SUFFIX]
|
||||
# CoreDNS forward plug-in fails in ~6s, then fallback triggers.
|
||||
# However, the default timeout of glibc and musl is 5s. Increase
|
||||
# default timeout to make sure CoreDNS fallback is working
|
||||
# on first query.
|
||||
kwargs["dns_opt"] = ["timeout:10"]
|
||||
host_config["DnsOptions"] = ["timeout:10"]
|
||||
if hostname:
|
||||
kwargs["domainname"] = DNS_SUFFIX
|
||||
config["Domainname"] = DNS_SUFFIX
|
||||
|
||||
# Setup network
|
||||
if not network_mode:
|
||||
kwargs["network"] = None
|
||||
# Setup ports
|
||||
if ports:
|
||||
port_bindings = {
|
||||
port if "/" in port else f"{port}/tcp": [
|
||||
{"HostIp": "", "HostPort": str(host_port) if host_port else ""}
|
||||
]
|
||||
for port, host_port in ports.items()
|
||||
}
|
||||
config["ExposedPorts"] = {port: {} for port in port_bindings}
|
||||
host_config["PortBindings"] = port_bindings
|
||||
|
||||
return config
|
||||
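
For orientation, a hedged sketch of the payload this mapping produces — field names follow the Docker Engine "create container" API, and the call below is illustrative only since _create_container_config is internal:

# Illustration only: assumes an initialized DockerAPI instance `docker_api`.
config = docker_api._create_container_config(
    "ghcr.io/home-assistant/amd64-hassio-audio",  # hypothetical image
    tag="2025.01.1",
    hostname="hassio-audio",
)
# config now resembles (abridged; exact keys depend on the options passed):
# {
#     "Image": "ghcr.io/home-assistant/amd64-hassio-audio:2025.01.1",
#     "Labels": {LABEL_MANAGED: ""},
#     "OpenStdin": False,
#     "AttachStdout": False,
#     "AttachStderr": False,
#     "HostConfig": {"NetworkMode": "default", "Init": False, "Privileged": False},
#     "Hostname": "hassio-audio",
# }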

    async def run(
        self,
        image: str,
        *,
        name: str,
        tag: str = "latest",
        hostname: str | None = None,
        mounts: list[DockerMount] | None = None,
        network_mode: Literal["host"] | None = None,
        ipv4: IPv4Address | None = None,
        **kwargs,
    ) -> dict[str, Any]:
        """Create a Docker container and run it."""
        if not image or not name:
            raise ValueError("image, name and tag cannot be an empty string!")

        # Setup cidfile and bind mount it
        cidfile_path = None
        if name:
            cidfile_path = self.coresys.config.path_cid_files / f"{name}.cid"
        cidfile_path = self.coresys.config.path_cid_files / f"{name}.cid"

        def create_cidfile() -> None:
            # Remove the file/directory if it exists e.g. as a leftover from unclean shutdown
            # Note: Can be a directory if Docker auto-started container with restart policy
            # before Supervisor could write the CID file
@@ -381,31 +491,37 @@ class DockerAPI(CoreSysAttributes):
            # from creating it as a directory if container auto-starts
            cidfile_path.touch()

        extern_cidfile_path = (
            self.coresys.config.path_extern_cid_files / f"{name}.cid"
        )
        await self.sys_run_in_executor(create_cidfile)

        # Bind mount to /run/cid in container
        if "volumes" not in kwargs:
            kwargs["volumes"] = {}
        kwargs["volumes"][str(extern_cidfile_path)] = {
            "bind": "/run/cid",
            "mode": "ro",
        }
        # Bind mount to /run/cid in container
        extern_cidfile_path = self.coresys.config.path_extern_cid_files / f"{name}.cid"
        cid_mount = DockerMount(
            type=MountType.BIND,
            source=extern_cidfile_path.as_posix(),
            target="/run/cid",
            read_only=True,
        )
        if mounts is None:
            mounts = [cid_mount]
        else:
            mounts = [*mounts, cid_mount]

        # Create container
        config = self._create_container_config(
            image,
            tag=tag,
            hostname=hostname,
            mounts=mounts,
            network_mode=network_mode,
            **kwargs,
        )
        try:
            container = self.containers.create(
                f"{image}:{tag}", use_config_proxy=False, **kwargs
            )
            if cidfile_path:
                with cidfile_path.open("w", encoding="ascii") as cidfile:
                    cidfile.write(str(container.id))
        except docker_errors.NotFound as err:
            raise DockerNotFound(
                f"Image {image}:{tag} does not exist for {name}", _LOGGER.error
            ) from err
        except docker_errors.DockerException as err:
            container = await self.containers.create(config, name=name)
        except aiodocker.DockerError as err:
            if err.status == HTTPStatus.NOT_FOUND:
                raise DockerNotFound(
                    f"Image {image}:{tag} does not exist for {name}", _LOGGER.error
                ) from err
            raise DockerAPIError(
                f"Can't create container from {name}: {err}", _LOGGER.error
            ) from err
@@ -414,43 +530,62 @@ class DockerAPI(CoreSysAttributes):
                f"Dockerd connection issue for {name}: {err}", _LOGGER.error
            ) from err

        # Attach network
        if not network_mode:
            alias = [hostname] if hostname else None
            try:
                self.network.attach_container(container, alias=alias, ipv4=ipv4)
            except DockerError:
                _LOGGER.warning("Can't attach %s to hassio-network!", name)
            else:
                with suppress(DockerError):
                    self.network.detach_default_bridge(container)
        else:
            host_network: Network = self.dockerpy.networks.get(DOCKER_NETWORK_HOST)
        # Get container metadata
        try:
            container_attrs = await container.show()
        except aiodocker.DockerError as err:
            raise DockerAPIError(
                f"Can't inspect new container {name}: {err}", _LOGGER.error
            ) from err
        except requests.RequestException as err:
            raise DockerRequestError(
                f"Dockerd connection issue for {name}: {err}", _LOGGER.error
            ) from err

            # Check if container is registered on host
            # https://github.com/moby/moby/issues/23302
            if name and name in (
                val.get("Name")
                for val in host_network.attrs.get("Containers", {}).values()
            ):
                with suppress(docker_errors.NotFound):
                    host_network.disconnect(name, force=True)
        # Setup network and store container id in cidfile
        def setup_network_and_cidfile() -> None:
            # Write cidfile
            with cidfile_path.open("w", encoding="ascii") as cidfile:
                cidfile.write(str(container.id))

            # Attach network
            if not network_mode:
                alias = [hostname] if hostname else None
                try:
                    self.network.attach_container(
                        container.id, name, alias=alias, ipv4=ipv4
                    )
                except DockerError:
                    _LOGGER.warning("Can't attach %s to hassio-network!", name)
                else:
                    with suppress(DockerError):
                        self.network.detach_default_bridge(container.id, name)
            else:
                host_network: Network = self.dockerpy.networks.get(DOCKER_NETWORK_HOST)

                # Check if container is registered on host
                # https://github.com/moby/moby/issues/23302
                if name and name in (
                    val.get("Name")
                    for val in host_network.attrs.get("Containers", {}).values()
                ):
                    with suppress(docker_errors.NotFound):
                        host_network.disconnect(name, force=True)

        await self.sys_run_in_executor(setup_network_and_cidfile)

        # Run container
        try:
            container.start()
        except docker_errors.DockerException as err:
            await container.start()
        except aiodocker.DockerError as err:
            raise DockerAPIError(f"Can't start {name}: {err}", _LOGGER.error) from err
        except requests.RequestException as err:
            raise DockerRequestError(
                f"Dockerd connection issue for {name}: {err}", _LOGGER.error
            ) from err

        # Update metadata
        with suppress(docker_errors.DockerException, requests.RequestException):
            container.reload()

        return container
        # Return metadata
        return container_attrs

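With this refactor, run() returns the inspect payload (a dict) rather than a dockerpy Container, so callers read metadata from keys of the standard Docker Engine inspect output. A hedged usage sketch (the caller and image are illustrative):

# Hypothetical caller, for illustration only:
attrs = await docker_api.run(
    "ghcr.io/home-assistant/amd64-hassio-dns",
    name="hassio_dns",
    tag="2025.01.0",
    hostname="hassio-dns",
)
container_id = attrs["Id"]  # inspect payload field from the Docker Engine API
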
    async def pull_image(
        self,
@@ -467,8 +602,10 @@ class DockerAPI(CoreSysAttributes):
        raises only if the get fails afterwards. Additionally it fires progress reports for the pull
        on the bus so listeners can use that to update status for users.
        """
        # Use timeout=None to disable timeout for pull operations, matching docker-py behavior.
        # aiodocker converts None to ClientTimeout(total=None) which disables the timeout.
        async for e in self.images.pull(
            repository, tag=tag, platform=platform, auth=auth, stream=True
            repository, tag=tag, platform=platform, auth=auth, stream=True, timeout=None
        ):
            entry = PullLogEntry.from_pull_log_dict(job_id, e)
            if entry.error:
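
The comment above leans on aiohttp's timeout semantics; a minimal sketch of the same idea outside Supervisor (aiohttp is already a dependency here; the URL and function are placeholders):

import aiohttp

# total=None disables the overall deadline, so a long image pull cannot be
# killed by a client-side timeout; connect timeouts can still be configured.
timeout = aiohttp.ClientTimeout(total=None)

async def fetch(url: str) -> bytes:
    async with aiohttp.ClientSession(timeout=timeout) as session:
        async with session.get(url) as resp:
            return await resp.read()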
@@ -601,16 +738,24 @@ class DockerAPI(CoreSysAttributes):
    ) -> bool:
        """Return True if docker container exists in good state and is built from expected image."""
        try:
            docker_container = await self.sys_run_in_executor(self.containers.get, name)
            docker_container = await self.sys_run_in_executor(
                self.containers_legacy.get, name
            )
            docker_image = await self.images.inspect(f"{image}:{version}")
        except docker_errors.NotFound:
            return False
        except aiodocker.DockerError as err:
            if err.status == HTTPStatus.NOT_FOUND:
                return False
            raise DockerError() from err
            raise DockerError(
                f"Could not get container {name} or image {image}:{version} to check state: {err!s}",
                _LOGGER.error,
            ) from err
        except (docker_errors.DockerException, requests.RequestException) as err:
            raise DockerError() from err
            raise DockerError(
                f"Could not get container {name} or image {image}:{version} to check state: {err!s}",
                _LOGGER.error,
            ) from err

        # Check the image is correct and state is good
        return (
@@ -624,11 +769,15 @@ class DockerAPI(CoreSysAttributes):
    ) -> None:
        """Stop/remove Docker container."""
        try:
            docker_container: Container = self.containers.get(name)
            docker_container: Container = self.containers_legacy.get(name)
        except docker_errors.NotFound:
            # Generally suppressed so we don't log this
            raise DockerNotFound() from None
        except (docker_errors.DockerException, requests.RequestException) as err:
            raise DockerError() from err
            raise DockerError(
                f"Could not get container {name} for stopping: {err!s}",
                _LOGGER.error,
            ) from err

        if docker_container.status == "running":
            _LOGGER.info("Stopping %s application", name)
@@ -647,7 +796,7 @@ class DockerAPI(CoreSysAttributes):
    def start_container(self, name: str) -> None:
        """Start Docker container."""
        try:
            docker_container: Container = self.containers.get(name)
            docker_container: Container = self.containers_legacy.get(name)
        except docker_errors.NotFound:
            raise DockerNotFound(
                f"{name} not found for starting up", _LOGGER.error
@@ -666,11 +815,15 @@ class DockerAPI(CoreSysAttributes):
    def restart_container(self, name: str, timeout: int) -> None:
        """Restart docker container."""
        try:
            container: Container = self.containers.get(name)
            container: Container = self.containers_legacy.get(name)
        except docker_errors.NotFound:
            raise DockerNotFound() from None
            raise DockerNotFound(
                f"Container {name} not found for restarting", _LOGGER.warning
            ) from None
        except (docker_errors.DockerException, requests.RequestException) as err:
            raise DockerError() from err
            raise DockerError(
                f"Could not get container {name} for restarting: {err!s}", _LOGGER.error
            ) from err

        _LOGGER.info("Restarting %s", name)
        try:
@@ -681,11 +834,15 @@ class DockerAPI(CoreSysAttributes):
    def container_logs(self, name: str, tail: int = 100) -> bytes:
        """Return Docker logs of container."""
        try:
            docker_container: Container = self.containers.get(name)
            docker_container: Container = self.containers_legacy.get(name)
        except docker_errors.NotFound:
            raise DockerNotFound() from None
            raise DockerNotFound(
                f"Container {name} not found for logs", _LOGGER.warning
            ) from None
        except (docker_errors.DockerException, requests.RequestException) as err:
            raise DockerError() from err
            raise DockerError(
                f"Could not get container {name} for logs: {err!s}", _LOGGER.error
            ) from err

        try:
            return docker_container.logs(tail=tail, stdout=True, stderr=True)
@@ -697,18 +854,23 @@ class DockerAPI(CoreSysAttributes):
    def container_stats(self, name: str) -> dict[str, Any]:
        """Read and return stats from container."""
        try:
            docker_container: Container = self.containers.get(name)
            docker_container: Container = self.containers_legacy.get(name)
        except docker_errors.NotFound:
            raise DockerNotFound() from None
            raise DockerNotFound(
                f"Container {name} not found for stats", _LOGGER.warning
            ) from None
        except (docker_errors.DockerException, requests.RequestException) as err:
            raise DockerError() from err
            raise DockerError(
                f"Could not inspect container '{name}': {err!s}", _LOGGER.error
            ) from err

        # container is not running
        if docker_container.status != "running":
            raise DockerError(f"Container {name} is not running", _LOGGER.error)

        try:
            return docker_container.stats(stream=False)
            # When stream=False, stats() returns dict, not Iterator
            return cast(dict[str, Any], docker_container.stats(stream=False))
        except (docker_errors.DockerException, requests.RequestException) as err:
            raise DockerError(
                f"Can't read stats from {name}: {err}", _LOGGER.error
@@ -717,17 +879,23 @@ class DockerAPI(CoreSysAttributes):
    def container_run_inside(self, name: str, command: str) -> CommandReturn:
        """Execute a command inside Docker container."""
        try:
            docker_container: Container = self.containers.get(name)
            docker_container: Container = self.containers_legacy.get(name)
        except docker_errors.NotFound:
            raise DockerNotFound() from None
            raise DockerNotFound(
                f"Container {name} not found for running command", _LOGGER.warning
            ) from None
        except (docker_errors.DockerException, requests.RequestException) as err:
            raise DockerError() from err
            raise DockerError(
                f"Can't get container {name} to run command: {err!s}"
            ) from err

        # Execute
        try:
            code, output = docker_container.exec_run(command)
        except (docker_errors.DockerException, requests.RequestException) as err:
            raise DockerError() from err
            raise DockerError(
                f"Can't run command in container {name}: {err!s}"
            ) from err

        return CommandReturn(code, output)

@@ -760,7 +928,7 @@ class DockerAPI(CoreSysAttributes):
        """Import a tar file as image."""
        try:
            with tar_file.open("rb") as read_tar:
                resp: list[dict[str, Any]] = self.images.import_image(read_tar)
                resp: list[dict[str, Any]] = await self.images.import_image(read_tar)
        except (aiodocker.DockerError, OSError) as err:
            raise DockerError(
                f"Can't import image from tar: {err}", _LOGGER.error

@@ -7,6 +7,7 @@ import logging
from typing import Self, cast

import docker
from docker.models.networks import Network
import requests

from ..const import (
@@ -59,7 +60,7 @@ class DockerNetwork:
    def __init__(self, docker_client: docker.DockerClient):
        """Initialize internal Supervisor network."""
        self.docker: docker.DockerClient = docker_client
        self._network: docker.models.networks.Network
        self._network: Network

    async def post_init(
        self, enable_ipv6: bool | None = None, mtu: int | None = None
@@ -76,7 +77,7 @@ class DockerNetwork:
        return DOCKER_NETWORK

    @property
    def network(self) -> docker.models.networks.Network:
    def network(self) -> Network:
        """Return docker network."""
        return self._network

@@ -117,7 +118,7 @@ class DockerNetwork:

    def _get_network(
        self, enable_ipv6: bool | None = None, mtu: int | None = None
    ) -> docker.models.networks.Network:
    ) -> Network:
        """Get supervisor network."""
        try:
            if network := self.docker.networks.get(DOCKER_NETWORK):
@@ -218,7 +219,8 @@ class DockerNetwork:

    def attach_container(
        self,
        container: docker.models.containers.Container,
        container_id: str,
        name: str,
        alias: list[str] | None = None,
        ipv4: IPv4Address | None = None,
    ) -> None:
@@ -231,15 +233,15 @@ class DockerNetwork:
        self.network.reload()

        # Check stale Network
        if container.name and container.name in (
        if name in (
            val.get("Name") for val in self.network.attrs.get("Containers", {}).values()
        ):
            self.stale_cleanup(container.name)
            self.stale_cleanup(name)

        # Attach Network
        try:
            self.network.connect(
                container, aliases=alias, ipv4_address=str(ipv4) if ipv4 else None
                container_id, aliases=alias, ipv4_address=str(ipv4) if ipv4 else None
            )
        except (
            docker.errors.NotFound,
@@ -248,7 +250,7 @@ class DockerNetwork:
            requests.RequestException,
        ) as err:
            raise DockerError(
                f"Can't connect {container.name} to Supervisor network: {err}",
                f"Can't connect {name} to Supervisor network: {err}",
                _LOGGER.error,
            ) from err

@@ -272,19 +274,20 @@ class DockerNetwork:
        ) as err:
            raise DockerError(f"Can't find {name}: {err}", _LOGGER.error) from err

        if container.id not in self.containers:
            self.attach_container(container, alias, ipv4)
        if not (container_id := container.id):
            raise DockerError(f"Received invalid metadata from docker for {name}")

    def detach_default_bridge(
        self, container: docker.models.containers.Container
    ) -> None:
        if container_id not in self.containers:
            self.attach_container(container_id, name, alias, ipv4)

    def detach_default_bridge(self, container_id: str, name: str) -> None:
        """Detach default Docker bridge.

        Need run inside executor.
        """
        try:
            default_network = self.docker.networks.get(DOCKER_NETWORK_DRIVER)
            default_network.disconnect(container)
            default_network.disconnect(container_id)
        except docker.errors.NotFound:
            pass
        except (
@@ -293,7 +296,7 @@ class DockerNetwork:
            requests.RequestException,
        ) as err:
            raise DockerError(
                f"Can't disconnect {container.name} from default network: {err}",
                f"Can't disconnect {name} from default network: {err}",
                _LOGGER.warning,
            ) from err

@@ -54,7 +54,7 @@ class DockerSupervisor(DockerInterface):
        """Attach to running docker container."""
        try:
            docker_container = await self.sys_run_in_executor(
                self.sys_docker.containers.get, self.name
                self.sys_docker.containers_legacy.get, self.name
            )
        except (docker.errors.DockerException, requests.RequestException) as err:
            raise DockerError() from err
@@ -74,7 +74,8 @@ class DockerSupervisor(DockerInterface):
        _LOGGER.info("Connecting Supervisor to hassio-network")
        await self.sys_run_in_executor(
            self.sys_docker.network.attach_container,
            docker_container,
            docker_container.id,
            self.name,
            alias=["supervisor"],
            ipv4=self.sys_docker.network.supervisor,
        )
@@ -90,7 +91,7 @@ class DockerSupervisor(DockerInterface):

        Need run inside executor.
        """
        try:
            docker_container = self.sys_docker.containers.get(self.name)
            docker_container = self.sys_docker.containers_legacy.get(self.name)
        except (docker.errors.DockerException, requests.RequestException) as err:
            raise DockerError(
                f"Could not get Supervisor container for retag: {err}", _LOGGER.error
@@ -118,7 +119,7 @@ class DockerSupervisor(DockerInterface):
        """Update start tag to new version."""
        try:
            docker_container = await self.sys_run_in_executor(
                self.sys_docker.containers.get, self.name
                self.sys_docker.containers_legacy.get, self.name
            )
            docker_image = await self.sys_docker.images.inspect(f"{image}:{version!s}")
        except (

57
supervisor/docker/utils.py
Normal file
@@ -0,0 +1,57 @@
"""Docker utilities."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
|
||||
# Docker image reference domain regex
|
||||
# Based on Docker's reference implementation:
|
||||
# vendor/github.com/distribution/reference/normalize.go
|
||||
#
|
||||
# A domain is detected if the part before the first / contains:
|
||||
# - "localhost" (with optional port)
|
||||
# - Contains "." (like registry.example.com or 127.0.0.1)
|
||||
# - Contains ":" (like myregistry:5000)
|
||||
# - IPv6 addresses in brackets (like [::1]:5000)
|
||||
#
|
||||
# Note: Docker also treats uppercase letters as registry indicators since
|
||||
# namespaces must be lowercase, but this regex handles lowercase matching
|
||||
# and the get_registry_from_image() function validates the registry rules.
|
||||
IMAGE_REGISTRY_REGEX = re.compile(
|
||||
r"^(?P<registry>"
|
||||
r"localhost(?::[0-9]+)?|" # localhost with optional port
|
||||
r"(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])" # domain component
|
||||
r"(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))*" # more components
|
||||
r"(?::[0-9]+)?|" # optional port
|
||||
r"\[[a-fA-F0-9:]+\](?::[0-9]+)?" # IPv6 with optional port
|
||||
r")/" # must be followed by /
|
||||
)
|
||||
|
||||
|
||||
def get_registry_from_image(image_ref: str) -> str | None:
|
||||
"""Extract registry from Docker image reference.
|
||||
|
||||
Returns the registry if the image reference contains one,
|
||||
or None if the image uses Docker Hub (docker.io).
|
||||
|
||||
Based on Docker's reference implementation:
|
||||
vendor/github.com/distribution/reference/normalize.go
|
||||
|
||||
Examples:
|
||||
get_registry_from_image("nginx") -> None (docker.io)
|
||||
get_registry_from_image("library/nginx") -> None (docker.io)
|
||||
get_registry_from_image("myregistry.com/nginx") -> "myregistry.com"
|
||||
get_registry_from_image("localhost/myimage") -> "localhost"
|
||||
get_registry_from_image("localhost:5000/myimage") -> "localhost:5000"
|
||||
get_registry_from_image("registry.io:5000/org/app:v1") -> "registry.io:5000"
|
||||
get_registry_from_image("[::1]:5000/myimage") -> "[::1]:5000"
|
||||
|
||||
"""
|
||||
match = IMAGE_REGISTRY_REGEX.match(image_ref)
|
||||
if match:
|
||||
registry = match.group("registry")
|
||||
# Must contain '.' or ':' or be 'localhost' to be a real registry
|
||||
# This prevents treating "myuser/myimage" as having registry "myuser"
|
||||
if "." in registry or ":" in registry or registry == "localhost":
|
||||
return registry
|
||||
return None # No registry = Docker Hub (docker.io)
|
||||
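
A quick check of the edge cases listed in the docstring; a hedged sketch only, and the test module name is hypothetical:

# tests/docker/test_utils.py (hypothetical), exercising the rules above:
import pytest

from supervisor.docker.utils import get_registry_from_image

@pytest.mark.parametrize(
    ("image_ref", "expected"),
    [
        ("nginx", None),
        ("library/nginx", None),
        ("myuser/myimage", None),  # "myuser" has no "." or ":", so not a registry
        ("ghcr.io/home-assistant/supervisor", "ghcr.io"),
        ("localhost:5000/myimage", "localhost:5000"),
        ("[::1]:5000/myimage", "[::1]:5000"),
    ],
)
def test_get_registry_from_image(image_ref: str, expected: str | None) -> None:
    assert get_registry_from_image(image_ref) == expected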
@@ -1,25 +1,25 @@
"""Core Exceptions."""

from collections.abc import Callable
from collections.abc import Callable, Mapping
from typing import Any

MESSAGE_CHECK_SUPERVISOR_LOGS = (
    "Check supervisor logs for details (check with '{logs_command}')"
)
EXTRA_FIELDS_LOGS_COMMAND = {"logs_command": "ha supervisor logs"}


class HassioError(Exception):
    """Root exception."""

    error_key: str | None = None
    message_template: str | None = None
    extra_fields: dict[str, Any] | None = None

    def __init__(
        self,
        message: str | None = None,
        logger: Callable[..., None] | None = None,
        *,
        extra_fields: dict[str, Any] | None = None,
        self, message: str | None = None, logger: Callable[..., None] | None = None
    ) -> None:
        """Raise & log."""
        self.extra_fields = extra_fields or {}

        if not message and self.message_template:
            message = (
                self.message_template.format(**self.extra_fields)
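
The error_key/message_template/extra_fields pattern turns structured fields into a formatted message. A minimal sketch of how a subclass is expected to use it (the DemoError class is hypothetical, mirroring the subclasses added later in this diff):

# Hypothetical subclass, for illustration only:
class DemoError(HassioError):
    error_key = "demo_error"
    message_template = "Demo failed for {thing}"

    def __init__(self, logger=None, *, thing: str) -> None:
        # Set the fields first; the base class formats the template with them
        self.extra_fields = {"thing": thing}
        super().__init__(None, logger)

err = DemoError(thing="add-on X")
str(err)  # -> "Demo failed for add-on X", built from the template and fields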
@@ -41,6 +41,94 @@ class HassioNotSupportedError(HassioError):
    """Function is not supported."""


# API


class APIError(HassioError, RuntimeError):
    """API errors."""

    status = 400
    headers: Mapping[str, str] | None = None

    def __init__(
        self,
        message: str | None = None,
        logger: Callable[..., None] | None = None,
        *,
        headers: Mapping[str, str] | None = None,
        job_id: str | None = None,
    ) -> None:
        """Raise & log, optionally with job."""
        super().__init__(message, logger)
        self.headers = headers
        self.job_id = job_id


class APIUnauthorized(APIError):
    """API unauthorized error."""

    status = 401


class APIForbidden(APIError):
    """API forbidden error."""

    status = 403


class APINotFound(APIError):
    """API not found error."""

    status = 404


class APIGone(APIError):
    """API is no longer available."""

    status = 410


class APITooManyRequests(APIError):
    """API too many requests error."""

    status = 429


class APIInternalServerError(APIError):
    """API internal server error."""

    status = 500


class APIAddonNotInstalled(APIError):
    """Not installed addon requested at addons API."""


class APIDBMigrationInProgress(APIError):
"""Service is unavailable due to an offline DB migration is in progress."""
|
||||

    status = 503


class APIUnknownSupervisorError(APIError):
    """Unknown error occurred within supervisor. Adds supervisor check logs rider to message template."""

    status = 500

    def __init__(
        self,
        logger: Callable[..., None] | None = None,
        *,
        job_id: str | None = None,
    ) -> None:
        """Initialize exception."""
        self.message_template = (
            f"{self.message_template}. {MESSAGE_CHECK_SUPERVISOR_LOGS}"
        )
        self.extra_fields = (self.extra_fields or {}) | EXTRA_FIELDS_LOGS_COMMAND
        super().__init__(None, logger, job_id=job_id)
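
To make the rider concrete: a class that mixes in APIUnknownSupervisorError ends up with the logs hint appended to its template. A hedged sketch using SupervisorUnknownError, which is defined later in this diff:

# Illustration only; assumes the classes defined in this diff:
err = SupervisorUnknownError()
str(err)
# -> "An unknown error occurred with Supervisor. "
#    "Check supervisor logs for details (check with 'ha supervisor logs')"
err.extra_fields  # includes {"logs_command": "ha supervisor logs"}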


# JobManager


@@ -122,6 +210,13 @@ class SupervisorAppArmorError(SupervisorError):
    """Supervisor AppArmor error."""


class SupervisorUnknownError(SupervisorError, APIUnknownSupervisorError):
    """Raise when an unknown error occurs interacting with Supervisor or its container."""

    error_key = "supervisor_unknown_error"
    message_template = "An unknown error occurred with Supervisor"


class SupervisorJobError(SupervisorError, JobException):
    """Raise on job errors."""

@@ -250,6 +345,54 @@ class AddonConfigurationError(AddonsError):
    """Error with add-on configuration."""


class AddonConfigurationInvalidError(AddonConfigurationError, APIError):
    """Raise if invalid configuration provided for addon."""

    error_key = "addon_configuration_invalid_error"
    message_template = "Add-on {addon} has invalid options: {validation_error}"

    def __init__(
        self,
        logger: Callable[..., None] | None = None,
        *,
        addon: str,
        validation_error: str,
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"addon": addon, "validation_error": validation_error}
        super().__init__(None, logger)


class AddonBootConfigCannotChangeError(AddonsError, APIError):
    """Raise if user attempts to change addon boot config when it can't be changed."""

    error_key = "addon_boot_config_cannot_change_error"
    message_template = (
        "Addon {addon} boot option is set to {boot_config} so it cannot be changed"
    )

    def __init__(
        self, logger: Callable[..., None] | None = None, *, addon: str, boot_config: str
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"addon": addon, "boot_config": boot_config}
        super().__init__(None, logger)


class AddonNotRunningError(AddonsError, APIError):
    """Raise when an addon is not running."""

    error_key = "addon_not_running_error"
    message_template = "Add-on {addon} is not running"

    def __init__(
        self, logger: Callable[..., None] | None = None, *, addon: str
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"addon": addon}
        super().__init__(None, logger)


class AddonNotSupportedError(HassioNotSupportedError):
    """Addon doesn't support a function."""

@@ -268,11 +411,8 @@ class AddonNotSupportedArchitectureError(AddonNotSupportedError):
        architectures: list[str],
    ) -> None:
        """Initialize exception."""
        super().__init__(
            None,
            logger,
            extra_fields={"slug": slug, "architectures": ", ".join(architectures)},
        )
        self.extra_fields = {"slug": slug, "architectures": ", ".join(architectures)}
        super().__init__(None, logger)


class AddonNotSupportedMachineTypeError(AddonNotSupportedError):
@@ -289,11 +429,8 @@ class AddonNotSupportedMachineTypeError(AddonNotSupportedError):
        machine_types: list[str],
    ) -> None:
        """Initialize exception."""
        super().__init__(
            None,
            logger,
            extra_fields={"slug": slug, "machine_types": ", ".join(machine_types)},
        )
        self.extra_fields = {"slug": slug, "machine_types": ", ".join(machine_types)}
        super().__init__(None, logger)


class AddonNotSupportedHomeAssistantVersionError(AddonNotSupportedError):
@@ -310,11 +447,96 @@ class AddonNotSupportedHomeAssistantVersionError(AddonNotSupportedError):
        version: str,
    ) -> None:
        """Initialize exception."""
        super().__init__(
            None,
            logger,
            extra_fields={"slug": slug, "version": version},
        )
        self.extra_fields = {"slug": slug, "version": version}
        super().__init__(None, logger)


class AddonNotSupportedWriteStdinError(AddonNotSupportedError, APIError):
    """Addon does not support writing to stdin."""

    error_key = "addon_not_supported_write_stdin_error"
    message_template = "Add-on {addon} does not support writing to stdin"

    def __init__(
        self, logger: Callable[..., None] | None = None, *, addon: str
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"addon": addon}
        super().__init__(None, logger)


class AddonBuildDockerfileMissingError(AddonNotSupportedError, APIError):
    """Raise when addon build invalid because dockerfile is missing."""

    error_key = "addon_build_dockerfile_missing_error"
    message_template = (
        "Cannot build addon '{addon}' because dockerfile is missing. A repair "
        "using '{repair_command}' will fix this if the cause is data "
        "corruption. Otherwise please report this to the addon developer."
    )

    def __init__(
        self, logger: Callable[..., None] | None = None, *, addon: str
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"addon": addon, "repair_command": "ha supervisor repair"}
        super().__init__(None, logger)


class AddonBuildArchitectureNotSupportedError(AddonNotSupportedError, APIError):
    """Raise when addon cannot be built on system because it doesn't support its architecture."""

    error_key = "addon_build_architecture_not_supported_error"
    message_template = (
        "Cannot build addon '{addon}' because its supported architectures "
        "({addon_arches}) do not match the system supported architectures ({system_arches})"
    )

    def __init__(
        self,
        logger: Callable[..., None] | None = None,
        *,
        addon: str,
        addon_arch_list: list[str],
        system_arch_list: list[str],
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {
            "addon": addon,
            "addon_arches": ", ".join(addon_arch_list),
            "system_arches": ", ".join(system_arch_list),
        }
        super().__init__(None, logger)


class AddonUnknownError(AddonsError, APIUnknownSupervisorError):
    """Raise when unknown error occurs taking an action for an addon."""

    error_key = "addon_unknown_error"
    message_template = "An unknown error occurred with addon {addon}"

    def __init__(
        self, logger: Callable[..., None] | None = None, *, addon: str
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"addon": addon}
        super().__init__(logger)


class AddonBuildFailedUnknownError(AddonsError, APIUnknownSupervisorError):
    """Raise when the build failed for an addon due to an unknown error."""

    error_key = "addon_build_failed_unknown_error"
    message_template = (
        "An unknown error occurred while trying to build the image for addon {addon}"
    )

    def __init__(
        self, logger: Callable[..., None] | None = None, *, addon: str
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"addon": addon}
        super().__init__(logger)


class AddonsJobError(AddonsError, JobException):
@@ -346,13 +568,64 @@ class AuthError(HassioError):
"""Auth errors."""
|
||||
|
||||
|
||||
class AuthPasswordResetError(HassioError):
|
||||
class AuthPasswordResetError(AuthError, APIError):
|
||||
"""Auth error if password reset failed."""
|
||||
|
||||
error_key = "auth_password_reset_error"
|
||||
message_template = "Username '{user}' does not exist. Check list of users using '{auth_list_command}'."
|
||||
|
||||
class AuthListUsersError(HassioError):
|
||||
def __init__(
|
||||
self,
|
||||
logger: Callable[..., None] | None = None,
|
||||
*,
|
||||
user: str,
|
||||
) -> None:
|
||||
"""Initialize exception."""
|
||||
self.extra_fields = {"user": user, "auth_list_command": "ha auth list"}
|
||||
super().__init__(None, logger)
|
||||
|
||||
|
||||
class AuthListUsersError(AuthError, APIUnknownSupervisorError):
|
||||
"""Auth error if listing users failed."""
|
||||
|
||||
error_key = "auth_list_users_error"
|
||||
message_template = "Can't request listing users on Home Assistant"
|
||||
|
||||
|
||||
class AuthListUsersNoneResponseError(AuthError, APIInternalServerError):
|
||||
"""Auth error if listing users returned invalid None response."""
|
||||
|
||||
error_key = "auth_list_users_none_response_error"
|
||||
message_template = "Home Assistant returned invalid response of `{none}` instead of a list of users. Check Home Assistant logs for details (check with `{logs_command}`)"
|
||||
extra_fields = {"none": "None", "logs_command": "ha core logs"}
|
||||
|
||||
def __init__(self, logger: Callable[..., None] | None = None) -> None:
|
||||
"""Initialize exception."""
|
||||
super().__init__(None, logger)
|
||||
|
||||
|
||||
class AuthInvalidNonStringValueError(AuthError, APIUnauthorized):
|
||||
"""Auth error if something besides a string provided as username or password."""
|
||||
|
||||
error_key = "auth_invalid_non_string_value_error"
|
||||
message_template = "Username and password must be strings"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
logger: Callable[..., None] | None = None,
|
||||
*,
|
||||
headers: Mapping[str, str] | None = None,
|
||||
) -> None:
|
||||
"""Initialize exception."""
|
||||
super().__init__(None, logger, headers=headers)
|
||||
|
||||
|
||||
class AuthHomeAssistantAPIValidationError(AuthError, APIUnknownSupervisorError):
|
||||
"""Error encountered trying to validate auth details via Home Assistant API."""
|
||||
|
||||
error_key = "auth_home_assistant_api_validation_error"
|
||||
message_template = "Unable to validate authentication details with Home Assistant"
|
||||
|
||||
|
||||
# Host
|
||||
|
||||
@@ -385,60 +658,6 @@ class HostLogError(HostError):
|
||||
"""Internal error with host log."""
|
||||
|
||||
|
||||
# API
|
||||
|
||||
|
||||
class APIError(HassioError, RuntimeError):
|
||||
"""API errors."""
|
||||
|
||||
status = 400
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
message: str | None = None,
|
||||
logger: Callable[..., None] | None = None,
|
||||
*,
|
||||
job_id: str | None = None,
|
||||
error: HassioError | None = None,
|
||||
) -> None:
|
||||
"""Raise & log, optionally with job."""
|
||||
# Allow these to be set from another error here since APIErrors essentially wrap others to add a status
|
||||
self.error_key = error.error_key if error else None
|
||||
self.message_template = error.message_template if error else None
|
||||
super().__init__(
|
||||
message, logger, extra_fields=error.extra_fields if error else None
|
||||
)
|
||||
self.job_id = job_id
|
||||
|
||||
|
||||
class APIForbidden(APIError):
|
||||
"""API forbidden error."""
|
||||
|
||||
status = 403
|
||||
|
||||
|
||||
class APINotFound(APIError):
|
||||
"""API not found error."""
|
||||
|
||||
status = 404
|
||||
|
||||
|
||||
class APIGone(APIError):
|
||||
"""API is no longer available."""
|
||||
|
||||
status = 410
|
||||
|
||||
|
||||
class APIAddonNotInstalled(APIError):
|
||||
"""Not installed addon requested at addons API."""
|
||||
|
||||
|
||||
class APIDBMigrationInProgress(APIError):
|
||||
"""Service is unavailable due to an offline DB migration is in progress."""
|
||||

    status = 503


# Service / Discovery


@@ -616,6 +835,10 @@ class DockerError(HassioError):
    """Docker API/Transport errors."""


class DockerBuildError(DockerError):
    """Docker error during build."""


class DockerAPIError(DockerError):
    """Docker API error."""

@@ -647,7 +870,7 @@ class DockerNoSpaceOnDevice(DockerError):
        super().__init__(None, logger=logger)


class DockerHubRateLimitExceeded(DockerError):
class DockerHubRateLimitExceeded(DockerError, APITooManyRequests):
    """Raise for docker hub rate limit exceeded error."""

    error_key = "dockerhub_rate_limit_exceeded"
@@ -655,16 +878,13 @@ class DockerHubRateLimitExceeded(DockerError):
        "Your IP address has made too many requests to Docker Hub which activated a rate limit. "
        "For more details see {dockerhub_rate_limit_url}"
    )
    extra_fields = {
        "dockerhub_rate_limit_url": "https://www.home-assistant.io/more-info/dockerhub-rate-limit"
    }

    def __init__(self, logger: Callable[..., None] | None = None) -> None:
        """Raise & log."""
        super().__init__(
            None,
            logger=logger,
            extra_fields={
                "dockerhub_rate_limit_url": "https://www.home-assistant.io/more-info/dockerhub-rate-limit"
            },
        )
        super().__init__(None, logger=logger)


class DockerJobError(DockerError, JobException):
@@ -735,6 +955,20 @@ class StoreNotFound(StoreError):
    """Raise if slug is not known."""


class StoreAddonNotFoundError(StoreError, APINotFound):
    """Raise if a requested addon is not in the store."""

    error_key = "store_addon_not_found_error"
    message_template = "Addon {addon} does not exist in the store"

    def __init__(
        self, logger: Callable[..., None] | None = None, *, addon: str
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"addon": addon}
        super().__init__(None, logger)


class StoreJobError(StoreError, JobException):
    """Raise on job error with git."""

@@ -770,7 +1004,7 @@ class BackupJobError(BackupError, JobException):
    """Raise on Backup job error."""


class BackupFileNotFoundError(BackupError):
class BackupFileNotFoundError(BackupError, APINotFound):
    """Raise if the backup file hasn't been found."""


@@ -782,6 +1016,55 @@ class BackupFileExistError(BackupError):
    """Raise if the backup file already exists."""


class AddonBackupMetadataInvalidError(BackupError, APIError):
    """Raise if invalid metadata file provided for addon in backup."""

    error_key = "addon_backup_metadata_invalid_error"
    message_template = (
        "Metadata file for add-on {addon} in backup is invalid: {validation_error}"
    )

    def __init__(
        self,
        logger: Callable[..., None] | None = None,
        *,
        addon: str,
        validation_error: str,
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {"addon": addon, "validation_error": validation_error}
        super().__init__(None, logger)


class AddonPrePostBackupCommandReturnedError(BackupError, APIError):
    """Raise when addon's pre/post backup command returns an error."""

    error_key = "addon_pre_post_backup_command_returned_error"
    message_template = (
        "Pre-/Post backup command for add-on {addon} returned error code: "
        "{exit_code}. Please report this to the addon developer. Enable debug "
        "logging to capture complete command output using {debug_logging_command}"
    )

    def __init__(
        self, logger: Callable[..., None] | None = None, *, addon: str, exit_code: int
    ) -> None:
        """Initialize exception."""
        self.extra_fields = {
            "addon": addon,
            "exit_code": exit_code,
            "debug_logging_command": "ha supervisor options --logging debug",
        }
        super().__init__(None, logger)


class BackupRestoreUnknownError(BackupError, APIUnknownSupervisorError):
    """Raise when an unknown error occurs during backup or restore."""

    error_key = "backup_restore_unknown_error"
    message_template = "An unknown error occurred during backup/restore"


# Security


@@ -48,7 +48,7 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)

SECONDS_BETWEEN_API_CHECKS: Final[int] = 5
# Core Stage 1 and some wiggle room
STARTUP_API_RESPONSE_TIMEOUT: Final[timedelta] = timedelta(minutes=3)
STARTUP_API_RESPONSE_TIMEOUT: Final[timedelta] = timedelta(minutes=10)
# All stages plus event start timeout and some wiggle room
STARTUP_API_CHECK_RUNNING_TIMEOUT: Final[timedelta] = timedelta(minutes=15)
# While database migration is running, the timeout will be extended

@@ -23,6 +23,7 @@ from ..const import (
    ATTR_AUDIO_OUTPUT,
    ATTR_BACKUPS_EXCLUDE_DATABASE,
    ATTR_BOOT,
    ATTR_DUPLICATE_LOG_FILE,
    ATTR_IMAGE,
    ATTR_MESSAGE,
    ATTR_PORT,
@@ -299,6 +300,16 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
        """Set whether backups should exclude database by default."""
        self._data[ATTR_BACKUPS_EXCLUDE_DATABASE] = value

    @property
    def duplicate_log_file(self) -> bool:
        """Return True if Home Assistant should duplicate logs to file."""
        return self._data[ATTR_DUPLICATE_LOG_FILE]

    @duplicate_log_file.setter
    def duplicate_log_file(self, value: bool) -> None:
        """Set whether Home Assistant should duplicate logs to file."""
        self._data[ATTR_DUPLICATE_LOG_FILE] = value

    async def load(self) -> None:
        """Prepare Home Assistant object."""
        await asyncio.wait(

@@ -10,6 +10,7 @@ from ..const import (
    ATTR_AUDIO_OUTPUT,
    ATTR_BACKUPS_EXCLUDE_DATABASE,
    ATTR_BOOT,
    ATTR_DUPLICATE_LOG_FILE,
    ATTR_IMAGE,
    ATTR_PORT,
    ATTR_REFRESH_TOKEN,
@@ -36,6 +37,7 @@ SCHEMA_HASS_CONFIG = vol.Schema(
        vol.Optional(ATTR_AUDIO_OUTPUT, default=None): vol.Maybe(str),
        vol.Optional(ATTR_AUDIO_INPUT, default=None): vol.Maybe(str),
        vol.Optional(ATTR_BACKUPS_EXCLUDE_DATABASE, default=False): vol.Boolean(),
        vol.Optional(ATTR_DUPLICATE_LOG_FILE, default=False): vol.Boolean(),
        vol.Optional(ATTR_OVERRIDE_IMAGE, default=False): vol.Boolean(),
    },
    extra=vol.REMOVE_EXTRA,

@@ -6,8 +6,8 @@ import logging
import socket

from ..dbus.const import (
    ConnectionState,
    ConnectionStateFlags,
    ConnectionStateType,
    DeviceType,
    InterfaceAddrGenMode as NMInterfaceAddrGenMode,
    InterfaceIp6Privacy as NMInterfaceIp6Privacy,
@@ -267,25 +267,47 @@ class Interface:
        return InterfaceMethod.DISABLED

    @staticmethod
    def _map_nm_addr_gen_mode(addr_gen_mode: int) -> InterfaceAddrGenMode:
        """Map IPv6 interface addr_gen_mode."""
    def _map_nm_addr_gen_mode(addr_gen_mode: int | None) -> InterfaceAddrGenMode:
        """Map IPv6 interface addr_gen_mode.

        NetworkManager omits the addr_gen_mode property when set to DEFAULT, so we
        treat None as DEFAULT here.
        """
        mapping = {
            NMInterfaceAddrGenMode.EUI64.value: InterfaceAddrGenMode.EUI64,
            NMInterfaceAddrGenMode.STABLE_PRIVACY.value: InterfaceAddrGenMode.STABLE_PRIVACY,
            NMInterfaceAddrGenMode.DEFAULT_OR_EUI64.value: InterfaceAddrGenMode.DEFAULT_OR_EUI64,
            NMInterfaceAddrGenMode.DEFAULT.value: InterfaceAddrGenMode.DEFAULT,
            None: InterfaceAddrGenMode.DEFAULT,
        }

        if addr_gen_mode not in mapping:
            _LOGGER.warning(
                "Unknown addr_gen_mode value from NetworkManager: %s", addr_gen_mode
            )

        return mapping.get(addr_gen_mode, InterfaceAddrGenMode.DEFAULT)

    @staticmethod
    def _map_nm_ip6_privacy(ip6_privacy: int) -> InterfaceIp6Privacy:
        """Map IPv6 interface ip6_privacy."""
    def _map_nm_ip6_privacy(ip6_privacy: int | None) -> InterfaceIp6Privacy:
        """Map IPv6 interface ip6_privacy.

        NetworkManager omits the ip6_privacy property when set to DEFAULT, so we
        treat None as DEFAULT here.
        """
        mapping = {
            NMInterfaceIp6Privacy.DISABLED.value: InterfaceIp6Privacy.DISABLED,
            NMInterfaceIp6Privacy.ENABLED_PREFER_PUBLIC.value: InterfaceIp6Privacy.ENABLED_PREFER_PUBLIC,
            NMInterfaceIp6Privacy.ENABLED.value: InterfaceIp6Privacy.ENABLED,
            NMInterfaceIp6Privacy.DEFAULT.value: InterfaceIp6Privacy.DEFAULT,
            None: InterfaceIp6Privacy.DEFAULT,
        }

        if ip6_privacy not in mapping:
            _LOGGER.warning(
                "Unknown ip6_privacy value from NetworkManager: %s", ip6_privacy
            )

        return mapping.get(ip6_privacy, InterfaceIp6Privacy.DEFAULT)

    @staticmethod
@@ -295,8 +317,8 @@ class Interface:
            return False

        return connection.state in (
            ConnectionStateType.ACTIVATED,
            ConnectionStateType.ACTIVATING,
            ConnectionState.ACTIVATED,
            ConnectionState.ACTIVATING,
        )

    @staticmethod

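The two mappers above share one pattern: treat a missing (None) NetworkManager property as DEFAULT, warn on unknown values, and fall back to DEFAULT. A generic, hedged sketch of that pattern (the names here are illustrative, not from the diff):

import logging
from enum import Enum

_LOGGER = logging.getLogger(__name__)

class Mode(Enum):
    DEFAULT = "default"
    EUI64 = "eui64"

# None-tolerant lookup: an absent D-Bus property maps to the default member.
_NM_TO_MODE: dict[int | None, Mode] = {0: Mode.DEFAULT, 1: Mode.EUI64, None: Mode.DEFAULT}

def map_mode(raw: int | None) -> Mode:
    if raw not in _NM_TO_MODE:
        _LOGGER.warning("Unknown mode value from NetworkManager: %s", raw)
    return _NM_TO_MODE.get(raw, Mode.DEFAULT)
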
@@ -16,7 +16,7 @@ from ..dbus.const import (
    DBUS_IFACE_DNS,
    DBUS_IFACE_NM,
    DBUS_SIGNAL_NM_CONNECTION_ACTIVE_CHANGED,
    ConnectionStateType,
    ConnectionState,
    ConnectivityState,
    DeviceType,
    WirelessMethodType,
@@ -338,16 +338,16 @@ class NetworkManager(CoreSysAttributes):
        # the state change before this point. Get the state currently to
        # avoid any race condition.
        await con.update()
        state: ConnectionStateType = con.state
        state: ConnectionState = con.state

        while state != ConnectionState.ACTIVATED:
        while state != ConnectionStateType.ACTIVATED:
            if state == ConnectionStateType.DEACTIVATED:
            if state == ConnectionState.DEACTIVATED:
                raise HostNetworkError(
                    "Activating connection failed, check connection settings."
                )

            msg = await signal.wait_for_signal()
            state = msg[0]
            state = ConnectionState(msg[0])
            _LOGGER.debug("Active connection state changed to %s", state)

        # update_only means the change wasn't made by the user, so don't force a check afterwards

@@ -102,13 +102,17 @@ class SupervisorJobError:
        "Unknown error, see Supervisor logs (check with 'ha supervisor logs')"
    )
    stage: str | None = None
    error_key: str | None = None
    extra_fields: dict[str, Any] | None = None

    def as_dict(self) -> dict[str, str | None]:
    def as_dict(self) -> dict[str, Any]:
        """Return dictionary representation."""
        return {
            "type": self.type_.__name__,
            "message": self.message,
            "stage": self.stage,
            "error_key": self.error_key,
            "extra_fields": self.extra_fields,
        }

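With error_key and extra_fields carried through, a captured job error serializes roughly like this — a hedged sketch of the dict shape, with illustrative values:

# Illustrative as_dict() output for a captured AddonUnknownError:
{
    "type": "AddonUnknownError",
    "message": "An unknown error occurred with addon core_ssh. "
    "Check supervisor logs for details (check with 'ha supervisor logs')",
    "stage": None,
    "error_key": "addon_unknown_error",
    "extra_fields": {"addon": "core_ssh", "logs_command": "ha supervisor logs"},
}
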
@@ -158,7 +162,9 @@ class SupervisorJob:
    def capture_error(self, err: HassioError | None = None) -> None:
        """Capture an error or record that an unknown error has occurred."""
        if err:
            new_error = SupervisorJobError(type(err), str(err), self.stage)
            new_error = SupervisorJobError(
                type(err), str(err), self.stage, err.error_key, err.extra_fields
            )
        else:
            new_error = SupervisorJobError(stage=self.stage)
        self.errors += [new_error]

@@ -2,7 +2,7 @@

from ...const import CoreState
from ...coresys import CoreSys
from ...dbus.const import ConnectionStateFlags, ConnectionStateType
from ...dbus.const import ConnectionState, ConnectionStateFlags
from ...dbus.network.interface import NetworkInterface
from ...exceptions import NetworkInterfaceNotFound
from ..const import ContextType, IssueType
@@ -47,7 +47,7 @@ class CheckNetworkInterfaceIPV4(CheckBase):

        return not (
            interface.connection.state
            in [ConnectionStateType.ACTIVATED, ConnectionStateType.ACTIVATING]
            in [ConnectionState.ACTIVATED, ConnectionState.ACTIVATING]
            and ConnectionStateFlags.IP4_READY in interface.connection.state_flags
        )


@@ -74,7 +74,9 @@ class EvaluateContainer(EvaluateBase):
        self._images.clear()

        try:
            containers = await self.sys_run_in_executor(self.sys_docker.containers.list)
            containers = await self.sys_run_in_executor(
                self.sys_docker.containers_legacy.list
            )
        except (DockerException, RequestException) as err:
_LOGGER.error("Corrupt docker overlayfs detect: %s", err)
|
||||
self.sys_resolution.create_issue(
@@ -183,19 +183,22 @@ class GitRepo(CoreSysAttributes):
            raise StoreGitError() from err

        try:
            branch = self.repo.active_branch.name
            repo = self.repo

            # Download data
            await self.sys_run_in_executor(
                ft.partial(
                    self.repo.remotes.origin.fetch,
                    **{"update-shallow": True, "depth": 1},  # type: ignore
            def _fetch_and_check() -> tuple[str, bool]:
                """Fetch from origin and check if changed."""
                # This property access is I/O bound
                branch = repo.active_branch.name
                repo.remotes.origin.fetch(
                    **{"update-shallow": True, "depth": 1}  # type: ignore[arg-type]
                )
            )
                changed = repo.commit(branch) != repo.commit(f"origin/{branch}")
                return branch, changed

            if changed := self.repo.commit(branch) != self.repo.commit(
                f"origin/{branch}"
            ):
            # Download data and check for changes
            branch, changed = await self.sys_run_in_executor(_fetch_and_check)

            if changed:
                # Jump on top of that
                await self.sys_run_in_executor(
                    ft.partial(self.repo.git.reset, f"origin/{branch}", hard=True)
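Note: the rewrite above batches every blocking GitPython call, including the
I/O-bound active_branch property access, into a single callable that runs off
the event loop. The same pattern in isolation, using asyncio's stock executor
instead of Supervisor's sys_run_in_executor (names here are illustrative):

import asyncio
import functools as ft

def _blocking_work(repo) -> tuple[str, bool]:
    """All blocking repo access happens in the worker thread."""
    branch = repo.active_branch.name  # property access hits the filesystem
    repo.remotes.origin.fetch()
    return branch, repo.commit(branch) != repo.commit(f"origin/{branch}")

async def update(repo) -> None:
    loop = asyncio.get_running_loop()
    branch, changed = await loop.run_in_executor(None, ft.partial(_blocking_work, repo))
    if changed:
        # git reset is blocking too, so it also goes through the executor
        await loop.run_in_executor(
            None, ft.partial(repo.git.reset, f"origin/{branch}", hard=True)
        )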
@@ -224,6 +227,7 @@ class GitRepo(CoreSysAttributes):
            git.CommandError,
            ValueError,
            AssertionError,
            AttributeError,
            UnicodeDecodeError,
        ) as err:
            _LOGGER.error("Can't update %s repo: %s.", self.url, err)

@@ -28,8 +28,8 @@ from .exceptions import (
    DockerError,
    HostAppArmorError,
    SupervisorAppArmorError,
    SupervisorError,
    SupervisorJobError,
    SupervisorUnknownError,
    SupervisorUpdateError,
)
from .jobs.const import JobCondition, JobThrottle

@@ -261,7 +261,7 @@ class Supervisor(CoreSysAttributes):
        try:
            return await self.instance.stats()
        except DockerError as err:
            raise SupervisorError() from err
            raise SupervisorUnknownError() from err

    async def repair(self):
        """Repair local Supervisor data."""
@@ -5,6 +5,7 @@ from datetime import timedelta
import errno
from http import HTTPStatus
from pathlib import Path
from typing import Any
from unittest.mock import MagicMock, PropertyMock, call, patch

import aiodocker

@@ -23,7 +24,13 @@ from supervisor.docker.addon import DockerAddon
from supervisor.docker.const import ContainerState
from supervisor.docker.manager import CommandReturn, DockerAPI
from supervisor.docker.monitor import DockerContainerStateEvent
from supervisor.exceptions import AddonsError, AddonsJobError, AudioUpdateError
from supervisor.exceptions import (
    AddonPrePostBackupCommandReturnedError,
    AddonsJobError,
    AddonUnknownError,
    AudioUpdateError,
    HassioError,
)
from supervisor.hardware.helper import HwHelper
from supervisor.ingress import Ingress
from supervisor.store.repository import Repository
@@ -220,7 +227,7 @@ async def test_listener_attached_on_install(
    container_collection.get.side_effect = DockerException()
    with (
        patch(
            "supervisor.docker.manager.DockerAPI.containers",
            "supervisor.docker.manager.DockerAPI.containers_legacy",
            new=PropertyMock(return_value=container_collection),
        ),
        patch("pathlib.Path.is_dir", return_value=True),
@@ -502,31 +509,26 @@ async def test_backup_with_pre_post_command(


@pytest.mark.parametrize(
    "get_error,exception_on_exec",
    ("container_get_side_effect", "exec_run_side_effect", "exc_type_raised"),
    [
        (NotFound("missing"), False),
        (DockerException(), False),
        (None, True),
        (None, False),
        (NotFound("missing"), [(1, None)], AddonUnknownError),
        (DockerException(), [(1, None)], AddonUnknownError),
        (None, DockerException(), AddonUnknownError),
        (None, [(1, None)], AddonPrePostBackupCommandReturnedError),
    ],
)
@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
async def test_backup_with_pre_command_error(
    coresys: CoreSys,
    install_addon_ssh: Addon,
    container: MagicMock,
    get_error: DockerException | None,
    exception_on_exec: bool,
    tmp_supervisor_data,
    path_extern,
    container_get_side_effect: DockerException | None,
    exec_run_side_effect: DockerException | list[tuple[int, Any]],
    exc_type_raised: type[HassioError],
) -> None:
    """Test backing up an addon with error running pre command."""
    if get_error:
        coresys.docker.containers.get.side_effect = get_error

    if exception_on_exec:
        container.exec_run.side_effect = DockerException()
    else:
        container.exec_run.return_value = (1, None)
    coresys.docker.containers_legacy.get.side_effect = container_get_side_effect
    container.exec_run.side_effect = exec_run_side_effect

    install_addon_ssh.path_data.mkdir()
    await install_addon_ssh.load()

@@ -535,7 +537,7 @@ async def test_backup_with_pre_command_error(
    with (
        patch.object(DockerAddon, "is_running", return_value=True),
        patch.object(Addon, "backup_pre", new=PropertyMock(return_value="backup_pre")),
        pytest.raises(AddonsError),
        pytest.raises(exc_type_raised),
    ):
        assert await install_addon_ssh.backup(tarfile) is None
@@ -947,7 +949,7 @@ async def test_addon_load_succeeds_with_docker_errors(
    )
    caplog.clear()
    await install_addon_ssh.load()
    assert "Invalid build environment" in caplog.text
    assert "Cannot build addon 'local_ssh' because dockerfile is missing" in caplog.text

    # Image build failure
    caplog.clear()
@@ -6,11 +6,13 @@ from pathlib import Path
from unittest.mock import PropertyMock, patch

from awesomeversion import AwesomeVersion
import pytest

from supervisor.addons.addon import Addon
from supervisor.addons.build import AddonBuild
from supervisor.coresys import CoreSys
from supervisor.docker.const import DOCKER_HUB
from supervisor.exceptions import AddonBuildDockerfileMissingError

from tests.common import is_in_list

@@ -106,11 +108,11 @@ async def test_build_valid(coresys: CoreSys, install_addon_ssh: Addon):
            type(coresys.arch), "default", new=PropertyMock(return_value="aarch64")
        ),
    ):
        assert await build.is_valid()
        assert (await build.is_valid()) is None


async def test_build_invalid(coresys: CoreSys, install_addon_ssh: Addon):
    """Test platform set in docker args."""
    """Test build not supported because Dockerfile missing for specified architecture."""
    build = await AddonBuild(coresys, install_addon_ssh).load_config()
    with (
        patch.object(

@@ -119,8 +121,9 @@ async def test_build_invalid(coresys: CoreSys, install_addon_ssh: Addon):
        patch.object(
            type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
        ),
        pytest.raises(AddonBuildDockerfileMissingError),
    ):
        assert not await build.is_valid()
        await build.is_valid()


async def test_docker_config_no_registries(coresys: CoreSys, install_addon_ssh: Addon):
@@ -10,7 +10,7 @@ from awesomeversion import AwesomeVersion
import pytest

from supervisor.addons.addon import Addon
from supervisor.arch import CpuArch
from supervisor.arch import CpuArchManager
from supervisor.config import CoreConfig
from supervisor.const import AddonBoot, AddonStartup, AddonState, BusEvent
from supervisor.coresys import CoreSys

@@ -54,7 +54,9 @@ async def fixture_mock_arch_disk() -> AsyncGenerator[None]:
    """Mock supported arch and disk space."""
    with (
        patch("shutil.disk_usage", return_value=(42, 42, 2 * (1024.0**3))),
        patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
        patch.object(
            CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
        ),
    ):
        yield
@@ -5,11 +5,12 @@ from unittest.mock import MagicMock, PropertyMock, patch

from aiohttp import ClientResponse
from aiohttp.test_utils import TestClient
from docker.errors import DockerException
import pytest

from supervisor.addons.addon import Addon
from supervisor.addons.build import AddonBuild
from supervisor.arch import CpuArch
from supervisor.arch import CpuArchManager
from supervisor.const import AddonState
from supervisor.coresys import CoreSys
from supervisor.docker.addon import DockerAddon

@@ -236,7 +237,9 @@ async def test_api_addon_rebuild_healthcheck(
        patch.object(AddonBuild, "is_valid", return_value=True),
        patch.object(DockerAddon, "is_running", return_value=False),
        patch.object(Addon, "need_build", new=PropertyMock(return_value=True)),
        patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
        patch.object(
            CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
        ),
        patch.object(DockerAddon, "run", new=container_events_task),
        patch.object(
            coresys.docker,

@@ -308,7 +311,9 @@ async def test_api_addon_rebuild_force(
        patch.object(
            Addon, "need_build", new=PropertyMock(return_value=False)
        ),  # Image-based
        patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
        patch.object(
            CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
        ),
    ):
        resp = await api_client.post("/addons/local_ssh/rebuild")

@@ -326,7 +331,9 @@ async def test_api_addon_rebuild_force(
        patch.object(
            Addon, "need_build", new=PropertyMock(return_value=False)
        ),  # Image-based
        patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
        patch.object(
            CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
        ),
        patch.object(DockerAddon, "run", new=container_events_task),
        patch.object(
            coresys.docker,

@@ -471,6 +478,11 @@ async def test_addon_options_boot_mode_manual_only_invalid(
        body["message"]
        == "Addon local_example boot option is set to manual_only so it cannot be changed"
    )
    assert body["error_key"] == "addon_boot_config_cannot_change_error"
    assert body["extra_fields"] == {
        "addon": "local_example",
        "boot_config": "manual_only",
    }


async def get_message(resp: ClientResponse, json_expected: bool) -> str:
@@ -539,3 +551,154 @@ async def test_addon_not_installed(
    resp = await api_client.request(method, url)
    assert resp.status == 400
    assert await get_message(resp, json_expected) == "Addon is not installed"


async def test_addon_set_options(api_client: TestClient, install_addon_example: Addon):
    """Test setting options for an addon."""
    resp = await api_client.post(
        "/addons/local_example/options", json={"options": {"message": "test"}}
    )
    assert resp.status == 200
    assert install_addon_example.options == {"message": "test"}


async def test_addon_reset_options(
    api_client: TestClient, install_addon_example: Addon
):
    """Test resetting options for an addon to defaults.

    Fixes SUPERVISOR-171F.
    """
    # First set some custom options
    install_addon_example.options = {"message": "custom"}
    assert install_addon_example.persist["options"] == {"message": "custom"}

    # Reset to defaults by sending null
    resp = await api_client.post(
        "/addons/local_example/options", json={"options": None}
    )
    assert resp.status == 200

    # Persisted options should be empty (meaning defaults will be used)
    assert install_addon_example.persist["options"] == {}


async def test_addon_set_options_error(
    api_client: TestClient, install_addon_example: Addon
):
    """Test setting options for an addon."""
    resp = await api_client.post(
        "/addons/local_example/options", json={"options": {"message": True}}
    )
    assert resp.status == 400
    body = await resp.json()
    assert (
        body["message"]
        == "Add-on local_example has invalid options: not a valid value. Got {'message': True}"
    )
    assert body["error_key"] == "addon_configuration_invalid_error"
    assert body["extra_fields"] == {
        "addon": "local_example",
        "validation_error": "not a valid value. Got {'message': True}",
    }


async def test_addon_start_options_error(
    api_client: TestClient,
    install_addon_example: Addon,
    caplog: pytest.LogCaptureFixture,
):
    """Test error writing options when trying to start addon."""
    install_addon_example.options = {"message": "hello"}

    # Simulate OS error trying to write the file
    with patch("supervisor.utils.json.atomic_write", side_effect=OSError("fail")):
        resp = await api_client.post("/addons/local_example/start")
    assert resp.status == 500
    body = await resp.json()
    assert (
        body["message"]
        == "An unknown error occurred with addon local_example. Check supervisor logs for details (check with 'ha supervisor logs')"
    )
    assert body["error_key"] == "addon_unknown_error"
    assert body["extra_fields"] == {
        "addon": "local_example",
        "logs_command": "ha supervisor logs",
    }
    assert "Add-on local_example can't write options" in caplog.text

    # Simulate an update with a breaking change for options schema creating failure on start
    caplog.clear()
    install_addon_example.data["schema"] = {"message": "bool"}
    resp = await api_client.post("/addons/local_example/start")
    assert resp.status == 400
    body = await resp.json()
    assert (
        body["message"]
        == "Add-on local_example has invalid options: expected boolean. Got {'message': 'hello'}"
    )
    assert body["error_key"] == "addon_configuration_invalid_error"
    assert body["extra_fields"] == {
        "addon": "local_example",
        "validation_error": "expected boolean. Got {'message': 'hello'}",
    }
    assert (
        "Add-on local_example has invalid options: expected boolean. Got {'message': 'hello'}"
        in caplog.text
    )


@pytest.mark.parametrize(("method", "action"), [("get", "stats"), ("post", "stdin")])
@pytest.mark.usefixtures("install_addon_example")
async def test_addon_not_running_error(
    api_client: TestClient, method: str, action: str
):
    """Test addon not running error for endpoints that require that."""
    with patch.object(Addon, "with_stdin", new=PropertyMock(return_value=True)):
        resp = await api_client.request(method, f"/addons/local_example/{action}")

    assert resp.status == 400
    body = await resp.json()
    assert body["message"] == "Add-on local_example is not running"
    assert body["error_key"] == "addon_not_running_error"
    assert body["extra_fields"] == {"addon": "local_example"}


@pytest.mark.usefixtures("install_addon_example")
async def test_addon_write_stdin_not_supported_error(api_client: TestClient):
    """Test error when trying to write stdin to addon that does not support it."""
    resp = await api_client.post("/addons/local_example/stdin")
    assert resp.status == 400
    body = await resp.json()
    assert body["message"] == "Add-on local_example does not support writing to stdin"
    assert body["error_key"] == "addon_not_supported_write_stdin_error"
    assert body["extra_fields"] == {"addon": "local_example"}


@pytest.mark.usefixtures("install_addon_ssh")
async def test_addon_rebuild_fails_error(api_client: TestClient, coresys: CoreSys):
    """Test error when build fails during rebuild for addon."""
    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
    coresys.docker.containers_legacy.run.side_effect = DockerException("fail")

    with (
        patch.object(
            CpuArchManager, "supported", new=PropertyMock(return_value=["aarch64"])
        ),
        patch.object(
            CpuArchManager, "default", new=PropertyMock(return_value="aarch64")
        ),
        patch.object(AddonBuild, "get_docker_args", return_value={}),
    ):
        resp = await api_client.post("/addons/local_ssh/rebuild")
    assert resp.status == 500
    body = await resp.json()
    assert (
        body["message"]
        == "An unknown error occurred while trying to build the image for addon local_ssh. Check supervisor logs for details (check with 'ha supervisor logs')"
    )
    assert body["error_key"] == "addon_build_failed_unknown_error"
    assert body["extra_fields"] == {
        "addon": "local_ssh",
        "logs_command": "ha supervisor logs",
    }
@@ -1,6 +1,7 @@
"""Test auth API."""

from datetime import UTC, datetime, timedelta
from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch

from aiohttp.hdrs import WWW_AUTHENTICATE

@@ -9,6 +10,8 @@ import pytest

from supervisor.addons.addon import Addon
from supervisor.coresys import CoreSys
from supervisor.exceptions import HomeAssistantAPIError, HomeAssistantWSError
from supervisor.homeassistant.api import HomeAssistantAPI

from tests.common import MockResponse
from tests.const import TEST_ADDON_SLUG

@@ -100,6 +103,52 @@ async def test_password_reset(
    assert "Successful password reset for 'john'" in caplog.text


@pytest.mark.parametrize(
    ("post_mock", "expected_log"),
    [
        (
            MagicMock(return_value=MockResponse(status=400)),
            "The user 'john' is not registered",
        ),
        (
            MagicMock(side_effect=HomeAssistantAPIError("fail")),
            "Can't request password reset on Home Assistant: fail",
        ),
    ],
)
async def test_failed_password_reset(
    api_client: TestClient,
    coresys: CoreSys,
    caplog: pytest.LogCaptureFixture,
    websession: MagicMock,
    post_mock: MagicMock,
    expected_log: str,
):
    """Test failed password reset."""
    coresys.homeassistant.api.access_token = "abc123"
    # pylint: disable-next=protected-access
    coresys.homeassistant.api._access_token_expires = datetime.now(tz=UTC) + timedelta(
        days=1
    )

    websession.post = post_mock
    resp = await api_client.post(
        "/auth/reset", json={"username": "john", "password": "doe"}
    )
    assert resp.status == 400
    body = await resp.json()
    assert (
        body["message"]
        == "Username 'john' does not exist. Check list of users using 'ha auth list'."
    )
    assert body["error_key"] == "auth_password_reset_error"
    assert body["extra_fields"] == {
        "user": "john",
        "auth_list_command": "ha auth list",
    }
    assert expected_log in caplog.text


async def test_list_users(
    api_client: TestClient, coresys: CoreSys, ha_ws_client: AsyncMock
):
@@ -120,6 +169,48 @@ async def test_list_users(
    ]


@pytest.mark.parametrize(
    ("send_command_mock", "error_response", "expected_log"),
    [
        (
            AsyncMock(return_value=None),
            {
                "result": "error",
                "message": "Home Assistant returned invalid response of `None` instead of a list of users. Check Home Assistant logs for details (check with `ha core logs`)",
                "error_key": "auth_list_users_none_response_error",
                "extra_fields": {"none": "None", "logs_command": "ha core logs"},
            },
            "Home Assistant returned invalid response of `None` instead of a list of users. Check Home Assistant logs for details (check with `ha core logs`)",
        ),
        (
            AsyncMock(side_effect=HomeAssistantWSError("fail")),
            {
                "result": "error",
                "message": "Can't request listing users on Home Assistant. Check supervisor logs for details (check with 'ha supervisor logs')",
                "error_key": "auth_list_users_error",
                "extra_fields": {"logs_command": "ha supervisor logs"},
            },
            "Can't request listing users on Home Assistant: fail",
        ),
    ],
)
async def test_list_users_failure(
    api_client: TestClient,
    ha_ws_client: AsyncMock,
    caplog: pytest.LogCaptureFixture,
    send_command_mock: AsyncMock,
    error_response: dict[str, Any],
    expected_log: str,
):
    """Test failure listing users via API."""
    ha_ws_client.async_send_command = send_command_mock
    resp = await api_client.get("/auth/list")
    assert resp.status == 500
    result = await resp.json()
    assert result == error_response
    assert expected_log in caplog.text


@pytest.mark.parametrize(
    ("field", "api_client"),
    [("username", TEST_ADDON_SLUG), ("user", TEST_ADDON_SLUG)],

@@ -156,6 +247,13 @@ async def test_auth_json_failure_none(
    mock_check_login.return_value = True
    resp = await api_client.post("/auth", json={"username": user, "password": password})
    assert resp.status == 401
    assert (
        resp.headers["WWW-Authenticate"]
        == 'Basic realm="Home Assistant Authentication"'
    )
    body = await resp.json()
    assert body["message"] == "Username and password must be strings"
    assert body["error_key"] == "auth_invalid_non_string_value_error"


@pytest.mark.parametrize("api_client", [TEST_ADDON_SLUG], indirect=True)

@@ -267,3 +365,26 @@ async def test_non_addon_token_no_auth_access(api_client: TestClient):
    """Test auth where add-on is not allowed to access auth API."""
    resp = await api_client.post("/auth", json={"username": "test", "password": "pass"})
    assert resp.status == 403


@pytest.mark.parametrize("api_client", [TEST_ADDON_SLUG], indirect=True)
@pytest.mark.usefixtures("install_addon_ssh")
async def test_auth_backend_login_failure(api_client: TestClient):
    """Test backend login failure on auth."""
    with (
        patch.object(HomeAssistantAPI, "check_api_state", return_value=True),
        patch.object(
            HomeAssistantAPI, "make_request", side_effect=HomeAssistantAPIError("fail")
        ),
    ):
        resp = await api_client.post(
            "/auth", json={"username": "test", "password": "pass"}
        )
    assert resp.status == 500
    body = await resp.json()
    assert (
        body["message"]
        == "Unable to validate authentication details with Home Assistant. Check supervisor logs for details (check with 'ha supervisor logs')"
    )
    assert body["error_key"] == "auth_home_assistant_api_validation_error"
    assert body["extra_fields"] == {"logs_command": "ha supervisor logs"}
@@ -17,6 +17,7 @@ from supervisor.const import CoreState
from supervisor.coresys import CoreSys
from supervisor.docker.manager import DockerAPI
from supervisor.exceptions import (
    AddonPrePostBackupCommandReturnedError,
    AddonsError,
    BackupInvalidError,
    HomeAssistantBackupError,

@@ -24,6 +25,7 @@ from supervisor.exceptions import (
from supervisor.homeassistant.core import HomeAssistantCore
from supervisor.homeassistant.module import HomeAssistant
from supervisor.homeassistant.websocket import HomeAssistantWebSocket
from supervisor.jobs import SupervisorJob
from supervisor.mounts.mount import Mount
from supervisor.supervisor import Supervisor

@@ -401,6 +403,8 @@ async def test_api_backup_errors(
            "type": "BackupError",
            "message": str(err),
            "stage": None,
            "error_key": None,
            "extra_fields": None,
        }
    ]
    assert job["child_jobs"][2]["name"] == "backup_store_folders"

@@ -437,6 +441,8 @@ async def test_api_backup_errors(
            "type": "HomeAssistantBackupError",
            "message": "Backup error",
            "stage": "home_assistant",
            "error_key": None,
            "extra_fields": None,
        }
    ]
    assert job["child_jobs"][0]["name"] == "backup_store_homeassistant"

@@ -445,6 +451,8 @@ async def test_api_backup_errors(
            "type": "HomeAssistantBackupError",
            "message": "Backup error",
            "stage": None,
            "error_key": None,
            "extra_fields": None,
        }
    ]
    assert len(job["child_jobs"]) == 1

@@ -749,6 +757,8 @@ async def test_backup_to_multiple_locations_error_on_copy(
            "type": "BackupError",
            "message": "Could not copy backup to .cloud_backup due to: ",
            "stage": None,
            "error_key": None,
            "extra_fields": None,
        }
    ]

@@ -1191,10 +1201,8 @@ async def test_restore_homeassistant_adds_env(

    assert docker.containers.create.call_args.kwargs["name"] == "homeassistant"
    assert (
        docker.containers.create.call_args.kwargs["environment"][
            "SUPERVISOR_RESTORE_JOB_ID"
        ]
        == job.uuid
        f"SUPERVISOR_RESTORE_JOB_ID={job.uuid}"
        in docker.containers.create.call_args.args[0]["Env"]
    )


@@ -1483,3 +1491,44 @@ async def test_immediate_list_after_missing_file_restore(
    result = await resp.json()
    assert len(result["data"]["backups"]) == 1
    assert result["data"]["backups"][0]["slug"] == "93b462f8"


@pytest.mark.parametrize("command", ["backup_pre", "backup_post"])
@pytest.mark.usefixtures("install_addon_example", "tmp_supervisor_data")
async def test_pre_post_backup_command_error(
    api_client: TestClient, coresys: CoreSys, container: MagicMock, command: str
):
    """Test pre/post backup command error."""
    await coresys.core.set_state(CoreState.RUNNING)
    coresys.hardware.disk.get_disk_free_space = lambda x: 5000

    container.status = "running"
    container.exec_run.return_value = (1, b"")
    with patch.object(Addon, command, new=PropertyMock(return_value="test")):
        resp = await api_client.post(
            "/backups/new/partial", json={"addons": ["local_example"]}
        )

    assert resp.status == 200
    body = await resp.json()
    job_id = body["data"]["job_id"]
    job: SupervisorJob | None = None
    for j in coresys.jobs.jobs:
        if j.name == "backup_store_addons" and j.parent_id == job_id:
            job = j
            break

    assert job
    assert job.done is True
    assert job.errors[0].type_ == AddonPrePostBackupCommandReturnedError
    assert job.errors[0].message == (
        "Pre-/Post backup command for add-on local_example returned error code: "
        "1. Please report this to the addon developer. Enable debug "
        "logging to capture complete command output using ha supervisor options --logging debug"
    )
    assert job.errors[0].error_key == "addon_pre_post_backup_command_returned_error"
    assert job.errors[0].extra_fields == {
        "addon": "local_example",
        "exit_code": 1,
        "debug_logging_command": "ha supervisor options --logging debug",
    }
@@ -118,15 +118,6 @@ async def test_api_migrate_docker_storage_driver(
        in coresys.resolution.suggestions
    )

    # Test migration back to overlay2 (graph driver)
    system_service.MigrateDockerStorageDriver.calls.clear()
    resp = await api_client.post(
        "/docker/migrate-storage-driver",
        json={"storage_driver": "overlay2"},
    )
    assert resp.status == 200
    assert system_service.MigrateDockerStorageDriver.calls == [("overlay2",)]


@pytest.mark.parametrize("os_available", ["17.0.rc1"], indirect=True)
async def test_api_migrate_docker_storage_driver_invalid_backend(

@@ -35,9 +35,9 @@ async def test_api_core_logs(

async def test_api_stats(api_client: TestClient, coresys: CoreSys):
    """Test stats."""
    coresys.docker.containers.get.return_value.status = "running"
    coresys.docker.containers.get.return_value.stats.return_value = load_json_fixture(
        "container_stats.json"
    coresys.docker.containers_legacy.get.return_value.status = "running"
    coresys.docker.containers_legacy.get.return_value.stats.return_value = (
        load_json_fixture("container_stats.json")
    )

    resp = await api_client.get("/homeassistant/stats")

@@ -138,14 +138,14 @@ async def test_api_rebuild(
        await api_client.post("/homeassistant/rebuild")

    assert container.remove.call_count == 2
    container.start.assert_called_once()
    coresys.docker.containers.create.return_value.start.assert_called_once()
    assert not safe_mode_marker.exists()

    with patch.object(HomeAssistantCore, "_block_till_run"):
        await api_client.post("/homeassistant/rebuild", json={"safe_mode": True})

    assert container.remove.call_count == 4
    assert container.start.call_count == 2
    assert coresys.docker.containers.create.return_value.start.call_count == 2
    assert safe_mode_marker.exists()
@@ -374,6 +374,8 @@ async def test_job_with_error(
            "type": "SupervisorError",
            "message": "bad",
            "stage": "test",
            "error_key": None,
            "extra_fields": None,
        }
    ],
    "child_jobs": [

@@ -391,6 +393,8 @@ async def test_job_with_error(
            "type": "SupervisorError",
            "message": "bad",
            "stage": None,
            "error_key": None,
            "extra_fields": None,
        }
    ],
    "child_jobs": [],
@@ -4,13 +4,12 @@ import asyncio
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch

from aiohttp import ClientResponse
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
import pytest

from supervisor.addons.addon import Addon
from supervisor.arch import CpuArch
from supervisor.arch import CpuArchManager
from supervisor.backups.manager import BackupManager
from supervisor.config import CoreConfig
from supervisor.const import AddonState, CoreState

@@ -191,7 +190,9 @@ async def test_api_store_update_healthcheck(
        patch.object(DockerAddon, "run", new=container_events_task),
        patch.object(DockerInterface, "install"),
        patch.object(DockerAddon, "is_running", return_value=False),
        patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
        patch.object(
            CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
        ),
    ):
        resp = await api_client.post(f"/store/addons/{TEST_ADDON_SLUG}/update")

@@ -290,14 +291,6 @@ async def test_api_detached_addon_documentation(
    assert result == "Addon local_ssh does not exist in the store"


async def get_message(resp: ClientResponse, json_expected: bool) -> str:
    """Get message from response based on response type."""
    if json_expected:
        body = await resp.json()
        return body["message"]
    return await resp.text()


@pytest.mark.parametrize(
    ("method", "url", "json_expected"),
    [

@@ -323,7 +316,13 @@ async def test_store_addon_not_found(
    """Test store addon not found error."""
    resp = await api_client.request(method, url)
    assert resp.status == 404
    assert await get_message(resp, json_expected) == "Addon bad does not exist"
    if json_expected:
        body = await resp.json()
        assert body["message"] == "Addon bad does not exist in the store"
        assert body["error_key"] == "store_addon_not_found_error"
        assert body["extra_fields"] == {"addon": "bad"}
    else:
        assert await resp.text() == "Addon bad does not exist in the store"


@pytest.mark.parametrize(

@@ -548,7 +547,9 @@ async def test_api_store_addons_addon_availability_arch_not_supported(
    coresys.addons.data.user[addon_obj.slug] = {"version": AwesomeVersion("0.0.1")}

    # Mock the system architecture to be different
    with patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])):
    with patch.object(
        CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
    ):
        resp = await api_client.request(
            api_method, f"/store/addons/{addon_obj.slug}/{api_action}"
        )
@@ -7,6 +7,7 @@ from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
from blockbuster import BlockingError
from docker.errors import DockerException
import pytest

from supervisor.const import CoreState

@@ -407,3 +408,37 @@ async def test_api_progress_updates_supervisor_update(
            "done": True,
        },
    ]


async def test_api_supervisor_stats(api_client: TestClient, coresys: CoreSys):
    """Test supervisor stats."""
    coresys.docker.containers_legacy.get.return_value.status = "running"
    coresys.docker.containers_legacy.get.return_value.stats.return_value = (
        load_json_fixture("container_stats.json")
    )

    resp = await api_client.get("/supervisor/stats")
    assert resp.status == 200
    result = await resp.json()
    assert result["data"]["cpu_percent"] == 90.0
    assert result["data"]["memory_usage"] == 59700000
    assert result["data"]["memory_limit"] == 4000000000
    assert result["data"]["memory_percent"] == 1.49


async def test_supervisor_api_stats_failure(
    api_client: TestClient, coresys: CoreSys, caplog: pytest.LogCaptureFixture
):
    """Test supervisor stats failure."""
    coresys.docker.containers_legacy.get.side_effect = DockerException("fail")

    resp = await api_client.get("/supervisor/stats")
    assert resp.status == 500
    body = await resp.json()
    assert (
        body["message"]
        == "An unknown error occurred with Supervisor. Check supervisor logs for details (check with 'ha supervisor logs')"
    )
    assert body["error_key"] == "supervisor_unknown_error"
    assert body["extra_fields"] == {"logs_command": "ha supervisor logs"}
    assert "Could not inspect container 'hassio_supervisor': fail" in caplog.text
@@ -9,6 +9,7 @@ import subprocess
from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch
from uuid import uuid4

from aiodocker.containers import DockerContainer, DockerContainers
from aiodocker.docker import DockerImages
from aiohttp import ClientSession, web
from aiohttp.test_utils import TestClient

@@ -120,11 +121,13 @@ async def docker() -> DockerAPI:
        "Id": "test123",
        "RepoTags": ["ghcr.io/home-assistant/amd64-hassio-supervisor:latest"],
    }
    container_inspect = image_inspect | {"State": {"ExitCode": 0}}

    with (
        patch("supervisor.docker.manager.DockerClient", return_value=MagicMock()),
        patch(
            "supervisor.docker.manager.DockerAPI.containers", return_value=MagicMock()
            "supervisor.docker.manager.DockerAPI.containers_legacy",
            return_value=MagicMock(),
        ),
        patch("supervisor.docker.manager.DockerAPI.api", return_value=MagicMock()),
        patch("supervisor.docker.manager.DockerAPI.info", return_value=MagicMock()),

@@ -136,6 +139,12 @@ async def docker() -> DockerAPI:
                return_value=(docker_images := MagicMock(spec=DockerImages))
            ),
        ),
        patch(
            "supervisor.docker.manager.DockerAPI.containers",
            new=PropertyMock(
                return_value=(docker_containers := MagicMock(spec=DockerContainers))
            ),
        ),
    ):
        docker_obj = await DockerAPI(MagicMock()).post_init()
        docker_obj.config._data = {"registries": {}}

@@ -144,12 +153,18 @@ async def docker() -> DockerAPI:

        docker_images.inspect.return_value = image_inspect
        docker_images.list.return_value = [image_inspect]
        docker_images.import_image.return_value = [
            {"stream": "Loaded image: test:latest\n"}
        ]

        docker_images.import_image = AsyncMock(
            return_value=[{"stream": "Loaded image: test:latest\n"}]
        )
        docker_images.pull.return_value = AsyncIterator([{}])

        docker_containers.get.return_value = docker_container = MagicMock(
            spec=DockerContainer
        )
        docker_containers.list.return_value = [docker_container]
        docker_containers.create.return_value = docker_container
        docker_container.show.return_value = container_inspect

        docker_obj.info.logging = "journald"
        docker_obj.info.storage = "overlay2"
        docker_obj.info.version = AwesomeVersion("1.0.0")

@@ -790,7 +805,7 @@ async def docker_logs(docker: DockerAPI, supervisor_name) -> MagicMock:
    """Mock log output for a container from docker."""
    container_mock = MagicMock()
    container_mock.logs.return_value = load_binary_fixture("logs_docker_container.txt")
    docker.containers.get.return_value = container_mock
    docker.containers_legacy.get.return_value = container_mock
    yield container_mock.logs


@@ -824,7 +839,7 @@ async def os_available(request: pytest.FixtureRequest) -> None:
@pytest.fixture
async def mount_propagation(docker: DockerAPI, coresys: CoreSys) -> None:
    """Mock supervisor connected to container with propagation set."""
    docker.containers.get.return_value = supervisor = MagicMock()
    docker.containers_legacy.get.return_value = supervisor = MagicMock()
    supervisor.attrs = {
        "Mounts": [
            {

@@ -844,10 +859,11 @@ async def mount_propagation(docker: DockerAPI, coresys: CoreSys) -> None:
@pytest.fixture
async def container(docker: DockerAPI) -> MagicMock:
    """Mock attrs and status for container on attach."""
    docker.containers.get.return_value = addon = MagicMock()
    docker.containers.create.return_value = addon
    addon.status = "stopped"
    addon.attrs = {"State": {"ExitCode": 0}}
    attrs = {"State": {"ExitCode": 0}}
    docker.containers_legacy.get.return_value = addon = MagicMock(
        status="stopped", attrs=attrs
    )
    docker.containers.create.return_value.show.return_value = attrs
    yield addon
@@ -6,7 +6,7 @@ from unittest.mock import Mock, PropertyMock, patch
from dbus_fast.aio.message_bus import MessageBus
import pytest

from supervisor.dbus.const import ConnectionStateType
from supervisor.dbus.const import ConnectionState
from supervisor.dbus.network import NetworkManager
from supervisor.dbus.network.interface import NetworkInterface
from supervisor.exceptions import (

@@ -93,7 +93,7 @@ async def test_activate_connection(
        "/org/freedesktop/NetworkManager/Settings/1",
        "/org/freedesktop/NetworkManager/Devices/1",
    )
    assert connection.state == ConnectionStateType.ACTIVATED
    assert connection.state == ConnectionState.ACTIVATED
    assert (
        connection.settings.object_path == "/org/freedesktop/NetworkManager/Settings/1"
    )

@@ -117,7 +117,7 @@ async def test_add_and_activate_connection(
    )
    assert settings.connection.uuid == "0c23631e-2118-355c-bbb0-8943229cb0d6"
    assert settings.ipv4.method == "auto"
    assert connection.state == ConnectionStateType.ACTIVATED
    assert connection.state == ConnectionState.ACTIVATED
    assert (
        connection.settings.object_path == "/org/freedesktop/NetworkManager/Settings/1"
    )

@@ -35,8 +35,8 @@ class System(DBusServiceMock):
        """Migrate Docker storage driver."""
        if isinstance(self.response_migrate_docker_storage_driver, DBusError):
            raise self.response_migrate_docker_storage_driver  # pylint: disable=raising-bad-type
        if backend not in ("overlayfs", "overlay2"):
        if backend != "overlayfs":
            raise DBusError(
                ErrorType.FAILED,
                f"unsupported driver: {backend} (only 'overlayfs' and 'overlay2' are supported)",
                f"unsupported driver: {backend} (only 'overlayfs' is currently supported)",
            )
@@ -1,7 +1,12 @@
"""Docker tests."""

from docker.types import Mount
from supervisor.docker.const import DockerMount, MountBindOptions, MountType

# dev mount with equivalent of bind-recursive=writable specified via dict value
DEV_MOUNT = Mount(type="bind", source="/dev", target="/dev", read_only=True)
DEV_MOUNT["BindOptions"] = {"ReadOnlyNonRecursive": True}
DEV_MOUNT = DockerMount(
    type=MountType.BIND,
    source="/dev",
    target="/dev",
    read_only=True,
    bind_options=MountBindOptions(read_only_non_recursive=True),
)
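Note: DEV_MOUNT moves from docker.types.Mount (a dict subclass keyed with
"Type"/"Source"/"BindOptions") to the typed DockerMount from
supervisor.docker.const. Assuming DockerMount is a dataclass-style model, as
the keyword usage above suggests, the two spellings carry the same
information; these local stand-in definitions are illustrative only:

from dataclasses import dataclass
from enum import StrEnum

class MountType(StrEnum):
    BIND = "bind"

@dataclass
class MountBindOptions:
    read_only_non_recursive: bool = False

@dataclass
class DockerMount:
    type: MountType
    source: str
    target: str
    read_only: bool = False
    bind_options: MountBindOptions | None = None

# Same mount as Mount(type="bind", source="/dev", target="/dev", read_only=True)
# with BindOptions={"ReadOnlyNonRecursive": True}, but attribute-based:
DEV_MOUNT = DockerMount(
    type=MountType.BIND,
    source="/dev",
    target="/dev",
    read_only=True,
    bind_options=MountBindOptions(read_only_non_recursive=True),
)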
@@ -1,13 +1,13 @@
"""Test docker addon setup."""

import asyncio
from http import HTTPStatus
from ipaddress import IPv4Address
from pathlib import Path
from typing import Any
from unittest.mock import MagicMock, Mock, PropertyMock, patch

from docker.errors import NotFound
from docker.types import Mount
import aiodocker
import pytest

from supervisor.addons import validate as vd

@@ -18,6 +18,12 @@ from supervisor.const import BusEvent
from supervisor.coresys import CoreSys
from supervisor.dbus.agent.cgroup import CGroup
from supervisor.docker.addon import DockerAddon
from supervisor.docker.const import (
    DockerMount,
    MountBindOptions,
    MountType,
    PropagationMode,
)
from supervisor.docker.manager import DockerAPI
from supervisor.exceptions import CoreDNSError, DockerNotFound
from supervisor.hardware.data import Device

@@ -80,8 +86,8 @@ def test_base_volumes_included(

    # Data added as rw
    assert (
        Mount(
            type="bind",
        DockerMount(
            type=MountType.BIND,
            source=docker_addon.addon.path_extern_data.as_posix(),
            target="/data",
            read_only=False,

@@ -99,8 +105,8 @@ def test_addon_map_folder_defaults(
    )
    # Config added and is marked rw
    assert (
        Mount(
            type="bind",
        DockerMount(
            type=MountType.BIND,
            source=coresys.config.path_extern_homeassistant.as_posix(),
            target="/config",
            read_only=False,

@@ -110,8 +116,8 @@ def test_addon_map_folder_defaults(

    # SSL added and defaults to ro
    assert (
        Mount(
            type="bind",
        DockerMount(
            type=MountType.BIND,
            source=coresys.config.path_extern_ssl.as_posix(),
            target="/ssl",
            read_only=True,

@@ -121,30 +127,30 @@ def test_addon_map_folder_defaults(

    # Media added and propagation set
    assert (
        Mount(
            type="bind",
        DockerMount(
            type=MountType.BIND,
            source=coresys.config.path_extern_media.as_posix(),
            target="/media",
            read_only=True,
            propagation="rslave",
            bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
        )
        in docker_addon.mounts
    )

    # Share added and propagation set
    assert (
        Mount(
            type="bind",
        DockerMount(
            type=MountType.BIND,
            source=coresys.config.path_extern_share.as_posix(),
            target="/share",
            read_only=True,
            propagation="rslave",
            bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
        )
        in docker_addon.mounts
    )

    # Backup not added
    assert "/backup" not in [mount["Target"] for mount in docker_addon.mounts]
    assert "/backup" not in [mount.target for mount in docker_addon.mounts]


def test_addon_map_homeassistant_folder(

@@ -157,8 +163,8 @@ def test_addon_map_homeassistant_folder(

    # Home Assistant config folder mounted to /homeassistant, not /config
    assert (
        Mount(
            type="bind",
        DockerMount(
            type=MountType.BIND,
            source=coresys.config.path_extern_homeassistant.as_posix(),
            target="/homeassistant",
            read_only=True,

@@ -177,8 +183,8 @@ def test_addon_map_addon_configs_folder(

    # Addon configs folder included
    assert (
        Mount(
            type="bind",
        DockerMount(
            type=MountType.BIND,
            source=coresys.config.path_extern_addon_configs.as_posix(),
            target="/addon_configs",
            read_only=True,

@@ -197,8 +203,8 @@ def test_addon_map_addon_config_folder(

    # Addon config folder included
    assert (
        Mount(
            type="bind",
        DockerMount(
            type=MountType.BIND,
            source=docker_addon.addon.path_extern_config.as_posix(),
            target="/config",
            read_only=True,

@@ -220,8 +226,8 @@ def test_addon_map_addon_config_folder_with_custom_target(

    # Addon config folder included
    assert (
        Mount(
            type="bind",
        DockerMount(
            type=MountType.BIND,
            source=docker_addon.addon.path_extern_config.as_posix(),
            target="/custom/target/path",
            read_only=False,

@@ -240,8 +246,8 @@ def test_addon_map_data_folder_with_custom_target(

    # Addon config folder included
    assert (
        Mount(
            type="bind",
        DockerMount(
            type=MountType.BIND,
            source=docker_addon.addon.path_extern_data.as_posix(),
            target="/custom/data/path",
            read_only=False,

@@ -260,8 +266,8 @@ def test_addon_ignore_on_config_map(

    # Config added and is marked rw
    assert (
        Mount(
            type="bind",
        DockerMount(
            type=MountType.BIND,
            source=coresys.config.path_extern_homeassistant.as_posix(),
            target="/config",
            read_only=False,

@@ -271,11 +277,10 @@ def test_addon_ignore_on_config_map(

    # Mount for addon's specific config folder omitted since config in map field
    assert (
        len([mount for mount in docker_addon.mounts if mount["Target"] == "/config"])
        == 1
        len([mount for mount in docker_addon.mounts if mount.target == "/config"]) == 1
    )
    # Home Assistant mount omitted since config in map field
    assert "/homeassistant" not in [mount["Target"] for mount in docker_addon.mounts]
    assert "/homeassistant" not in [mount.target for mount in docker_addon.mounts]


def test_journald_addon(

@@ -287,8 +292,8 @@ def test_journald_addon(
    )

    assert (
        Mount(
            type="bind",
        DockerMount(
            type=MountType.BIND,
            source="/var/log/journal",
            target="/var/log/journal",
            read_only=True,

@@ -296,8 +301,8 @@ def test_journald_addon(
        in docker_addon.mounts
    )
    assert (
        Mount(
            type="bind",
        DockerMount(
            type=MountType.BIND,
            source="/run/log/journal",
            target="/run/log/journal",
            read_only=True,

@@ -314,7 +319,7 @@ def test_not_journald_addon(
        coresys, addonsdata_system, "basic-addon-config.json"
    )

    assert "/var/log/journal" not in [mount["Target"] for mount in docker_addon.mounts]
    assert "/var/log/journal" not in [mount.target for mount in docker_addon.mounts]


async def test_addon_run_docker_error(

@@ -325,7 +330,9 @@ async def test_addon_run_docker_error(
):
    """Test docker error when addon is run."""
    await coresys.dbus.timedate.connect(coresys.dbus.bus)
    coresys.docker.containers.create.side_effect = NotFound("Missing")
    coresys.docker.containers.create.side_effect = aiodocker.DockerError(
        HTTPStatus.NOT_FOUND, {"message": "missing"}
    )
    docker_addon = get_docker_addon(
        coresys, addonsdata_system, "basic-addon-config.json"
    )
@@ -2,22 +2,24 @@

from ipaddress import IPv4Address
from pathlib import Path
from unittest.mock import patch
from unittest.mock import MagicMock, patch

from docker.types import Mount
import pytest

from supervisor.coresys import CoreSys
from supervisor.docker.const import DockerMount, MountType, Ulimit
from supervisor.docker.manager import DockerAPI

from . import DEV_MOUNT


async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, path_extern):
@pytest.mark.usefixtures("path_extern")
async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, container: MagicMock):
    """Test starting audio plugin."""
    config_file = tmp_supervisor_data / "audio" / "pulse_audio.json"
    assert not config_file.exists()

    with patch.object(DockerAPI, "run") as run:
    with patch.object(DockerAPI, "run", return_value=container.attrs) as run:
        await coresys.plugins.audio.start()

    run.assert_called_once()

@@ -26,21 +28,31 @@ async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, path_extern):
    assert run.call_args.kwargs["hostname"] == "hassio-audio"
    assert run.call_args.kwargs["cap_add"] == ["SYS_NICE", "SYS_RESOURCE"]
    assert run.call_args.kwargs["ulimits"] == [
        {"Name": "rtprio", "Soft": 10, "Hard": 10}
        Ulimit(name="rtprio", soft=10, hard=10)
    ]

    assert run.call_args.kwargs["mounts"] == [
        DEV_MOUNT,
        Mount(
            type="bind",
        DockerMount(
            type=MountType.BIND,
            source=coresys.config.path_extern_audio.as_posix(),
            target="/data",
            read_only=False,
        ),
        Mount(type="bind", source="/run/dbus", target="/run/dbus", read_only=True),
        Mount(type="bind", source="/run/udev", target="/run/udev", read_only=True),
        Mount(
            type="bind",
        DockerMount(
            type=MountType.BIND,
            source="/run/dbus",
            target="/run/dbus",
            read_only=True,
        ),
        DockerMount(
            type=MountType.BIND,
            source="/run/udev",
            target="/run/udev",
            read_only=True,
        ),
        DockerMount(
            type=MountType.BIND,
            source="/etc/machine-id",
            target="/etc/machine-id",
            read_only=True,
@@ -1,9 +1,49 @@
"""Test docker login."""

import pytest

# pylint: disable=protected-access
from supervisor.coresys import CoreSys
from supervisor.docker.const import DOCKER_HUB
from supervisor.docker.const import DOCKER_HUB, DOCKER_HUB_LEGACY
from supervisor.docker.interface import DockerInterface
from supervisor.docker.utils import get_registry_from_image


@pytest.mark.parametrize(
    ("image_ref", "expected_registry"),
    [
        # No registry - Docker Hub images
        ("nginx", None),
        ("nginx:latest", None),
        ("library/nginx", None),
        ("library/nginx:latest", None),
        ("homeassistant/amd64-supervisor", None),
        ("homeassistant/amd64-supervisor:1.2.3", None),
        # Registry with dot
        ("ghcr.io/homeassistant/amd64-supervisor", "ghcr.io"),
        ("ghcr.io/homeassistant/amd64-supervisor:latest", "ghcr.io"),
        ("myregistry.com/nginx", "myregistry.com"),
        ("registry.example.com/org/image:v1", "registry.example.com"),
        ("127.0.0.1/myimage", "127.0.0.1"),
        # Registry with port
        ("myregistry:5000/myimage", "myregistry:5000"),
        ("localhost:5000/myimage", "localhost:5000"),
        ("registry.io:5000/org/app:v1", "registry.io:5000"),
        # localhost special case
        ("localhost/myimage", "localhost"),
        ("localhost/myimage:tag", "localhost"),
        # IPv6
        ("[::1]:5000/myimage", "[::1]:5000"),
        ("[2001:db8::1]:5000/myimage:tag", "[2001:db8::1]:5000"),
    ],
)
def test_get_registry_from_image(image_ref: str, expected_registry: str | None):
    """Test get_registry_from_image extracts registry from image reference.

    Based on Docker's reference implementation:
    vendor/github.com/distribution/reference/normalize.go
    """
    assert get_registry_from_image(image_ref) == expected_registry
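Note: the cases above follow Docker's reference-normalization rule: the first
path component names a registry only if it contains a "." or a ":" or is
exactly "localhost"; otherwise the image lives on Docker Hub and no registry
is returned. A standalone sketch consistent with every parametrized case (the
real helper lives in supervisor.docker.utils and may differ in detail):

def get_registry_from_image(image_ref: str) -> str | None:
    """Return the registry host of an image reference, or None for Docker Hub."""
    first, sep, _ = image_ref.partition("/")
    if not sep:
        # Bare image name such as "nginx:latest" - Docker Hub
        return None
    if first == "localhost" or "." in first or ":" in first:
        # Covers hosts with ports and bracketed IPv6 like "[::1]:5000"
        return first
    # Namespace such as "library/nginx" - still Docker Hub
    return None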

def test_no_credentials(coresys: CoreSys, test_docker_interface: DockerInterface):

@@ -47,3 +87,36 @@ def test_matching_credentials(coresys: CoreSys, test_docker_interface: DockerInt
    )
    assert credentials["username"] == "Spongebob Squarepants"
    assert "registry" not in credentials


def test_legacy_docker_hub_credentials(
    coresys: CoreSys, test_docker_interface: DockerInterface
):
    """Test legacy hub.docker.com credentials are used for Docker Hub images."""
    coresys.docker.config._data["registries"] = {
        DOCKER_HUB_LEGACY: {"username": "LegacyUser", "password": "Password1!"},
    }

    credentials = test_docker_interface._get_credentials(
        "homeassistant/amd64-supervisor"
    )
    assert credentials["username"] == "LegacyUser"
    # No registry should be included for Docker Hub
    assert "registry" not in credentials


def test_docker_hub_preferred_over_legacy(
    coresys: CoreSys, test_docker_interface: DockerInterface
):
    """Test docker.io is preferred over legacy hub.docker.com when both exist."""
    coresys.docker.config._data["registries"] = {
        DOCKER_HUB: {"username": "NewUser", "password": "Password1!"},
        DOCKER_HUB_LEGACY: {"username": "LegacyUser", "password": "Password2!"},
    }

    credentials = test_docker_interface._get_credentials(
        "homeassistant/amd64-supervisor"
    )
    # docker.io should be preferred
    assert credentials["username"] == "NewUser"
    assert "registry" not in credentials
@@ -2,20 +2,22 @@

from ipaddress import IPv4Address
from pathlib import Path
from unittest.mock import patch
from unittest.mock import MagicMock, patch

from docker.types import Mount
import pytest

from supervisor.coresys import CoreSys
from supervisor.docker.const import DockerMount, MountType
from supervisor.docker.manager import DockerAPI


async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, path_extern):
@pytest.mark.usefixtures("path_extern")
async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, container: MagicMock):
    """Test starting dns plugin."""
    config_file = tmp_supervisor_data / "dns" / "coredns.json"
    assert not config_file.exists()

    with patch.object(DockerAPI, "run") as run:
    with patch.object(DockerAPI, "run", return_value=container.attrs) as run:
        await coresys.plugins.dns.start()

    run.assert_called_once()

@@ -25,13 +27,18 @@ async def test_start(coresys: CoreSys, tmp_supervisor_data: Path, path_extern):
    assert run.call_args.kwargs["dns"] is False
    assert run.call_args.kwargs["oom_score_adj"] == -300
    assert run.call_args.kwargs["mounts"] == [
        Mount(
            type="bind",
        DockerMount(
            type=MountType.BIND,
            source=coresys.config.path_extern_dns.as_posix(),
            target="/config",
            read_only=False,
        ),
        Mount(type="bind", source="/run/dbus", target="/run/dbus", read_only=True),
        DockerMount(
            type=MountType.BIND,
            source="/run/dbus",
            target="/run/dbus",
            read_only=True,
        ),
    ]
    assert "volumes" not in run.call_args.kwargs
|
||||
|
||||
|
||||
@@ -1,13 +1,18 @@
 """Test Home Assistant container."""

 from ipaddress import IPv4Address
 from pathlib import Path
 from unittest.mock import ANY, MagicMock, patch

 from awesomeversion import AwesomeVersion
-from docker.types import Mount
 import pytest

 from supervisor.coresys import CoreSys
+from supervisor.docker.const import (
+    DockerMount,
+    MountBindOptions,
+    MountType,
+    PropagationMode,
+)
 from supervisor.docker.homeassistant import DockerHomeAssistant
 from supervisor.docker.manager import DockerAPI
@@ -15,14 +20,13 @@ from supervisor.homeassistant.const import LANDINGPAGE
 from . import DEV_MOUNT


-async def test_homeassistant_start(
-    coresys: CoreSys, tmp_supervisor_data: Path, path_extern
-):
+@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
+async def test_homeassistant_start(coresys: CoreSys, container: MagicMock):
     """Test starting homeassistant."""
     coresys.homeassistant.version = AwesomeVersion("2023.8.1")

     with (
-        patch.object(DockerAPI, "run") as run,
+        patch.object(DockerAPI, "run", return_value=container.attrs) as run,
         patch.object(
             DockerHomeAssistant, "is_running", side_effect=[False, False, True]
         ),
@@ -46,57 +50,68 @@ async def test_homeassistant_start(
         "TZ": ANY,
         "SUPERVISOR_TOKEN": ANY,
         "HASSIO_TOKEN": ANY,
+        # no "HA_DUPLICATE_LOG_FILE"
     }
     assert run.call_args.kwargs["mounts"] == [
         DEV_MOUNT,
-        Mount(type="bind", source="/run/dbus", target="/run/dbus", read_only=True),
-        Mount(type="bind", source="/run/udev", target="/run/udev", read_only=True),
-        Mount(
-            type="bind",
+        DockerMount(
+            type=MountType.BIND,
+            source="/run/dbus",
+            target="/run/dbus",
+            read_only=True,
+        ),
+        DockerMount(
+            type=MountType.BIND,
+            source="/run/udev",
+            target="/run/udev",
+            read_only=True,
+        ),
+        DockerMount(
+            type=MountType.BIND,
             source=coresys.config.path_extern_homeassistant.as_posix(),
             target="/config",
             read_only=False,
         ),
-        Mount(
-            type="bind",
+        DockerMount(
+            type=MountType.BIND,
             source=coresys.config.path_extern_ssl.as_posix(),
             target="/ssl",
             read_only=True,
         ),
-        Mount(
-            type="bind",
+        DockerMount(
+            type=MountType.BIND,
             source=coresys.config.path_extern_share.as_posix(),
             target="/share",
             read_only=False,
-            propagation="rslave",
+            bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
         ),
-        Mount(
-            type="bind",
+        DockerMount(
+            type=MountType.BIND,
             source=coresys.config.path_extern_media.as_posix(),
             target="/media",
             read_only=False,
-            propagation="rslave",
+            bind_options=MountBindOptions(propagation=PropagationMode.RSLAVE),
        ),
-        Mount(
-            type="bind",
+        DockerMount(
+            type=MountType.BIND,
             source=coresys.homeassistant.path_extern_pulse.as_posix(),
             target="/etc/pulse/client.conf",
             read_only=True,
         ),
-        Mount(
-            type="bind",
+        DockerMount(
+            type=MountType.BIND,
             source=coresys.plugins.audio.path_extern_pulse.as_posix(),
             target="/run/audio",
             read_only=True,
         ),
-        Mount(
-            type="bind",
+        DockerMount(
+            type=MountType.BIND,
             source=coresys.plugins.audio.path_extern_asound.as_posix(),
             target="/etc/asound.conf",
             read_only=True,
         ),
-        Mount(
-            type="bind",
+        DockerMount(
+            type=MountType.BIND,
             source="/etc/machine-id",
             target="/etc/machine-id",
             read_only=True,
@@ -105,14 +120,36 @@ async def test_homeassistant_start(
     assert "volumes" not in run.call_args.kwargs


-async def test_landingpage_start(
-    coresys: CoreSys, tmp_supervisor_data: Path, path_extern
-):
+@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
+async def test_homeassistant_start_with_duplicate_log_file(
+    coresys: CoreSys, container: MagicMock
+):
+    """Test starting homeassistant with duplicate_log_file enabled."""
+    coresys.homeassistant.version = AwesomeVersion("2025.12.0")
+    coresys.homeassistant.duplicate_log_file = True
+
+    with (
+        patch.object(DockerAPI, "run", return_value=container.attrs) as run,
+        patch.object(
+            DockerHomeAssistant, "is_running", side_effect=[False, False, True]
+        ),
+        patch("supervisor.homeassistant.core.asyncio.sleep"),
+    ):
+        await coresys.homeassistant.core.start()
+
+    run.assert_called_once()
+    env = run.call_args.kwargs["environment"]
+    assert "HA_DUPLICATE_LOG_FILE" in env
+    assert env["HA_DUPLICATE_LOG_FILE"] == "1"
+
+
+@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern")
+async def test_landingpage_start(coresys: CoreSys, container: MagicMock):
     """Test starting landingpage."""
     coresys.homeassistant.version = LANDINGPAGE

     with (
-        patch.object(DockerAPI, "run") as run,
+        patch.object(DockerAPI, "run", return_value=container.attrs) as run,
         patch.object(DockerHomeAssistant, "is_running", return_value=False),
     ):
         await coresys.homeassistant.core.start()
@@ -133,19 +170,30 @@ async def test_landingpage_start(
         "TZ": ANY,
         "SUPERVISOR_TOKEN": ANY,
         "HASSIO_TOKEN": ANY,
+        # no "HA_DUPLICATE_LOG_FILE"
     }
     assert run.call_args.kwargs["mounts"] == [
         DEV_MOUNT,
-        Mount(type="bind", source="/run/dbus", target="/run/dbus", read_only=True),
-        Mount(type="bind", source="/run/udev", target="/run/udev", read_only=True),
-        Mount(
-            type="bind",
+        DockerMount(
+            type=MountType.BIND,
+            source="/run/dbus",
+            target="/run/dbus",
+            read_only=True,
+        ),
+        DockerMount(
+            type=MountType.BIND,
+            source="/run/udev",
+            target="/run/udev",
+            read_only=True,
+        ),
+        DockerMount(
+            type=MountType.BIND,
             source=coresys.config.path_extern_homeassistant.as_posix(),
             target="/config",
             read_only=False,
         ),
-        Mount(
-            type="bind",
+        DockerMount(
+            type=MountType.BIND,
             source="/etc/machine-id",
             target="/etc/machine-id",
             read_only=True,
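The new duplicate-log-file test implies the Core container environment is assembled conditionally: the variable only appears when the option is set, and it is encoded as the string "1". A hedged sketch of that assembly (only the variable name, the value, and the `duplicate_log_file` attribute come from the assertions; the helper is illustrative):

    # Sketch only: conditionally inject HA_DUPLICATE_LOG_FILE into the Core
    # container environment, as the tests above expect.
    def build_environment(
        base_env: dict[str, str], duplicate_log_file: bool
    ) -> dict[str, str]:
        env = dict(base_env)  # TZ, SUPERVISOR_TOKEN, HASSIO_TOKEN, ...
        if duplicate_log_file:
            env["HA_DUPLICATE_LOG_FILE"] = "1"
        return env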
@@ -1,6 +1,7 @@
 """Test Docker interface."""

 import asyncio
+from http import HTTPStatus
 from pathlib import Path
 from typing import Any
 from unittest.mock import ANY, AsyncMock, MagicMock, Mock, PropertyMock, call, patch
@@ -54,7 +55,7 @@ async def test_docker_image_platform(
     coresys.docker.images.inspect.return_value = {"Id": "test:1.2.3"}
     await test_docker_interface.install(AwesomeVersion("1.2.3"), "test", arch=cpu_arch)
     coresys.docker.images.pull.assert_called_once_with(
-        "test", tag="1.2.3", platform=platform, auth=None, stream=True
+        "test", tag="1.2.3", platform=platform, auth=None, stream=True, timeout=None
     )
     coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
@@ -71,7 +72,12 @@ async def test_docker_image_default_platform(
 ):
     await test_docker_interface.install(AwesomeVersion("1.2.3"), "test")
     coresys.docker.images.pull.assert_called_once_with(
-        "test", tag="1.2.3", platform="linux/386", auth=None, stream=True
+        "test",
+        tag="1.2.3",
+        platform="linux/386",
+        auth=None,
+        stream=True,
+        timeout=None,
     )

     coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
@@ -111,7 +117,12 @@ async def test_private_registry_credentials_passed_to_pull(
     expected_auth["registry"] = registry_key

     coresys.docker.images.pull.assert_called_once_with(
-        image, tag="1.2.3", platform="linux/amd64", auth=expected_auth, stream=True
+        image,
+        tag="1.2.3",
+        platform="linux/amd64",
+        auth=expected_auth,
+        stream=True,
+        timeout=None,
     )
@@ -138,7 +149,7 @@ async def test_current_state(
     container_collection = MagicMock()
     container_collection.get.return_value = Container(attrs)
     with patch(
-        "supervisor.docker.manager.DockerAPI.containers",
+        "supervisor.docker.manager.DockerAPI.containers_legacy",
         new=PropertyMock(return_value=container_collection),
     ):
         assert await coresys.homeassistant.core.instance.current_state() == expected
@@ -148,7 +159,7 @@ async def test_current_state_failures(coresys: CoreSys):
     """Test failure states for current state."""
     container_collection = MagicMock()
     with patch(
-        "supervisor.docker.manager.DockerAPI.containers",
+        "supervisor.docker.manager.DockerAPI.containers_legacy",
         new=PropertyMock(return_value=container_collection),
     ):
         container_collection.get.side_effect = NotFound("dne")
@@ -201,7 +212,7 @@ async def test_attach_existing_container(
     container_collection.get.return_value = Container(attrs)
     with (
         patch(
-            "supervisor.docker.manager.DockerAPI.containers",
+            "supervisor.docker.manager.DockerAPI.containers_legacy",
             new=PropertyMock(return_value=container_collection),
         ),
         patch.object(type(coresys.bus), "fire_event") as fire_event,
@@ -243,7 +254,7 @@ async def test_attach_existing_container(

 async def test_attach_container_failure(coresys: CoreSys):
     """Test attach fails to find container but finds image."""
-    coresys.docker.containers.get.side_effect = DockerException()
+    coresys.docker.containers_legacy.get.side_effect = DockerException()
     coresys.docker.images.inspect.return_value.setdefault("Config", {})["Image"] = (
         "sha256:abc123"
     )
@@ -261,7 +272,7 @@ async def test_attach_container_failure(coresys: CoreSys):

 async def test_attach_total_failure(coresys: CoreSys):
     """Test attach fails to find container or image."""
-    coresys.docker.containers.get.side_effect = DockerException
+    coresys.docker.containers_legacy.get.side_effect = DockerException
     coresys.docker.images.inspect.side_effect = aiodocker.DockerError(
         400, {"message": ""}
     )
@@ -294,8 +305,10 @@ async def test_run_missing_image(
     tmp_supervisor_data: Path,
 ):
     """Test run captures the exception when image is missing."""
-    coresys.docker.containers.create.side_effect = [NotFound("missing"), MagicMock()]
+    container.status = "stopped"
+    coresys.docker.containers.create.side_effect = [
+        aiodocker.DockerError(HTTPStatus.NOT_FOUND, {"message": "missing"}),
+        MagicMock(),
+    ]
     install_addon_ssh.data["image"] = "test_image"

     with pytest.raises(DockerNotFound):
@@ -360,7 +373,12 @@ async def test_install_fires_progress_events(
 ):
     await test_docker_interface.install(AwesomeVersion("1.2.3"), "test")
     coresys.docker.images.pull.assert_called_once_with(
-        "test", tag="1.2.3", platform="linux/386", auth=None, stream=True
+        "test",
+        tag="1.2.3",
+        platform="linux/386",
+        auth=None,
+        stream=True,
+        timeout=None,
     )
     coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
@@ -817,7 +835,12 @@ async def test_install_progress_containerd_snapshot(
     with patch.object(Supervisor, "arch", PropertyMock(return_value="i386")):
         await test_docker_interface.mock_install()
     coresys.docker.images.pull.assert_called_once_with(
-        "test", tag="1.2.3", platform="linux/386", auth=None, stream=True
+        "test",
+        tag="1.2.3",
+        platform="linux/386",
+        auth=None,
+        stream=True,
+        timeout=None,
     )
     coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
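Every pull assertion gains `timeout=None`: image pulls are now invoked with an explicit, unbounded timeout instead of the client default, which matters for large images on slow links. A hedged sketch of the calling side (the call shape mirrors the mocked interface in these tests; whether the real client accepts every keyword is an assumption):

    # Illustrative only: forward the explicit timeout the assertions above expect.
    async def pull_image(
        images, image: str, tag: str, platform: str, auth: dict | None
    ) -> None:
        async for status in images.pull(
            image, tag=tag, platform=platform, auth=auth, stream=True, timeout=None
        ):
            _ = status  # progress events would be dispatched to listeners here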
@@ -2,8 +2,9 @@

 import asyncio
 from pathlib import Path
-from unittest.mock import MagicMock, patch
+from unittest.mock import AsyncMock, MagicMock, patch

+from aiodocker.containers import DockerContainer
 from docker.errors import APIError, DockerException, NotFound
 import pytest
 from requests import RequestException
@@ -139,40 +140,38 @@ async def test_run_command_custom_stdout_stderr(docker: DockerAPI):
     assert result.output == b"output"


-async def test_run_container_with_cidfile(
-    coresys: CoreSys, docker: DockerAPI, path_extern, tmp_supervisor_data
-):
+@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
+async def test_run_container_with_cidfile(coresys: CoreSys, docker: DockerAPI):
     """Test container creation with cidfile and bind mount."""
     # Mock container
-    mock_container = MagicMock()
-    mock_container.id = "test_container_id_12345"
+    mock_container = MagicMock(spec=DockerContainer, id="test_container_id_12345")
+    mock_container.show.return_value = mock_metadata = {"Id": mock_container.id}

     container_name = "test_container"
     cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"
     extern_cidfile_path = coresys.config.path_extern_cid_files / f"{container_name}.cid"

-    docker.dockerpy.containers.run.return_value = mock_container
+    docker.containers.create.return_value = mock_container

     # Mock container creation
     with patch.object(
         docker.containers, "create", return_value=mock_container
     ) as create_mock:
-        # Execute run with a container name
-        loop = asyncio.get_event_loop()
-        result = await loop.run_in_executor(
-            None,
-            lambda kwrgs: docker.run(**kwrgs),
-            {"image": "test_image", "tag": "latest", "name": container_name},
-        )
+        result = await docker.run("test_image", tag="latest", name=container_name)

     # Check the container creation parameters
     create_mock.assert_called_once()
-    kwargs = create_mock.call_args[1]
+    create_config = create_mock.call_args.args[0]

-    assert "volumes" in kwargs
-    assert str(extern_cidfile_path) in kwargs["volumes"]
-    assert kwargs["volumes"][str(extern_cidfile_path)]["bind"] == "/run/cid"
-    assert kwargs["volumes"][str(extern_cidfile_path)]["mode"] == "ro"
+    assert "HostConfig" in create_config
+    assert "Mounts" in create_config["HostConfig"]
+    assert {
+        "Type": "bind",
+        "Source": str(extern_cidfile_path),
+        "Target": "/run/cid",
+        "ReadOnly": True,
+    } in create_config["HostConfig"]["Mounts"]

     # Verify container start was called
     mock_container.start.assert_called_once()
@@ -181,16 +180,15 @@ async def test_run_container_with_cidfile(
     assert cidfile_path.exists()
     assert cidfile_path.read_text() == mock_container.id

-    assert result == mock_container
+    assert result == mock_metadata


-async def test_run_container_with_leftover_cidfile(
-    coresys: CoreSys, docker: DockerAPI, path_extern, tmp_supervisor_data
-):
+@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
+async def test_run_container_with_leftover_cidfile(coresys: CoreSys, docker: DockerAPI):
     """Test container creation removes leftover cidfile before creating new one."""
     # Mock container
-    mock_container = MagicMock()
-    mock_container.id = "test_container_id_new"
+    mock_container = MagicMock(spec=DockerContainer, id="test_container_id_new")
+    mock_container.show.return_value = mock_metadata = {"Id": mock_container.id}

     container_name = "test_container"
     cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"
@@ -203,12 +201,7 @@ async def test_run_container_with_leftover_cidfile(
         docker.containers, "create", return_value=mock_container
     ) as create_mock:
-        # Execute run with a container name
-        loop = asyncio.get_event_loop()
-        result = await loop.run_in_executor(
-            None,
-            lambda kwrgs: docker.run(**kwrgs),
-            {"image": "test_image", "tag": "latest", "name": container_name},
-        )
+        result = await docker.run("test_image", tag="latest", name=container_name)

     # Verify container was created
     create_mock.assert_called_once()
@@ -217,7 +210,7 @@ async def test_run_container_with_leftover_cidfile(
     assert cidfile_path.exists()
     assert cidfile_path.read_text() == mock_container.id

-    assert result == mock_container
+    assert result == mock_metadata


 async def test_stop_container_with_cidfile_cleanup(
@@ -236,7 +229,7 @@ async def test_stop_container_with_cidfile_cleanup(

     # Mock the containers.get method and cidfile cleanup
     with (
-        patch.object(docker.containers, "get", return_value=mock_container),
+        patch.object(docker.containers_legacy, "get", return_value=mock_container),
     ):
         # Call stop_container with remove_container=True
         loop = asyncio.get_event_loop()
@@ -263,7 +256,7 @@ async def test_stop_container_without_removal_no_cidfile_cleanup(docker: DockerA

     # Mock the containers.get method and cidfile cleanup
     with (
-        patch.object(docker.containers, "get", return_value=mock_container),
+        patch.object(docker.containers_legacy, "get", return_value=mock_container),
         patch("pathlib.Path.unlink") as mock_unlink,
     ):
         # Call stop_container with remove_container=False
@@ -277,9 +270,8 @@ async def test_stop_container_without_removal_no_cidfile_cleanup(docker: DockerA
     mock_unlink.assert_not_called()


-async def test_cidfile_cleanup_handles_oserror(
-    coresys: CoreSys, docker: DockerAPI, path_extern, tmp_supervisor_data
-):
+@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
+async def test_cidfile_cleanup_handles_oserror(coresys: CoreSys, docker: DockerAPI):
     """Test that cidfile cleanup handles OSError gracefully."""
     # Mock container
     mock_container = MagicMock()
@@ -293,7 +285,7 @@ async def test_cidfile_cleanup_handles_oserror(

     # Mock the containers.get method and cidfile cleanup to raise OSError
     with (
-        patch.object(docker.containers, "get", return_value=mock_container),
+        patch.object(docker.containers_legacy, "get", return_value=mock_container),
         patch("pathlib.Path.is_dir", return_value=False),
         patch("pathlib.Path.is_file", return_value=True),
         patch(
@@ -311,8 +303,9 @@ async def test_cidfile_cleanup_handles_oserror(
     mock_unlink.assert_called_once_with(missing_ok=True)


+@pytest.mark.usefixtures("path_extern", "tmp_supervisor_data")
 async def test_run_container_with_leftover_cidfile_directory(
-    coresys: CoreSys, docker: DockerAPI, path_extern, tmp_supervisor_data
+    coresys: CoreSys, docker: DockerAPI
 ):
     """Test container creation removes leftover cidfile directory before creating new one.
@@ -321,8 +314,8 @@ async def test_run_container_with_leftover_cidfile_directory(
     the bind mount source as a directory.
     """
     # Mock container
-    mock_container = MagicMock()
-    mock_container.id = "test_container_id_new"
+    mock_container = MagicMock(spec=DockerContainer, id="test_container_id_new")
+    mock_container.show.return_value = mock_metadata = {"Id": mock_container.id}

     container_name = "test_container"
     cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"
@@ -336,12 +329,7 @@ async def test_run_container_with_leftover_cidfile_directory(
         docker.containers, "create", return_value=mock_container
     ) as create_mock:
-        # Execute run with a container name
-        loop = asyncio.get_event_loop()
-        result = await loop.run_in_executor(
-            None,
-            lambda kwrgs: docker.run(**kwrgs),
-            {"image": "test_image", "tag": "latest", "name": container_name},
-        )
+        result = await docker.run("test_image", tag="latest", name=container_name)

     # Verify container was created
     create_mock.assert_called_once()
@@ -351,7 +339,7 @@ async def test_run_container_with_leftover_cidfile_directory(
     assert cidfile_path.is_file()
     assert cidfile_path.read_text() == mock_container.id

-    assert result == mock_container
+    assert result == mock_metadata


 async def test_repair(coresys: CoreSys, caplog: pytest.LogCaptureFixture):
@@ -412,9 +400,9 @@ async def test_repair_failures(coresys: CoreSys, caplog: pytest.LogCaptureFixtur
 async def test_import_image(coresys: CoreSys, tmp_path: Path, log_starter: str):
     """Test importing an image into docker."""
     (test_tar := tmp_path / "test.tar").touch()
-    coresys.docker.images.import_image.return_value = [
-        {"stream": f"{log_starter}: imported"}
-    ]
+    coresys.docker.images.import_image = AsyncMock(
+        return_value=[{"stream": f"{log_starter}: imported"}]
+    )
     coresys.docker.images.inspect.return_value = {"Id": "imported"}

     image = await coresys.docker.import_image(test_tar)
@@ -426,9 +414,9 @@ async def test_import_image(coresys: CoreSys, tmp_path: Path, log_starter: str):
 async def test_import_image_error(coresys: CoreSys, tmp_path: Path):
     """Test failure importing an image into docker."""
     (test_tar := tmp_path / "test.tar").touch()
-    coresys.docker.images.import_image.return_value = [
-        {"errorDetail": {"message": "fail"}}
-    ]
+    coresys.docker.images.import_image = AsyncMock(
+        return_value=[{"errorDetail": {"message": "fail"}}]
+    )

     with pytest.raises(DockerError, match="Can't import image from tar: fail"):
         await coresys.docker.import_image(test_tar)
@@ -441,10 +429,12 @@ async def test_import_multiple_images_in_tar(
 ):
     """Test importing an image into docker."""
     (test_tar := tmp_path / "test.tar").touch()
-    coresys.docker.images.import_image.return_value = [
-        {"stream": "Loaded image: imported-1"},
-        {"stream": "Loaded image: imported-2"},
-    ]
+    coresys.docker.images.import_image = AsyncMock(
+        return_value=[
+            {"stream": "Loaded image: imported-1"},
+            {"stream": "Loaded image: imported-2"},
+        ]
+    )

     assert await coresys.docker.import_image(test_tar) is None
@@ -120,7 +120,7 @@ async def test_unlabeled_container(coresys: CoreSys):
         }
     )
     with patch(
-        "supervisor.docker.manager.DockerAPI.containers",
+        "supervisor.docker.manager.DockerAPI.containers_legacy",
        new=PropertyMock(return_value=container_collection),
    ):
        await coresys.homeassistant.core.instance.attach(AwesomeVersion("2022.7.3"))
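The reworked cidfile tests show `DockerAPI.run` becoming an awaitable that passes an aiodocker-style create config (`HostConfig.Mounts`) instead of docker-py `volumes` kwargs, and returning the container's inspect metadata rather than the container object. A hedged sketch of the mount entry the tests expect it to emit (the helper name is illustrative):

    from pathlib import Path


    def cidfile_mount(extern_cidfile_path: Path) -> dict:
        """Bind-mount entry matching the HostConfig.Mounts assertion above."""
        return {
            "Type": "bind",
            "Source": str(extern_cidfile_path),
            "Target": "/run/cid",
            "ReadOnly": True,
        }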
@@ -1,17 +1,16 @@
 """Test Observer plugin container."""

 from ipaddress import IPv4Address, ip_network
-from unittest.mock import patch
-
-from docker.types import Mount
+from unittest.mock import MagicMock, patch

 from supervisor.coresys import CoreSys
+from supervisor.docker.const import DockerMount, MountType
 from supervisor.docker.manager import DockerAPI


-async def test_start(coresys: CoreSys):
+async def test_start(coresys: CoreSys, container: MagicMock):
     """Test starting observer plugin."""
-    with patch.object(DockerAPI, "run") as run:
+    with patch.object(DockerAPI, "run", return_value=container.attrs) as run:
         await coresys.plugins.observer.start()

     run.assert_called_once()
@@ -28,8 +27,8 @@ async def test_start(coresys: CoreSys):
     )
     assert run.call_args.kwargs["ports"] == {"80/tcp": 4357}
     assert run.call_args.kwargs["mounts"] == [
-        Mount(
-            type="bind",
+        DockerMount(
+            type=MountType.BIND,
             source="/run/docker.sock",
             target="/run/docker.sock",
             read_only=True,
@@ -238,6 +238,7 @@ async def test_install_other_error(
 @pytest.mark.usefixtures("path_extern")
 async def test_start(
     coresys: CoreSys,
+    container: MagicMock,
     container_exc: DockerException | None,
     image_exc: aiodocker.DockerError | None,
     remove_calls: list[call],
@@ -245,8 +246,8 @@ async def test_start(
     """Test starting Home Assistant."""
     coresys.docker.images.inspect.return_value = {"Id": "123"}
     coresys.docker.images.inspect.side_effect = image_exc
-    coresys.docker.containers.get.return_value.id = "123"
-    coresys.docker.containers.get.side_effect = container_exc
+    coresys.docker.containers_legacy.get.return_value.id = "123"
+    coresys.docker.containers_legacy.get.side_effect = container_exc

     with (
         patch.object(
@@ -254,7 +255,7 @@ async def test_start(
             "version",
             new=PropertyMock(return_value=AwesomeVersion("2023.7.0")),
         ),
-        patch.object(DockerAPI, "run") as run,
+        patch.object(DockerAPI, "run", return_value=container.attrs) as run,
         patch.object(HomeAssistantCore, "_block_till_run") as block_till_run,
     ):
         await coresys.homeassistant.core.start()
@@ -268,17 +269,18 @@ async def test_start(
     assert run.call_args.kwargs["name"] == "homeassistant"
     assert run.call_args.kwargs["hostname"] == "homeassistant"

-    coresys.docker.containers.get.return_value.stop.assert_not_called()
+    coresys.docker.containers_legacy.get.return_value.stop.assert_not_called()
     assert (
-        coresys.docker.containers.get.return_value.remove.call_args_list == remove_calls
+        coresys.docker.containers_legacy.get.return_value.remove.call_args_list
+        == remove_calls
     )


 async def test_start_existing_container(coresys: CoreSys, path_extern):
     """Test starting Home Assistant when container exists and is viable."""
     coresys.docker.images.inspect.return_value = {"Id": "123"}
-    coresys.docker.containers.get.return_value.image.id = "123"
-    coresys.docker.containers.get.return_value.status = "exited"
+    coresys.docker.containers_legacy.get.return_value.image.id = "123"
+    coresys.docker.containers_legacy.get.return_value.status = "exited"

     with (
         patch.object(
@@ -291,29 +293,29 @@ async def test_start_existing_container(coresys: CoreSys, path_extern):
         await coresys.homeassistant.core.start()
         block_till_run.assert_called_once()

-    coresys.docker.containers.get.return_value.start.assert_called_once()
-    coresys.docker.containers.get.return_value.stop.assert_not_called()
-    coresys.docker.containers.get.return_value.remove.assert_not_called()
-    coresys.docker.containers.get.return_value.run.assert_not_called()
+    coresys.docker.containers_legacy.get.return_value.start.assert_called_once()
+    coresys.docker.containers_legacy.get.return_value.stop.assert_not_called()
+    coresys.docker.containers_legacy.get.return_value.remove.assert_not_called()
+    coresys.docker.containers_legacy.get.return_value.run.assert_not_called()


 @pytest.mark.parametrize("exists", [True, False])
 async def test_stop(coresys: CoreSys, exists: bool):
     """Test stoppping Home Assistant."""
     if exists:
-        coresys.docker.containers.get.return_value.status = "running"
+        coresys.docker.containers_legacy.get.return_value.status = "running"
     else:
-        coresys.docker.containers.get.side_effect = NotFound("missing")
+        coresys.docker.containers_legacy.get.side_effect = NotFound("missing")

     await coresys.homeassistant.core.stop()

-    coresys.docker.containers.get.return_value.remove.assert_not_called()
+    coresys.docker.containers_legacy.get.return_value.remove.assert_not_called()
     if exists:
-        coresys.docker.containers.get.return_value.stop.assert_called_once_with(
+        coresys.docker.containers_legacy.get.return_value.stop.assert_called_once_with(
             timeout=260
         )
     else:
-        coresys.docker.containers.get.return_value.stop.assert_not_called()
+        coresys.docker.containers_legacy.get.return_value.stop.assert_not_called()


 async def test_restart(coresys: CoreSys):
@@ -322,18 +324,20 @@ async def test_restart(coresys: CoreSys):
         await coresys.homeassistant.core.restart()
         block_till_run.assert_called_once()

-    coresys.docker.containers.get.return_value.restart.assert_called_once_with(
+    coresys.docker.containers_legacy.get.return_value.restart.assert_called_once_with(
         timeout=260
     )
-    coresys.docker.containers.get.return_value.stop.assert_not_called()
+    coresys.docker.containers_legacy.get.return_value.stop.assert_not_called()


 @pytest.mark.parametrize("get_error", [NotFound("missing"), DockerException(), None])
 async def test_restart_failures(coresys: CoreSys, get_error: DockerException | None):
     """Test restart fails when container missing or can't be restarted."""
-    coresys.docker.containers.get.return_value.restart.side_effect = DockerException()
+    coresys.docker.containers_legacy.get.return_value.restart.side_effect = (
+        DockerException()
+    )
     if get_error:
-        coresys.docker.containers.get.side_effect = get_error
+        coresys.docker.containers_legacy.get.side_effect = get_error

     with pytest.raises(HomeAssistantError):
         await coresys.homeassistant.core.restart()
@@ -352,10 +356,12 @@ async def test_stats_failures(
     coresys: CoreSys, get_error: DockerException | None, status: str
 ):
     """Test errors when getting stats."""
-    coresys.docker.containers.get.return_value.status = status
-    coresys.docker.containers.get.return_value.stats.side_effect = DockerException()
+    coresys.docker.containers_legacy.get.return_value.status = status
+    coresys.docker.containers_legacy.get.return_value.stats.side_effect = (
+        DockerException()
+    )
     if get_error:
-        coresys.docker.containers.get.side_effect = get_error
+        coresys.docker.containers_legacy.get.side_effect = get_error

     with pytest.raises(HomeAssistantError):
         await coresys.homeassistant.core.stats()
@@ -387,7 +393,7 @@ async def test_api_check_timeout(
 ):
     await coresys.homeassistant.core.start()

-    assert coresys.homeassistant.api.get_api_state.call_count == 3
+    assert coresys.homeassistant.api.get_api_state.call_count == 10
     assert (
         "No Home Assistant Core response, assuming a fatal startup error" in caplog.text
     )
@@ -200,6 +200,8 @@ async def test_notify_on_change(coresys: CoreSys, ha_ws_client: AsyncMock):
                         "type": "HassioError",
                         "message": "Unknown error, see Supervisor logs (check with 'ha supervisor logs')",
                         "stage": "test",
+                        "error_key": None,
+                        "extra_fields": None,
                     }
                 ],
                 "created": ANY,
@@ -228,6 +230,8 @@ async def test_notify_on_change(coresys: CoreSys, ha_ws_client: AsyncMock):
                         "type": "HassioError",
                         "message": "Unknown error, see Supervisor logs (check with 'ha supervisor logs')",
                         "stage": "test",
+                        "error_key": None,
+                        "extra_fields": None,
                     }
                 ],
                 "created": ANY,
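The pervasive `containers` to `containers_legacy` rename across these hunks suggests `DockerAPI` now exposes an aiodocker-based container API as the primary interface while keeping the docker-py collection reachable under a legacy name. A hedged sketch of such a dual-accessor layout (only the two property names come from the diff; everything else is illustrative):

    import aiodocker
    import docker


    class DockerAPI:
        """Sketch of a dual-accessor Docker manager, not the real class."""

        def __init__(self) -> None:
            self._aiodocker = aiodocker.Docker()
            self._dockerpy = docker.from_env()

        @property
        def containers(self):
            """Primary, async (aiodocker) container API."""
            return self._aiodocker.containers

        @property
        def containers_legacy(self):
            """docker-py collection kept for call sites not yet migrated."""
            return self._dockerpy.containers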
@@ -1,7 +1,6 @@
 """Test base plugin functionality."""

 import asyncio
-from pathlib import Path
 from unittest.mock import ANY, MagicMock, Mock, PropertyMock, call, patch

 from awesomeversion import AwesomeVersion
@@ -159,15 +158,13 @@ async def test_plugin_watchdog(coresys: CoreSys, plugin: PluginBase) -> None:
     ],
     indirect=["plugin"],
 )
+@pytest.mark.usefixtures("coresys", "tmp_supervisor_data", "path_extern")
 async def test_plugin_watchdog_max_failed_attempts(
-    coresys: CoreSys,
     capture_exception: Mock,
     plugin: PluginBase,
     error: PluginError,
     container: MagicMock,
     caplog: pytest.LogCaptureFixture,
-    tmp_supervisor_data: Path,
-    path_extern,
 ) -> None:
     """Test plugin watchdog gives up after max failed attempts."""
     with patch.object(type(plugin.instance), "attach"):
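A recurring cleanup in this change set: fixtures needed only for their side effects move out of the test signature and into `@pytest.mark.usefixtures`, keeping the parameter list down to values the body actually reads. For example (illustrative test, not from this diff):

    import pytest


    @pytest.mark.usefixtures("tmp_supervisor_data")
    def test_with_marker(coresys):
        """Fixture requested only for its side effect."""


    def test_with_unused_param(coresys, tmp_supervisor_data):
        """Same effect, but the unused parameter clutters the signature."""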
@@ -76,7 +76,7 @@ async def test_check(
     docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon, folder: str
 ):
     """Test check reports issue when containers have incorrect config."""
-    docker.containers.get = _make_mock_container_get(
+    docker.containers_legacy.get = _make_mock_container_get(
         ["homeassistant", "hassio_audio", "addon_local_ssh"], folder
     )
     # Use state used in setup()
@@ -132,7 +132,7 @@ async def test_check(
     assert await docker_config.approve_check()

     # IF config issue is resolved, all issues are removed except the main one. Which will be removed if check isn't approved
-    docker.containers.get = _make_mock_container_get([])
+    docker.containers_legacy.get = _make_mock_container_get([])
     with patch.object(DockerInterface, "is_running", return_value=True):
         await coresys.plugins.load()
         await coresys.homeassistant.load()
@@ -159,7 +159,7 @@ async def test_addon_volume_mount_not_flagged(
     ]  # No media/share

     # Mock container that has VOLUME mount to media/share with wrong propagation
-    docker.containers.get = _make_mock_container_get_with_volume_mount(
+    docker.containers_legacy.get = _make_mock_container_get_with_volume_mount(
         ["addon_local_ssh"], folder
     )
@@ -221,7 +221,7 @@ async def test_addon_configured_mount_still_flagged(
         out.attrs["Mounts"].append(mount)
         return out

-    docker.containers.get = mock_container_get
+    docker.containers_legacy.get = mock_container_get

     await coresys.core.set_state(CoreState.SETUP)
     with patch.object(DockerInterface, "is_running", return_value=True):
@@ -275,7 +275,7 @@ async def test_addon_custom_target_path_flagged(
         out.attrs["Mounts"].append(mount)
         return out

-    docker.containers.get = mock_container_get
+    docker.containers_legacy.get = mock_container_get

     await coresys.core.set_state(CoreState.SETUP)
     with patch.object(DockerInterface, "is_running", return_value=True):

@@ -30,7 +30,7 @@ async def test_evaluation(coresys: CoreSys):
     assert container.reason not in coresys.resolution.unsupported
     assert UnhealthyReason.DOCKER not in coresys.resolution.unhealthy

-    coresys.docker.containers.list.return_value = [
+    coresys.docker.containers_legacy.list.return_value = [
         _make_image_attr("armhfbuild/watchtower:latest"),
         _make_image_attr("concerco/watchtowerv6:10.0.2"),
         _make_image_attr("containrrr/watchtower:1.1"),
@@ -47,7 +47,7 @@ async def test_evaluation(coresys: CoreSys):
         "pyouroboros/ouroboros:1.4.3",
     }

-    coresys.docker.containers.list.return_value = []
+    coresys.docker.containers_legacy.list.return_value = []
     await container()
     assert container.reason not in coresys.resolution.unsupported
@@ -62,7 +62,7 @@ async def test_corrupt_docker(coresys: CoreSys):
     corrupt_docker = Issue(IssueType.CORRUPT_DOCKER, ContextType.SYSTEM)
     assert corrupt_docker not in coresys.resolution.issues

-    coresys.docker.containers.list.side_effect = DockerException
+    coresys.docker.containers_legacy.list.side_effect = DockerException
     await container()
     assert corrupt_docker in coresys.resolution.issues

@@ -33,7 +33,7 @@ async def test_evaluation(coresys: CoreSys, install_addon_ssh: Addon):
         meta.attrs = observer_attrs if name == "hassio_observer" else addon_attrs
         return meta

-    coresys.docker.containers.get = get_container
+    coresys.docker.containers_legacy.get = get_container
     await coresys.plugins.observer.instance.attach(TEST_VERSION)
     await install_addon_ssh.instance.attach(TEST_VERSION)

@@ -31,7 +31,7 @@ async def _mock_wait_for_container() -> None:

 async def test_fixup(docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon):
     """Test fixup rebuilds addon's container."""
-    docker.containers.get = make_mock_container_get("running")
+    docker.containers_legacy.get = make_mock_container_get("running")

     addon_execute_rebuild = FixupAddonExecuteRebuild(coresys)
@@ -61,7 +61,7 @@ async def test_fixup_stopped_core(
 ):
     """Test fixup just removes addon's container when it is stopped."""
     caplog.clear()
-    docker.containers.get = make_mock_container_get("stopped")
+    docker.containers_legacy.get = make_mock_container_get("stopped")
     addon_execute_rebuild = FixupAddonExecuteRebuild(coresys)

     coresys.resolution.create_issue(
@@ -76,7 +76,7 @@ async def test_fixup_stopped_core(

     assert not coresys.resolution.issues
     assert not coresys.resolution.suggestions
-    docker.containers.get("addon_local_ssh").remove.assert_called_once_with(
+    docker.containers_legacy.get("addon_local_ssh").remove.assert_called_once_with(
         force=True, v=True
     )
     assert "Addon local_ssh is stopped" in caplog.text
@@ -90,7 +90,7 @@ async def test_fixup_unknown_core(
 ):
     """Test fixup does nothing if addon's container has already been removed."""
     caplog.clear()
-    docker.containers.get.side_effect = NotFound("")
+    docker.containers_legacy.get.side_effect = NotFound("")
     addon_execute_rebuild = FixupAddonExecuteRebuild(coresys)

     coresys.resolution.create_issue(

@@ -27,7 +27,7 @@ def make_mock_container_get(status: str):

 async def test_fixup(docker: DockerAPI, coresys: CoreSys):
     """Test fixup rebuilds core's container."""
-    docker.containers.get = make_mock_container_get("running")
+    docker.containers_legacy.get = make_mock_container_get("running")

     core_execute_rebuild = FixupCoreExecuteRebuild(coresys)
@@ -51,7 +51,7 @@ async def test_fixup_stopped_core(
 ):
     """Test fixup just removes HA's container when it is stopped."""
     caplog.clear()
-    docker.containers.get = make_mock_container_get("stopped")
+    docker.containers_legacy.get = make_mock_container_get("stopped")
     core_execute_rebuild = FixupCoreExecuteRebuild(coresys)

     coresys.resolution.create_issue(
@@ -65,7 +65,7 @@ async def test_fixup_stopped_core(

     assert not coresys.resolution.issues
     assert not coresys.resolution.suggestions
-    docker.containers.get("homeassistant").remove.assert_called_once_with(
+    docker.containers_legacy.get("homeassistant").remove.assert_called_once_with(
         force=True, v=True
     )
     assert "Home Assistant is stopped" in caplog.text
@@ -76,7 +76,7 @@ async def test_fixup_unknown_core(
 ):
     """Test fixup does nothing if core's container has already been removed."""
     caplog.clear()
-    docker.containers.get.side_effect = NotFound("")
+    docker.containers_legacy.get.side_effect = NotFound("")
     core_execute_rebuild = FixupCoreExecuteRebuild(coresys)

     coresys.resolution.create_issue(

@@ -28,7 +28,7 @@ def make_mock_container_get(status: str):
 @pytest.mark.parametrize("status", ["running", "stopped"])
 async def test_fixup(docker: DockerAPI, coresys: CoreSys, status: str):
     """Test fixup rebuilds plugin's container regardless of current state."""
-    docker.containers.get = make_mock_container_get(status)
+    docker.containers_legacy.get = make_mock_container_get(status)

     plugin_execute_rebuild = FixupPluginExecuteRebuild(coresys)
@@ -3,13 +3,14 @@
 from __future__ import annotations

 from pathlib import Path
-from unittest.mock import AsyncMock, patch
+from unittest.mock import AsyncMock, MagicMock, patch

 from git import GitCommandError, InvalidGitRepositoryError, NoSuchPathError
 import pytest

 from supervisor.coresys import CoreSys
 from supervisor.exceptions import StoreGitCloneError, StoreGitError
+from supervisor.resolution.const import ContextType, IssueType, SuggestionType
 from supervisor.store.git import GitRepo

 REPO_URL = "https://github.com/awesome-developer/awesome-repo"
@@ -119,3 +120,38 @@ async def test_git_load_error(coresys: CoreSys, tmp_path: Path, git_errors: Exce
         await repo.load()

     assert len(coresys.resolution.suggestions) == 0
+
+
+@pytest.mark.usefixtures("supervisor_internet")
+async def test_git_pull_missing_origin_remote(coresys: CoreSys, tmp_path: Path):
+    """Test git pull with missing origin remote creates reset suggestion.
+
+    This tests the scenario where a repository exists but has no 'origin' remote,
+    which can happen if the remote was renamed or deleted. The pull operation
+    should create a CORRUPT_REPOSITORY issue with EXECUTE_RESET suggestion.
+
+    Fixes: SUPERVISOR-69Z, SUPERVISOR-172C
+    """
+    repo = GitRepo(coresys, tmp_path, REPO_URL)
+
+    # Create a mock git repo without an origin remote
+    mock_repo = MagicMock()
+    mock_repo.remotes = []  # Empty remotes list - no 'origin'
+    mock_repo.active_branch.name = "main"
+    repo.repo = mock_repo
+
+    with (
+        patch("git.Git") as mock_git,
+        pytest.raises(StoreGitError),
+    ):
+        mock_git.return_value.ls_remote = MagicMock()
+        await repo.pull.__wrapped__(repo)
+
+    # Verify resolution issue was created
+    assert len(coresys.resolution.issues) == 1
+    assert coresys.resolution.issues[0].type == IssueType.CORRUPT_REPOSITORY
+    assert coresys.resolution.issues[0].context == ContextType.STORE
+
+    # Verify reset suggestion was created
+    assert len(coresys.resolution.suggestions) == 1
+    assert coresys.resolution.suggestions[0].type == SuggestionType.EXECUTE_RESET
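The new test exercises a guard that must run before `git pull` touches the remote. A minimal sketch of such a check, assuming the surrounding `GitRepo` plumbing roughly as in other Supervisor store code; helper names here are illustrative, not taken from supervisor/store/git.py:

    from supervisor.exceptions import StoreGitError
    from supervisor.resolution.const import ContextType, IssueType, SuggestionType


    async def pull(self) -> None:
        """Hedged sketch: defend against a repo whose 'origin' remote is gone."""
        if "origin" not in {remote.name for remote in self.repo.remotes}:
            # Surface the broken repository and offer a reset, as the test expects
            self.sys_resolution.create_issue(
                IssueType.CORRUPT_REPOSITORY,
                ContextType.STORE,
                suggestions=[SuggestionType.EXECUTE_RESET],
            )
            raise StoreGitError()
        ...  # normal fetch/reset path continues here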
@@ -9,7 +9,7 @@ from awesomeversion import AwesomeVersion
 import pytest

 from supervisor.addons.addon import Addon
-from supervisor.arch import CpuArch
+from supervisor.arch import CpuArchManager
 from supervisor.backups.manager import BackupManager
 from supervisor.coresys import CoreSys
 from supervisor.exceptions import AddonNotSupportedError, StoreJobError
@@ -163,7 +163,9 @@ async def test_update_unavailable_addon(
     with (
         patch.object(BackupManager, "do_backup_partial") as backup,
         patch.object(AddonStore, "data", new=PropertyMock(return_value=addon_config)),
-        patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
+        patch.object(
+            CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
+        ),
         patch.object(CoreSys, "machine", new=PropertyMock(return_value="qemux86-64")),
         patch.object(
             HomeAssistant,
@@ -219,7 +221,9 @@ async def test_install_unavailable_addon(

     with (
         patch.object(AddonStore, "data", new=PropertyMock(return_value=addon_config)),
-        patch.object(CpuArch, "supported", new=PropertyMock(return_value=["amd64"])),
+        patch.object(
+            CpuArchManager, "supported", new=PropertyMock(return_value=["amd64"])
+        ),
         patch.object(CoreSys, "machine", new=PropertyMock(return_value="qemux86-64")),
         patch.object(
             HomeAssistant,
0
wheels/.gitkeep
Normal file