Mirror of https://github.com/home-assistant/supervisor.git, synced 2025-12-02 14:08:17 +00:00.

Compare commits: 2025.11.2...improve-pr (49 commits)
Commit SHAs in this range:

9342456b34, 2080a2719e, 6820dbb4d2, 6302c7d394, f55fd891e9, 8a251e0324, 62b7b8c399, 3c87704802, ae7700f52c, e06e792e74, 5f55ab8de4, ca521c24cb, 6042694d84, 2b2aedae60, 4b4afd081b, a3dca10fd8, d73682ee8a, 032fa4cdc4, 7244e447ab, 603ba57846, 0ff12abdf4, 906838e325, 3be0c13fc5, bb450cad4f, 10af48a65b, 2f334c48c3, 6d87e8f591, 4d1dd63248, 0c2d0cf5c1, ca7a3af676, 93272fe4c0, 79a99cc66d, 6af6c3157f, 5ed0c85168, 63a3dff118, fc8fc171c1, 72bbc50c83, 0837e05cb2, d3d652eba5, 2eea3c70eb, 95c106d502, 74f9431519, 0eef2169f7, 2656b451cd, af7a629dd4, 30cc172199, 69ae8db13c, d85aedc42b, d541fe5c3a
.github/workflows/builder.yml (vendored, 66 lines changed)
```diff
@@ -34,6 +34,9 @@ on:

 env:
   DEFAULT_PYTHON: "3.13"
+  COSIGN_VERSION: "v2.5.3"
+  CRANE_VERSION: "v0.20.7"
+  CRANE_SHA256: "8ef3564d264e6b5ca93f7b7f5652704c4dd29d33935aff6947dd5adefd05953e"
   BUILD_NAME: supervisor
   BUILD_TYPE: supervisor

@@ -53,7 +56,7 @@ jobs:
       requirements: ${{ steps.requirements.outputs.changed }}
     steps:
       - name: Checkout the repository
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
         with:
           fetch-depth: 0

@@ -92,7 +95,7 @@ jobs:
         arch: ${{ fromJson(needs.init.outputs.architectures) }}
     steps:
       - name: Checkout the repository
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
         with:
           fetch-depth: 0

@@ -107,7 +110,7 @@ jobs:
       # home-assistant/wheels doesn't support sha pinning
       - name: Build wheels
         if: needs.init.outputs.requirements == 'true'
-        uses: home-assistant/wheels@2025.10.0
+        uses: home-assistant/wheels@2025.11.0
         with:
           abi: cp313
           tag: musllinux_1_2

@@ -126,7 +129,7 @@ jobs:

       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
         if: needs.init.outputs.publish == 'true'
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}

@@ -134,7 +137,7 @@ jobs:
         if: needs.init.outputs.publish == 'true'
         uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
         with:
-          cosign-release: "v2.5.3"
+          cosign-release: ${{ env.COSIGN_VERSION }}

       - name: Install dirhash and calc hash
         if: needs.init.outputs.publish == 'true'

@@ -173,12 +176,12 @@ jobs:

   version:
     name: Update version
-    needs: ["init", "run_supervisor"]
+    needs: ["init", "run_supervisor", "retag_deprecated"]
     runs-on: ubuntu-latest
     steps:
       - name: Checkout the repository
         if: needs.init.outputs.publish == 'true'
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0

       - name: Initialize git
         if: needs.init.outputs.publish == 'true'

@@ -203,7 +206,7 @@ jobs:
     timeout-minutes: 60
     steps:
       - name: Checkout the repository
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0

       # home-assistant/builder doesn't support sha pinning
       - name: Build the Supervisor

@@ -352,3 +355,50 @@ jobs:
       - name: Get supervisor logs on failiure
         if: ${{ cancelled() || failure() }}
         run: docker logs hassio_supervisor
+
+  retag_deprecated:
+    needs: ["build", "init"]
+    name: Re-tag deprecated ${{ matrix.arch }} images
+    if: needs.init.outputs.publish == 'true'
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      id-token: write
+      packages: write
+    strategy:
+      matrix:
+        arch: ["armhf", "armv7", "i386"]
+    env:
+      # Last available release for deprecated architectures
+      FROZEN_VERSION: "2025.11.5"
+    steps:
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Install Cosign
+        uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
+        with:
+          cosign-release: ${{ env.COSIGN_VERSION }}
+
+      - name: Install crane
+        run: |
+          curl -sLO https://github.com/google/go-containerregistry/releases/download/${{ env.CRANE_VERSION }}/go-containerregistry_Linux_x86_64.tar.gz
+          echo "${{ env.CRANE_SHA256 }} go-containerregistry_Linux_x86_64.tar.gz" | sha256sum -c -
+          tar xzf go-containerregistry_Linux_x86_64.tar.gz crane
+          sudo mv crane /usr/local/bin/
+
+      - name: Re-tag deprecated image with updated version label
+        run: |
+          crane auth login ghcr.io -u ${{ github.repository_owner }} -p ${{ secrets.GITHUB_TOKEN }}
+          crane mutate \
+            --label io.hass.version=${{ needs.init.outputs.version }} \
+            --tag ghcr.io/home-assistant/${{ matrix.arch }}-hassio-supervisor:${{ needs.init.outputs.version }} \
+            ghcr.io/home-assistant/${{ matrix.arch }}-hassio-supervisor:${{ env.FROZEN_VERSION }}
+
+      - name: Sign image with Cosign
+        run: |
+          cosign sign --yes ghcr.io/home-assistant/${{ matrix.arch }}-hassio-supervisor:${{ needs.init.outputs.version }}
```
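As a sanity check, the pinned-digest gate in the "Install crane" step can be reproduced offline. A minimal Python sketch, assuming the tarball was already downloaded into the working directory:

```python
import hashlib
from pathlib import Path

# Expected digest pinned in the workflow env (CRANE_SHA256).
EXPECTED = "8ef3564d264e6b5ca93f7b7f5652704c4dd29d33935aff6947dd5adefd05953e"

def verify_tarball(path: Path) -> None:
    """Fail loudly if the downloaded archive does not match the pinned digest."""
    digest = hashlib.sha256(path.read_bytes()).hexdigest()
    if digest != EXPECTED:
        raise SystemExit(f"checksum mismatch: {digest} != {EXPECTED}")

verify_tarball(Path("go-containerregistry_Linux_x86_64.tar.gz"))
```

Pinning both the release tag and the digest means a tampered GitHub release asset fails the build instead of being installed.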
.github/workflows/ci.yaml (vendored, 38 lines changed)
```diff
@@ -26,10 +26,10 @@ jobs:
     name: Prepare Python dependencies
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python
         id: python
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
       - name: Restore Python virtual environment
@@ -68,9 +68,9 @@ jobs:
     needs: prepare
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python
         with:
           python-version: ${{ needs.prepare.outputs.python-version }}
@@ -111,9 +111,9 @@ jobs:
     needs: prepare
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python
         with:
           python-version: ${{ needs.prepare.outputs.python-version }}
@@ -154,7 +154,7 @@ jobs:
     needs: prepare
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Register hadolint problem matcher
         run: |
           echo "::add-matcher::.github/workflows/matchers/hadolint.json"
@@ -169,9 +169,9 @@ jobs:
     needs: prepare
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python
         with:
           python-version: ${{ needs.prepare.outputs.python-version }}
@@ -213,9 +213,9 @@ jobs:
     needs: prepare
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python
         with:
           python-version: ${{ needs.prepare.outputs.python-version }}
@@ -257,9 +257,9 @@ jobs:
     needs: prepare
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python
         with:
           python-version: ${{ needs.prepare.outputs.python-version }}
@@ -293,9 +293,9 @@ jobs:
     needs: prepare
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python
         with:
           python-version: ${{ needs.prepare.outputs.python-version }}
@@ -339,9 +339,9 @@ jobs:
     name: Run tests Python ${{ needs.prepare.outputs.python-version }}
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python
         with:
           python-version: ${{ needs.prepare.outputs.python-version }}
@@ -398,9 +398,9 @@ jobs:
     needs: ["pytest", "prepare"]
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
+        uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         id: python
         with:
           python-version: ${{ needs.prepare.outputs.python-version }}
```
.github/workflows/release-drafter.yml (vendored, 2 lines changed)
```diff
@@ -11,7 +11,7 @@ jobs:
     name: Release Drafter
     steps:
       - name: Checkout the repository
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
         with:
           fetch-depth: 0
```
.github/workflows/sentry.yaml (vendored, 2 lines changed)
```diff
@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Sentry Release
         uses: getsentry/action-release@128c5058bbbe93c8e02147fe0a9c713f166259a6 # v3.4.0
         env:
```
.github/workflows/update_frontend.yml (vendored, 6 lines changed)
```diff
@@ -14,7 +14,7 @@ jobs:
       latest_version: ${{ steps.latest_frontend_version.outputs.latest_tag }}
     steps:
       - name: Checkout code
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Get latest frontend release
         id: latest_frontend_version
         uses: abatilo/release-info-action@32cb932219f1cee3fc4f4a298fd65ead5d35b661 # v1.3.3
@@ -49,7 +49,7 @@ jobs:
     if: needs.check-version.outputs.skip != 'true'
     steps:
       - name: Checkout code
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
       - name: Clear www folder
         run: |
           rm -rf supervisor/api/panel/*
@@ -68,7 +68,7 @@ jobs:
         run: |
           rm -f supervisor/api/panel/home_assistant_frontend_supervisor-*.tar.gz
       - name: Create PR
-        uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
+        uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412 # v7.0.9
         with:
           commit-message: "Update frontend to version ${{ needs.check-version.outputs.latest_version }}"
           branch: autoupdate-frontend
```
build.yaml (10 lines changed)
```diff
@@ -1,13 +1,7 @@
 image: ghcr.io/home-assistant/{arch}-hassio-supervisor
 build_from:
-  aarch64: ghcr.io/home-assistant/aarch64-base-python:3.13-alpine3.22
-  armhf: ghcr.io/home-assistant/armhf-base-python:3.13-alpine3.22
-  armv7: ghcr.io/home-assistant/armv7-base-python:3.13-alpine3.22
-  amd64: ghcr.io/home-assistant/amd64-base-python:3.13-alpine3.22
-  i386: ghcr.io/home-assistant/i386-base-python:3.13-alpine3.22
-codenotary:
-  signer: notary@home-assistant.io
-  base_image: notary@home-assistant.io
+  aarch64: ghcr.io/home-assistant/aarch64-base-python:3.13-alpine3.22-2025.11.1
+  amd64: ghcr.io/home-assistant/amd64-base-python:3.13-alpine3.22-2025.11.1
 cosign:
   base_identity: https://github.com/home-assistant/docker-base/.*
   identity: https://github.com/home-assistant/supervisor/.*
```
requirements.txt

```diff
@@ -1,9 +1,10 @@
 aiodns==3.5.0
+aiodocker==0.24.0
 aiohttp==3.13.2
 atomicwrites-homeassistant==1.4.1
 attrs==25.4.0
 awesomeversion==25.8.0
-backports.zstd==1.0.0
+backports.zstd==1.1.0
 blockbuster==1.5.25
 brotli==1.2.0
 ciso8601==2.3.3
@@ -24,8 +25,8 @@ pyudev==0.24.4
 PyYAML==6.0.3
 requests==2.32.5
 securetar==2025.2.1
-sentry-sdk==2.43.0
+sentry-sdk==2.46.0
 setuptools==80.9.0
 voluptuous==0.15.2
-dbus-fast==2.45.1
+dbus-fast==3.1.2
 zlib-fast==0.2.1
```
requirements_tests.txt

```diff
@@ -1,16 +1,16 @@
 astroid==4.0.2
-coverage==7.11.3
+coverage==7.12.0
 mypy==1.18.2
-pre-commit==4.4.0
+pre-commit==4.5.0
-pylint==4.0.2
+pylint==4.0.3
 pytest-aiohttp==1.1.0
-pytest-asyncio==1.2.0
+pytest-asyncio==1.3.0
 pytest-cov==7.0.0
 pytest-timeout==2.4.0
-pytest==8.4.2
+pytest==9.0.1
-ruff==0.14.4
+ruff==0.14.6
-time-machine==2.19.0
+time-machine==3.1.0
-types-docker==7.1.0.20251009
+types-docker==7.1.0.20251127
 types-pyyaml==6.0.12.20250915
 types-requests==2.32.4.20250913
 urllib3==2.5.0
```
supervisor/addons/build.py

```diff
@@ -2,7 +2,9 @@

 from __future__ import annotations

+import base64
 from functools import cached_property
+import json
 from pathlib import Path
 from typing import TYPE_CHECKING, Any

@@ -12,12 +14,15 @@ from ..const import (
     ATTR_ARGS,
     ATTR_BUILD_FROM,
     ATTR_LABELS,
+    ATTR_PASSWORD,
     ATTR_SQUASH,
+    ATTR_USERNAME,
     FILE_SUFFIX_CONFIGURATION,
     META_ADDON,
     SOCKET_DOCKER,
 )
 from ..coresys import CoreSys, CoreSysAttributes
+from ..docker.const import DOCKER_HUB
 from ..docker.interface import MAP_ARCH
 from ..exceptions import ConfigurationFileError, HassioArchNotFound
 from ..utils.common import FileConfiguration, find_one_filetype

@@ -122,8 +127,43 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
         except HassioArchNotFound:
             return False

+    def get_docker_config_json(self) -> str | None:
+        """Generate Docker config.json content with registry credentials for base image.
+
+        Returns a JSON string with registry credentials for the base image's registry,
+        or None if no matching registry is configured.
+
+        Raises:
+            HassioArchNotFound: If the add-on is not supported on the current architecture.
+
+        """
+        # Early return before accessing base_image to avoid unnecessary arch lookup
+        if not self.sys_docker.config.registries:
+            return None
+
+        registry = self.sys_docker.config.get_registry_for_image(self.base_image)
+        if not registry:
+            return None
+
+        stored = self.sys_docker.config.registries[registry]
+        username = stored[ATTR_USERNAME]
+        password = stored[ATTR_PASSWORD]
+
+        # Docker config.json uses base64-encoded "username:password" for auth
+        auth_string = base64.b64encode(f"{username}:{password}".encode()).decode()
+
+        # Use the actual registry URL for the key
+        # Docker Hub uses "https://index.docker.io/v1/" as the key
+        registry_key = (
+            "https://index.docker.io/v1/" if registry == DOCKER_HUB else registry
+        )
+
+        config = {"auths": {registry_key: {"auth": auth_string}}}
+
+        return json.dumps(config)
+
     def get_docker_args(
-        self, version: AwesomeVersion, image_tag: str
+        self, version: AwesomeVersion, image_tag: str, docker_config_path: Path | None
     ) -> dict[str, Any]:
         """Create a dict with Docker run args."""
         dockerfile_path = self.get_dockerfile().relative_to(self.addon.path_location)

@@ -172,12 +212,24 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
             self.addon.path_location
         )

+        volumes = {
+            SOCKET_DOCKER: {"bind": "/var/run/docker.sock", "mode": "rw"},
+            addon_extern_path: {"bind": "/addon", "mode": "ro"},
+        }
+
+        # Mount Docker config with registry credentials if available
+        if docker_config_path:
+            docker_config_extern_path = self.sys_config.local_to_extern_path(
+                docker_config_path
+            )
+            volumes[docker_config_extern_path] = {
+                "bind": "/root/.docker/config.json",
+                "mode": "ro",
+            }
+
         return {
             "command": build_cmd,
-            "volumes": {
-                SOCKET_DOCKER: {"bind": "/var/run/docker.sock", "mode": "rw"},
-                addon_extern_path: {"bind": "/addon", "mode": "ro"},
-            },
+            "volumes": volumes,
             "working_dir": "/addon",
         }
```
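For reference, the auth blob that get_docker_config_json() assembles follows the standard Docker config.json layout. A minimal sketch with placeholder credentials and registry:

```python
import base64
import json

# Placeholder values; the real method pulls these from the configured registries.
username, password = "builder", "s3cret"

# Docker config.json stores base64("username:password") under auths.<registry>.auth
auth = base64.b64encode(f"{username}:{password}".encode()).decode()
print(json.dumps({"auths": {"ghcr.io": {"auth": auth}}}, indent=2))
```

Mounting that file read-only at /root/.docker/config.json inside the builder container is what lets BuildKit pull a credential-gated base image.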
supervisor/api/__init__.py

```diff
@@ -152,6 +152,7 @@ class RestAPI(CoreSysAttributes):
                     self._api_host.advanced_logs,
                     identifier=syslog_identifier,
                     latest=True,
+                    no_colors=True,
                 ),
             ),
             web.get(
@@ -449,6 +450,7 @@ class RestAPI(CoreSysAttributes):
                 await async_capture_exception(err)
             kwargs.pop("follow", None)  # Follow is not supported for Docker logs
             kwargs.pop("latest", None)  # Latest is not supported for Docker logs
+            kwargs.pop("no_colors", None)  # no_colors not supported for Docker logs
             return await api_supervisor.logs(*args, **kwargs)

         self.webapp.add_routes(
@@ -460,7 +462,7 @@ class RestAPI(CoreSysAttributes):
                 ),
                 web.get(
                     "/supervisor/logs/latest",
-                    partial(get_supervisor_logs, latest=True),
+                    partial(get_supervisor_logs, latest=True, no_colors=True),
                 ),
                 web.get("/supervisor/logs/boots/{bootid}", get_supervisor_logs),
                 web.get(
@@ -576,7 +578,7 @@ class RestAPI(CoreSysAttributes):
                 ),
                 web.get(
                     "/addons/{addon}/logs/latest",
-                    partial(get_addon_logs, latest=True),
+                    partial(get_addon_logs, latest=True, no_colors=True),
                 ),
                 web.get("/addons/{addon}/logs/boots/{bootid}", get_addon_logs),
                 web.get(
@@ -811,6 +813,10 @@ class RestAPI(CoreSysAttributes):
         self.webapp.add_routes(
             [
                 web.get("/docker/info", api_docker.info),
+                web.post(
+                    "/docker/migrate-storage-driver",
+                    api_docker.migrate_docker_storage_driver,
+                ),
                 web.post("/docker/options", api_docker.options),
                 web.get("/docker/registries", api_docker.registries),
                 web.post("/docker/registries", api_docker.create_registry),
```
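The route wiring here relies on functools.partial to pre-bind keyword flags onto a shared handler. A minimal sketch with a stand-in handler (names are illustrative, not the real ones):

```python
from functools import partial

async def get_logs(request, latest: bool = False, no_colors: bool = False):
    """Stand-in for the real log handler; just echoes the bound flags."""
    return latest, no_colors

# The /logs/latest routes now pre-bind both flags, so the same handler
# serves colored streaming logs and plain "latest" snapshots.
latest_handler = partial(get_logs, latest=True, no_colors=True)
```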
supervisor/api/docker.py

```diff
@@ -4,6 +4,7 @@ import logging
 from typing import Any

 from aiohttp import web
+from awesomeversion import AwesomeVersion
 import voluptuous as vol

 from supervisor.resolution.const import ContextType, IssueType, SuggestionType
@@ -16,6 +17,7 @@ from ..const import (
     ATTR_PASSWORD,
     ATTR_REGISTRIES,
     ATTR_STORAGE,
+    ATTR_STORAGE_DRIVER,
     ATTR_USERNAME,
     ATTR_VERSION,
 )
@@ -42,6 +44,12 @@ SCHEMA_OPTIONS = vol.Schema(
     }
 )

+SCHEMA_MIGRATE_DOCKER_STORAGE_DRIVER = vol.Schema(
+    {
+        vol.Required(ATTR_STORAGE_DRIVER): vol.In(["overlayfs", "overlay2"]),
+    }
+)
+

 class APIDocker(CoreSysAttributes):
     """Handle RESTful API for Docker configuration."""
@@ -123,3 +131,27 @@ class APIDocker(CoreSysAttributes):

         del self.sys_docker.config.registries[hostname]
         await self.sys_docker.config.save_data()
+
+    @api_process
+    async def migrate_docker_storage_driver(self, request: web.Request) -> None:
+        """Migrate Docker storage driver."""
+        if (
+            not self.coresys.os.available
+            or not self.coresys.os.version
+            or self.coresys.os.version < AwesomeVersion("17.0.dev0")
+        ):
+            raise APINotFound(
+                "Home Assistant OS 17.0 or newer required for Docker storage driver migration"
+            )
+
+        body = await api_validate(SCHEMA_MIGRATE_DOCKER_STORAGE_DRIVER, request)
+        await self.sys_dbus.agent.system.migrate_docker_storage_driver(
+            body[ATTR_STORAGE_DRIVER]
+        )
+
+        _LOGGER.info("Host system reboot required to apply Docker storage migration")
+        self.sys_resolution.create_issue(
+            IssueType.REBOOT_REQUIRED,
+            ContextType.SYSTEM,
+            suggestions=[SuggestionType.EXECUTE_REBOOT],
+        )
```
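The new endpoint gates its payload through a voluptuous schema before anything touches D-Bus. A minimal sketch of the same validation shape in isolation:

```python
import voluptuous as vol

# Same shape as SCHEMA_MIGRATE_DOCKER_STORAGE_DRIVER above.
SCHEMA = vol.Schema(
    {vol.Required("storage_driver"): vol.In(["overlayfs", "overlay2"])}
)

print(SCHEMA({"storage_driver": "overlay2"}))   # passes, returns the dict
# SCHEMA({"storage_driver": "btrfs"})           # would raise vol.Invalid
```

Rejecting unknown drivers at the API boundary means the os-agent only ever sees the two values the migration path supports.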
supervisor/api/host.py

```diff
@@ -206,6 +206,7 @@ class APIHost(CoreSysAttributes):
         identifier: str | None = None,
         follow: bool = False,
         latest: bool = False,
+        no_colors: bool = False,
     ) -> web.StreamResponse:
         """Return systemd-journald logs."""
         log_formatter = LogFormatter.PLAIN
@@ -251,6 +252,9 @@ class APIHost(CoreSysAttributes):
         if "verbose" in request.query or request.headers[ACCEPT] == CONTENT_TYPE_X_LOG:
             log_formatter = LogFormatter.VERBOSE

+        if "no_colors" in request.query:
+            no_colors = True
+
         if "lines" in request.query:
             lines = request.query.get("lines", DEFAULT_LINES)
             try:
@@ -280,7 +284,9 @@ class APIHost(CoreSysAttributes):
             response = web.StreamResponse()
             response.content_type = CONTENT_TYPE_TEXT
             headers_returned = False
-            async for cursor, line in journal_logs_reader(resp, log_formatter):
+            async for cursor, line in journal_logs_reader(
+                resp, log_formatter, no_colors
+            ):
                 try:
                     if not headers_returned:
                         if cursor:
@@ -318,9 +324,12 @@ class APIHost(CoreSysAttributes):
         identifier: str | None = None,
         follow: bool = False,
         latest: bool = False,
+        no_colors: bool = False,
     ) -> web.StreamResponse:
         """Return systemd-journald logs. Wrapped as standard API handler."""
-        return await self.advanced_logs_handler(request, identifier, follow, latest)
+        return await self.advanced_logs_handler(
+            request, identifier, follow, latest, no_colors
+        )

     @api_process
     async def disk_usage(self, request: web.Request) -> dict:
@@ -334,10 +343,14 @@ class APIHost(CoreSysAttributes):

         disk = self.sys_hardware.disk

-        total, used, _ = await self.sys_run_in_executor(
+        total, _, free = await self.sys_run_in_executor(
             disk.disk_usage, self.sys_config.path_supervisor
         )

+        # Calculate used by subtracting free makes sure we include reserved space
+        # in used space reporting.
+        used = total - free
+
         known_paths = await self.sys_run_in_executor(
             disk.get_dir_sizes,
             {
```
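The disk_usage change is worth a standalone illustration: the "used" figure most APIs report excludes filesystem-reserved blocks, so total minus free gives a larger, more honest number. A minimal sketch using the standard library:

```python
import shutil

total, used_reported, free = shutil.disk_usage("/")

# Same derivation as the patch: reserved blocks count as used space,
# whereas the reported "used" figure leaves them out.
used = total - free
assert used >= used_reported
```

On ext4 with the default 5% root reserve, the difference between the two figures can be several gigabytes on a large disk.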
supervisor/api/utils.py

```diff
@@ -63,12 +63,10 @@ def json_loads(data: Any) -> dict[str, Any]:
 def api_process(method):
     """Wrap function with true/false calls to rest api."""

-    async def wrap_api(
-        api: CoreSysAttributes, *args, **kwargs
-    ) -> web.Response | web.StreamResponse:
+    async def wrap_api(*args, **kwargs) -> web.Response | web.StreamResponse:
         """Return API information."""
         try:
-            answer = await method(api, *args, **kwargs)
+            answer = await method(*args, **kwargs)
         except BackupFileNotFoundError as err:
             return api_return_error(err, status=404)
         except APIError as err:
@@ -109,12 +107,10 @@ def api_process_raw(content, *, error_type=None):
     def wrap_method(method):
         """Wrap function with raw output to rest api."""

-        async def wrap_api(
-            api: CoreSysAttributes, *args, **kwargs
-        ) -> web.Response | web.StreamResponse:
+        async def wrap_api(*args, **kwargs) -> web.Response | web.StreamResponse:
             """Return api information."""
             try:
-                msg_data = await method(api, *args, **kwargs)
+                msg_data = await method(*args, **kwargs)
             except APIError as err:
                 return api_return_error(
                     err,
```
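The point of dropping the explicit first parameter is that "self" now simply rides along inside *args, so the same decorator works for bound methods and for handlers pre-wrapped with partial. A minimal sketch of the pattern:

```python
def api_process(method):
    """Sketch of the slimmed-down wrapper: no positional 'api' argument."""
    async def wrap_api(*args, **kwargs):
        # Whatever the caller passes (including a bound method's self,
        # already baked into `method`) flows straight through.
        return await method(*args, **kwargs)
    return wrap_api
```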
supervisor/bus.py

```diff
@@ -2,6 +2,7 @@

 from __future__ import annotations

+from asyncio import Task
 from collections.abc import Callable, Coroutine
 import logging
 from typing import Any
@@ -38,11 +39,13 @@ class Bus(CoreSysAttributes):
         self._listeners.setdefault(event, []).append(listener)
         return listener

-    def fire_event(self, event: BusEvent, reference: Any) -> None:
+    def fire_event(self, event: BusEvent, reference: Any) -> list[Task]:
         """Fire an event to the bus."""
         _LOGGER.debug("Fire event '%s' with '%s'", event, reference)
+        tasks: list[Task] = []
         for listener in self._listeners.get(event, []):
-            self.sys_create_task(listener.callback(reference))
+            tasks.append(self.sys_create_task(listener.callback(reference)))
+        return tasks

     def remove_listener(self, listener: EventListener) -> None:
         """Unregister an listener."""
```
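Returning the spawned tasks turns fire_event from fire-and-forget into something a caller can synchronize on. A minimal sketch of what that enables, with a stand-in listener:

```python
import asyncio

async def listener(reference: str) -> None:
    """Stand-in listener callback."""
    print("handled", reference)

async def main() -> None:
    loop = asyncio.get_running_loop()
    # fire_event now hands these back, so callers (and tests) can await
    # delivery instead of racing against background tasks:
    tasks = [loop.create_task(listener("addon_x"))]
    await asyncio.gather(*tasks)

asyncio.run(main())
```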
supervisor/const.py

```diff
@@ -328,6 +328,7 @@ ATTR_STATE = "state"
 ATTR_STATIC = "static"
 ATTR_STDIN = "stdin"
 ATTR_STORAGE = "storage"
+ATTR_STORAGE_DRIVER = "storage_driver"
 ATTR_SUGGESTIONS = "suggestions"
 ATTR_SUPERVISOR = "supervisor"
 ATTR_SUPERVISOR_INTERNET = "supervisor_internet"
```
supervisor/coresys.py

```diff
@@ -9,6 +9,7 @@ from datetime import UTC, datetime, tzinfo
 from functools import partial
 import logging
 import os
+import time
 from types import MappingProxyType
 from typing import TYPE_CHECKING, Any, Self, TypeVar

@@ -655,8 +656,14 @@ class CoreSys:
         if kwargs:
             funct = partial(funct, **kwargs)

+        # Convert datetime to event loop time base
+        # If datetime is in the past, delay will be negative and call_at will
+        # schedule the call as soon as possible.
+        delay = when.timestamp() - time.time()
+        loop_time = self.loop.time() + delay
+
         return self.loop.call_at(
-            when.timestamp(), funct, *args, context=self._create_context()
+            loop_time, funct, *args, context=self._create_context()
         )
```
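This fixes a real unit mismatch: loop.call_at() expects values on the event loop's monotonic clock (loop.time()), not Unix epoch seconds, so passing when.timestamp() directly scheduled calls decades in the future. A minimal sketch of the corrected conversion:

```python
import asyncio
import time
from datetime import UTC, datetime, timedelta

async def main() -> None:
    loop = asyncio.get_running_loop()
    when = datetime.now(UTC) + timedelta(seconds=1)

    # Same conversion as the patch: re-base the wall-clock delta onto the
    # loop's monotonic clock before handing it to call_at().
    delay = when.timestamp() - time.time()
    loop.call_at(loop.time() + delay, lambda: print("fired"))

    await asyncio.sleep(1.5)

asyncio.run(main())
```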
supervisor/dbus/agent/system.py

```diff
@@ -15,3 +15,8 @@ class System(DBusInterface):
     async def schedule_wipe_device(self) -> bool:
         """Schedule a factory reset on next system boot."""
         return await self.connected_dbus.System.call("schedule_wipe_device")
+
+    @dbus_connected
+    async def migrate_docker_storage_driver(self, backend: str) -> None:
+        """Migrate Docker storage driver."""
+        await self.connected_dbus.System.call("migrate_docker_storage_driver", backend)
```
supervisor/dbus/const.py

```diff
@@ -306,6 +306,8 @@ class DeviceType(IntEnum):
     VLAN = 11
     TUN = 16
     VETH = 20
+    WIREGUARD = 29
+    LOOPBACK = 32


 class WirelessMethodType(IntEnum):
```
supervisor/dbus/manager.py

```diff
@@ -115,7 +115,7 @@ class DBusManager(CoreSysAttributes):

     async def load(self) -> None:
         """Connect interfaces to D-Bus."""
-        if not SOCKET_DBUS.exists():
+        if not await self.sys_run_in_executor(SOCKET_DBUS.exists):
             _LOGGER.error(
                 "No D-Bus support on Host. Disabled any kind of host control!"
             )
```
supervisor/dbus/network/__init__.py

```diff
@@ -134,9 +134,10 @@ class NetworkManager(DBusInterfaceProxy):
     async def check_connectivity(self, *, force: bool = False) -> ConnectivityState:
         """Check the connectivity of the host."""
         if force:
-            return await self.connected_dbus.call("check_connectivity")
-        else:
-            return await self.connected_dbus.get("connectivity")
+            return ConnectivityState(
+                await self.connected_dbus.call("check_connectivity")
+            )
+        return ConnectivityState(await self.connected_dbus.get("connectivity"))

     async def connect(self, bus: MessageBus) -> None:
         """Connect to system's D-Bus."""
```
supervisor/dbus/network/connection.py

```diff
@@ -69,7 +69,7 @@ class NetworkConnection(DBusInterfaceProxy):
     @dbus_property
     def state(self) -> ConnectionStateType:
         """Return the state of the connection."""
-        return self.properties[DBUS_ATTR_STATE]
+        return ConnectionStateType(self.properties[DBUS_ATTR_STATE])

     @property
     def state_flags(self) -> set[ConnectionStateFlags]:
```
supervisor/dbus/network/interface.py

```diff
@@ -1,5 +1,6 @@
 """NetworkInterface object for Network Manager."""

+import logging
 from typing import Any

 from dbus_fast.aio.message_bus import MessageBus
@@ -23,6 +24,8 @@ from .connection import NetworkConnection
 from .setting import NetworkSetting
 from .wireless import NetworkWireless

+_LOGGER: logging.Logger = logging.getLogger(__name__)
+

 class NetworkInterface(DBusInterfaceProxy):
     """NetworkInterface object represents Network Manager Device objects.
@@ -57,7 +60,15 @@ class NetworkInterface(DBusInterfaceProxy):
     @dbus_property
     def type(self) -> DeviceType:
         """Return interface type."""
-        return self.properties[DBUS_ATTR_DEVICE_TYPE]
+        try:
+            return DeviceType(self.properties[DBUS_ATTR_DEVICE_TYPE])
+        except ValueError:
+            _LOGGER.debug(
+                "Unknown device type %s for %s, treating as UNKNOWN",
+                self.properties[DBUS_ATTR_DEVICE_TYPE],
+                self.object_path,
+            )
+            return DeviceType.UNKNOWN

     @property
     @dbus_property
```
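The defensive-enum pattern used here is worth isolating: NetworkManager can report device-type codes newer than the enum knows, and an unguarded IntEnum lookup raises ValueError. A minimal self-contained sketch (UNKNOWN = 0 mirrors NetworkManager's NM_DEVICE_TYPE_UNKNOWN):

```python
from enum import IntEnum

class DeviceType(IntEnum):
    UNKNOWN = 0
    WIREGUARD = 29
    LOOPBACK = 32

def parse_device_type(raw: int) -> DeviceType:
    # Same guard as the patch: codes the enum does not know degrade to
    # UNKNOWN instead of raising ValueError mid property access.
    try:
        return DeviceType(raw)
    except ValueError:
        return DeviceType.UNKNOWN

assert parse_device_type(29) is DeviceType.WIREGUARD
assert parse_device_type(99) is DeviceType.UNKNOWN
```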
supervisor/dbus/resolved.py

```diff
@@ -75,7 +75,7 @@ class Resolved(DBusInterfaceProxy):
     @dbus_property
     def current_dns_server(
         self,
-    ) -> list[tuple[int, DNSAddressFamily, bytes]] | None:
+    ) -> tuple[int, DNSAddressFamily, bytes] | None:
         """Return current DNS server."""
         return self.properties[DBUS_ATTR_CURRENT_DNS_SERVER]

@@ -83,7 +83,7 @@ class Resolved(DBusInterfaceProxy):
     @dbus_property
     def current_dns_server_ex(
         self,
-    ) -> list[tuple[int, DNSAddressFamily, bytes, int, str]] | None:
+    ) -> tuple[int, DNSAddressFamily, bytes, int, str] | None:
         """Return current DNS server including port and server name."""
         return self.properties[DBUS_ATTR_CURRENT_DNS_SERVER_EX]
```
supervisor/dbus/systemd_unit.py

```diff
@@ -70,7 +70,7 @@ class SystemdUnit(DBusInterface):
     @dbus_connected
     async def get_active_state(self) -> UnitActiveState:
         """Get active state of the unit."""
-        return await self.connected_dbus.Unit.get("active_state")
+        return UnitActiveState(await self.connected_dbus.Unit.get("active_state"))

     @dbus_connected
     def properties_changed(self) -> DBusSignalWrapper:
```
supervisor/dbus/udisks2/data.py

```diff
@@ -9,7 +9,7 @@ from dbus_fast import Variant
 from .const import EncryptType, EraseMode


-def udisks2_bytes_to_path(path_bytes: bytearray) -> Path:
+def udisks2_bytes_to_path(path_bytes: bytes) -> Path:
     """Convert bytes to path object without null character on end."""
     if path_bytes and path_bytes[-1] == 0:
         return Path(path_bytes[:-1].decode())
@@ -73,7 +73,7 @@ FormatOptionsDataType = TypedDict(
     {
         "label": NotRequired[str],
         "take-ownership": NotRequired[bool],
-        "encrypt.passphrase": NotRequired[bytearray],
+        "encrypt.passphrase": NotRequired[bytes],
         "encrypt.type": NotRequired[str],
         "erase": NotRequired[str],
         "update-partition-type": NotRequired[bool],
```
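A quick standalone illustration of the helper whose annotation changed: UDisks2 hands back NUL-terminated byte strings, and the conversion strips the terminator before building a Path. The fallback branch below is an assumption for completeness, since the diff only shows the function's first return:

```python
from pathlib import Path

def udisks2_bytes_to_path(path_bytes: bytes) -> Path:
    """Convert bytes to a Path, dropping the trailing NUL UDisks2 appends."""
    if path_bytes and path_bytes[-1] == 0:
        return Path(path_bytes[:-1].decode())
    return Path(path_bytes.decode())  # assumed fallback, not shown in the diff

assert udisks2_bytes_to_path(b"/dev/sda1\x00") == Path("/dev/sda1")
```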
supervisor/docker/addon.py

```diff
@@ -7,8 +7,10 @@ from ipaddress import IPv4Address
 import logging
 import os
 from pathlib import Path
+import tempfile
 from typing import TYPE_CHECKING, cast

+import aiodocker
 from attr import evolve
 from awesomeversion import AwesomeVersion
 import docker
@@ -704,12 +706,38 @@ class DockerAddon(DockerInterface):
             with suppress(docker.errors.NotFound):
                 self.sys_docker.containers.get(builder_name).remove(force=True, v=True)

-            result = self.sys_docker.run_command(
-                ADDON_BUILDER_IMAGE,
-                version=builder_version_tag,
-                name=builder_name,
-                **build_env.get_docker_args(version, addon_image_tag),
-            )
+            # Generate Docker config with registry credentials for base image if needed
+            docker_config_path: Path | None = None
+            docker_config_content = build_env.get_docker_config_json()
+            temp_dir: tempfile.TemporaryDirectory | None = None
+
+            try:
+                if docker_config_content:
+                    # Create temporary directory for docker config
+                    temp_dir = tempfile.TemporaryDirectory(
+                        prefix="hassio_build_", dir=self.sys_config.path_tmp
+                    )
+                    docker_config_path = Path(temp_dir.name) / "config.json"
+                    docker_config_path.write_text(
+                        docker_config_content, encoding="utf-8"
+                    )
+                    _LOGGER.debug(
+                        "Created temporary Docker config for build at %s",
+                        docker_config_path,
+                    )
+
+                result = self.sys_docker.run_command(
+                    ADDON_BUILDER_IMAGE,
+                    version=builder_version_tag,
+                    name=builder_name,
+                    **build_env.get_docker_args(
+                        version, addon_image_tag, docker_config_path
+                    ),
+                )
+            finally:
+                # Clean up temporary directory
+                if temp_dir:
+                    temp_dir.cleanup()

             logs = result.output.decode("utf-8")

@@ -717,19 +745,21 @@ class DockerAddon(DockerInterface):
                 error_message = f"Docker build failed for {addon_image_tag} (exit code {result.exit_code}). Build output:\n{logs}"
                 raise docker.errors.DockerException(error_message)

-            addon_image = self.sys_docker.images.get(addon_image_tag)
-
-            return addon_image, logs
+            return addon_image_tag, logs

         try:
-            docker_image, log = await self.sys_run_in_executor(build_image)
+            addon_image_tag, log = await self.sys_run_in_executor(build_image)

             _LOGGER.debug("Build %s:%s done: %s", self.image, version, log)

             # Update meta data
-            self._meta = docker_image.attrs
+            self._meta = await self.sys_docker.images.inspect(addon_image_tag)

-        except (docker.errors.DockerException, requests.RequestException) as err:
+        except (
+            docker.errors.DockerException,
+            requests.RequestException,
+            aiodocker.DockerError,
+        ) as err:
             _LOGGER.error("Can't build %s:%s: %s", self.image, version, err)
             raise DockerError() from err

@@ -751,11 +781,8 @@ class DockerAddon(DockerInterface):
         )
     async def import_image(self, tar_file: Path) -> None:
         """Import a tar file as image."""
-        docker_image = await self.sys_run_in_executor(
-            self.sys_docker.import_image, tar_file
-        )
-        if docker_image:
-            self._meta = docker_image.attrs
+        if docker_image := await self.sys_docker.import_image(tar_file):
+            self._meta = docker_image
             _LOGGER.info("Importing image %s and version %s", tar_file, self.version)

         with suppress(DockerError):
@@ -769,17 +796,21 @@ class DockerAddon(DockerInterface):
         version: AwesomeVersion | None = None,
     ) -> None:
         """Check if old version exists and cleanup other versions of image not in use."""
-        await self.sys_run_in_executor(
-            self.sys_docker.cleanup_old_images,
-            (image := image or self.image),
-            version or self.version,
+        if not (use_image := image or self.image):
+            raise DockerError("Cannot determine image from metadata!", _LOGGER.error)
+        if not (use_version := version or self.version):
+            raise DockerError("Cannot determine version from metadata!", _LOGGER.error)
+
+        await self.sys_docker.cleanup_old_images(
+            use_image,
+            use_version,
             {old_image} if old_image else None,
             keep_images={
                 f"{addon.image}:{addon.version}"
                 for addon in self.sys_addons.installed
                 if addon.slug != self.addon.slug
                 and addon.image
-                and addon.image in {old_image, image}
+                and addon.image in {old_image, use_image}
             },
         )
```
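The temporary-directory handling around the build is a small but important lifetime pattern: the credentials file exists only for the duration of the build, and cleanup runs even when the build raises. A minimal sketch of the same shape, with placeholder paths and content:

```python
import tempfile
from pathlib import Path

# Mirrors the build flow above; prefix and content are placeholders.
temp_dir = tempfile.TemporaryDirectory(prefix="hassio_build_")
try:
    config_path = Path(temp_dir.name) / "config.json"
    config_path.write_text('{"auths": {}}', encoding="utf-8")
    # ... run the builder container with config_path mounted read-only
    #     at /root/.docker/config.json ...
finally:
    # Credentials never outlive the build, even on failure.
    temp_dir.cleanup()
```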
supervisor/docker/const.py

```diff
@@ -15,6 +15,12 @@ from ..const import MACHINE_ID

 RE_RETRYING_DOWNLOAD_STATUS = re.compile(r"Retrying in \d+ seconds?")

+# Docker Hub registry identifier
+DOCKER_HUB = "hub.docker.com"
+
+# Regex to match images with a registry host (e.g., ghcr.io/org/image)
+IMAGE_WITH_HOST = re.compile(r"^((?:[a-z0-9]+(?:-[a-z0-9]+)*\.)+[a-z]{2,})\/.+")
+

 class Capabilities(StrEnum):
     """Linux Capabilities."""
```
supervisor/docker/homeassistant.py

```diff
@@ -1,6 +1,5 @@
 """Init file for Supervisor Docker object."""

-from collections.abc import Awaitable
 from ipaddress import IPv4Address
 import logging
 import re
@@ -236,11 +235,10 @@ class DockerHomeAssistant(DockerInterface):
             environment={ENV_TIME: self.sys_timezone},
         )

-    def is_initialize(self) -> Awaitable[bool]:
+    async def is_initialize(self) -> bool:
         """Return True if Docker container exists."""
-        return self.sys_run_in_executor(
-            self.sys_docker.container_is_initialized,
-            self.name,
-            self.image,
-            self.sys_homeassistant.version,
-        )
+        if not self.sys_homeassistant.version:
+            return False
+        return await self.sys_docker.container_is_initialized(
+            self.name, self.image, self.sys_homeassistant.version
+        )
```
@@ -6,17 +6,17 @@ from abc import ABC, abstractmethod
|
|||||||
from collections import defaultdict
|
from collections import defaultdict
|
||||||
from collections.abc import Awaitable
|
from collections.abc import Awaitable
|
||||||
from contextlib import suppress
|
from contextlib import suppress
|
||||||
|
from http import HTTPStatus
|
||||||
import logging
|
import logging
|
||||||
import re
|
|
||||||
from time import time
|
from time import time
|
||||||
from typing import Any, cast
|
from typing import Any, cast
|
||||||
from uuid import uuid4
|
from uuid import uuid4
|
||||||
|
|
||||||
|
import aiodocker
|
||||||
from awesomeversion import AwesomeVersion
|
from awesomeversion import AwesomeVersion
|
||||||
from awesomeversion.strategy import AwesomeVersionStrategy
|
from awesomeversion.strategy import AwesomeVersionStrategy
|
||||||
import docker
|
import docker
|
||||||
from docker.models.containers import Container
|
from docker.models.containers import Container
|
||||||
from docker.models.images import Image
|
|
||||||
import requests
|
import requests
|
||||||
|
|
||||||
from ..bus import EventListener
|
from ..bus import EventListener
|
||||||
@@ -33,6 +33,7 @@ from ..coresys import CoreSys
|
|||||||
from ..exceptions import (
|
from ..exceptions import (
|
||||||
DockerAPIError,
|
DockerAPIError,
|
||||||
DockerError,
|
DockerError,
|
||||||
|
DockerHubRateLimitExceeded,
|
||||||
DockerJobError,
|
DockerJobError,
|
||||||
DockerLogOutOfOrder,
|
DockerLogOutOfOrder,
|
||||||
DockerNotFound,
|
DockerNotFound,
|
||||||
@@ -44,16 +45,13 @@ from ..jobs.decorator import Job
|
|||||||
from ..jobs.job_group import JobGroup
|
from ..jobs.job_group import JobGroup
|
||||||
from ..resolution.const import ContextType, IssueType, SuggestionType
|
from ..resolution.const import ContextType, IssueType, SuggestionType
|
||||||
from ..utils.sentry import async_capture_exception
|
from ..utils.sentry import async_capture_exception
|
||||||
from .const import ContainerState, PullImageLayerStage, RestartPolicy
|
from .const import DOCKER_HUB, ContainerState, PullImageLayerStage, RestartPolicy
|
||||||
from .manager import CommandReturn, PullLogEntry
|
from .manager import CommandReturn, PullLogEntry
|
||||||
from .monitor import DockerContainerStateEvent
|
from .monitor import DockerContainerStateEvent
|
||||||
from .stats import DockerStats
|
from .stats import DockerStats
|
||||||
|
|
||||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
IMAGE_WITH_HOST = re.compile(r"^((?:[a-z0-9]+(?:-[a-z0-9]+)*\.)+[a-z]{2,})\/.+")
|
|
||||||
DOCKER_HUB = "hub.docker.com"
|
|
||||||
|
|
||||||
MAP_ARCH: dict[CpuArch | str, str] = {
|
MAP_ARCH: dict[CpuArch | str, str] = {
|
||||||
CpuArch.ARMV7: "linux/arm/v7",
|
CpuArch.ARMV7: "linux/arm/v7",
|
||||||
CpuArch.ARMHF: "linux/arm/v6",
|
CpuArch.ARMHF: "linux/arm/v6",
|
||||||
@@ -178,25 +176,16 @@ class DockerInterface(JobGroup, ABC):
         return self.meta_config.get("Healthcheck")
 
     def _get_credentials(self, image: str) -> dict:
-        """Return a dictionay with credentials for docker login."""
-        registry = None
+        """Return a dictionary with credentials for docker login."""
         credentials = {}
-        matcher = IMAGE_WITH_HOST.match(image)
-
-        # Custom registry
-        if matcher:
-            if matcher.group(1) in self.sys_docker.config.registries:
-                registry = matcher.group(1)
-                credentials[ATTR_REGISTRY] = registry
-
-        # If no match assume "dockerhub" as registry
-        elif DOCKER_HUB in self.sys_docker.config.registries:
-            registry = DOCKER_HUB
+        registry = self.sys_docker.config.get_registry_for_image(image)
 
         if registry:
             stored = self.sys_docker.config.registries[registry]
             credentials[ATTR_USERNAME] = stored[ATTR_USERNAME]
             credentials[ATTR_PASSWORD] = stored[ATTR_PASSWORD]
+            if registry != DOCKER_HUB:
+                credentials[ATTR_REGISTRY] = registry
 
             _LOGGER.debug(
                 "Logging in to %s as %s",
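For reference, the shape of the credentials dict this now produces (keys are the Supervisor's `ATTR_*` constants; the values here are made up): the registry key is only set for non-Docker-Hub registries, since the daemon treats Docker Hub as the default.

```python
credentials = {
    "username": "me",       # ATTR_USERNAME
    "password": "secret",   # ATTR_PASSWORD
    "registry": "ghcr.io",  # ATTR_REGISTRY, omitted when registry is hub.docker.com
}
```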
@@ -206,17 +195,6 @@ class DockerInterface(JobGroup, ABC):
 
         return credentials
 
-    async def _docker_login(self, image: str) -> None:
-        """Try to log in to the registry if there are credentials available."""
-        if not self.sys_docker.config.registries:
-            return
-
-        credentials = self._get_credentials(image)
-        if not credentials:
-            return
-
-        await self.sys_run_in_executor(self.sys_docker.docker.login, **credentials)
-
     def _process_pull_image_log(  # noqa: C901
         self, install_job_id: str, reference: PullLogEntry
     ) -> None:
@@ -248,28 +226,16 @@ class DockerInterface(JobGroup, ABC):
                 job = j
                 break
 
-        # This likely only occurs if the logs came in out of sync and we got progress before the Pulling FS Layer one
+        # There should no longer be any real risk of logs out of order anymore.
+        # However tests with very small images have shown that sometimes Docker
+        # skips stages in log. So keeping this one as a safety check on null job
         if not job:
             raise DockerLogOutOfOrder(
                 f"Received pull image log with status {reference.status} for image id {reference.id} and parent job {install_job_id} but could not find a matching job, skipping",
                 _LOGGER.debug,
             )
 
-        # Hopefully these come in order but if they sometimes get out of sync, avoid accidentally going backwards
-        # If it happens a lot though we may need to reconsider the value of this feature
-        if job.done:
-            raise DockerLogOutOfOrder(
-                f"Received pull image log with status {reference.status} for job {job.uuid} but job was done, skipping",
-                _LOGGER.debug,
-            )
-
-        if job.stage and stage < PullImageLayerStage.from_status(job.stage):
-            raise DockerLogOutOfOrder(
-                f"Received pull image log with status {reference.status} for job {job.uuid} but job was already on stage {job.stage}, skipping",
-                _LOGGER.debug,
-            )
-
-        # For progress calcuation we assume downloading and extracting are each 50% of the time and others stages negligible
+        # For progress calculation we assume downloading is 70% of time, extracting is 30% and others stages negligible
         progress = job.progress
         match stage:
             case PullImageLayerStage.DOWNLOADING | PullImageLayerStage.EXTRACTING:
@@ -278,22 +244,26 @@ class DockerInterface(JobGroup, ABC):
                     and reference.progress_detail.current
                     and reference.progress_detail.total
                 ):
-                    progress = 50 * (
+                    progress = (
                         reference.progress_detail.current
                         / reference.progress_detail.total
                     )
-                    if stage == PullImageLayerStage.EXTRACTING:
-                        progress += 50
+                    if stage == PullImageLayerStage.DOWNLOADING:
+                        progress = 70 * progress
+                    else:
+                        progress = 70 + 30 * progress
             case (
                 PullImageLayerStage.VERIFYING_CHECKSUM
                 | PullImageLayerStage.DOWNLOAD_COMPLETE
             ):
-                progress = 50
+                progress = 70
             case PullImageLayerStage.PULL_COMPLETE:
                 progress = 100
             case PullImageLayerStage.RETRYING_DOWNLOAD:
                 progress = 0
 
+        # No real risk of getting things out of order in current implementation
+        # but keeping this one in case another change to these trips us up.
         if stage != PullImageLayerStage.RETRYING_DOWNLOAD and progress < job.progress:
             raise DockerLogOutOfOrder(
                 f"Received pull image log with status {reference.status} for job {job.uuid} that implied progress was {progress} but current progress is {job.progress}, skipping",
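A worked sketch of the new per-layer weighting in this hunk: downloading covers 0–70% of a layer's progress, extracting covers 70–100%, with `current`/`total` taken from Docker's `progressDetail` bytes.

```python
def layer_progress(stage: str, current: int, total: int) -> float:
    # Downloading maps to 0-70, extracting to 70-100 (other stages negligible)
    fraction = current / total
    if stage == "Downloading":
        return 70 * fraction
    if stage == "Extracting":
        return 70 + 30 * fraction
    raise ValueError(stage)

assert layer_progress("Downloading", 50, 100) == 35.0
assert layer_progress("Extracting", 50, 100) == 85.0
```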
@@ -308,6 +278,8 @@ class DockerInterface(JobGroup, ABC):
         if (
             stage in {PullImageLayerStage.DOWNLOADING, PullImageLayerStage.EXTRACTING}
             and reference.progress_detail
+            and reference.progress_detail.current is not None
+            and reference.progress_detail.total is not None
         ):
             job.update(
                 progress=progress,
@@ -340,24 +312,44 @@ class DockerInterface(JobGroup, ABC):
             and job.name == "Pulling container image layer"
         ]
 
-        # First set the total bytes to be downloaded/extracted on the main job
-        if not install_job.extra:
-            total = 0
-            for job in layer_jobs:
-                if not job.extra:
-                    return
-                total += job.extra["total"]
-            install_job.extra = {"total": total}
-        else:
-            total = install_job.extra["total"]
+        # Calculate total from layers that have reported size info
+        # With containerd snapshotter, some layers skip "Downloading" and go directly to
+        # "Download complete", so we can't wait for all layers to have extra before reporting progress
+        layers_with_extra = [
+            job for job in layer_jobs if job.extra and job.extra.get("total")
+        ]
+        if not layers_with_extra:
+            return
 
-        # Then determine total progress based on progress of each sub-job, factoring in size of each compared to total
+        # Sum up total bytes. Layers that skip downloading get placeholder extra={1,1}
+        # which doesn't represent actual size. Separate "real" layers from placeholders.
+        # Filter guarantees job.extra is not None and has "total" key
+        real_layers = [
+            job for job in layers_with_extra if cast(dict, job.extra)["total"] > 1
+        ]
+        placeholder_layers = [
+            job for job in layers_with_extra if cast(dict, job.extra)["total"] == 1
+        ]
+
+        # If we only have placeholder layers (no real size info yet), don't report progress
+        # This prevents tiny cached layers from showing inflated progress before
+        # the actual download sizes are known
+        if not real_layers:
+            return
+
+        total = sum(cast(dict, job.extra)["total"] for job in real_layers)
+        if total == 0:
+            return
+
+        # Update install_job.extra with current total (may increase as more layers report)
+        install_job.extra = {"total": total}
+
+        # Calculate progress based on layers that have real size info
+        # Placeholder layers (skipped downloads) count as complete but don't affect weighted progress
         progress = 0.0
         stage = PullImageLayerStage.PULL_COMPLETE
-        for job in layer_jobs:
-            if not job.extra:
-                return
-            progress += job.progress * (job.extra["total"] / total)
+        for job in real_layers:
+            progress += job.progress * (cast(dict, job.extra)["total"] / total)
             job_stage = PullImageLayerStage.from_status(cast(str, job.stage))
 
             if job_stage < PullImageLayerStage.EXTRACTING:
@@ -368,6 +360,28 @@ class DockerInterface(JobGroup, ABC):
             ):
                 stage = PullImageLayerStage.EXTRACTING
 
+        # Check if any layers are still pending (no extra yet)
+        # If so, we're still in downloading phase even if all layers_with_extra are done
+        layers_pending = len(layer_jobs) - len(layers_with_extra)
+        if layers_pending > 0:
+            # Scale progress to account for unreported layers
+            # This prevents tiny layers that complete first from showing inflated progress
+            # e.g., if 2/25 layers reported at 70%, actual progress is ~70 * 2/25 = 5.6%
+            layers_fraction = len(layers_with_extra) / len(layer_jobs)
+            progress = progress * layers_fraction
+
+            if stage == PullImageLayerStage.PULL_COMPLETE:
+                stage = PullImageLayerStage.DOWNLOADING
+
+        # Also check if all placeholders are done but we're waiting for real layers
+        if placeholder_layers and stage == PullImageLayerStage.PULL_COMPLETE:
+            # All real layers are done, but check if placeholders are still extracting
+            for job in placeholder_layers:
+                job_stage = PullImageLayerStage.from_status(cast(str, job.stage))
+                if job_stage < PullImageLayerStage.PULL_COMPLETE:
+                    stage = PullImageLayerStage.EXTRACTING
+                    break
+
         # Ensure progress is 100 at this point to prevent float drift
         if stage == PullImageLayerStage.PULL_COMPLETE:
             progress = 100
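A condensed sketch of the aggregate install-job progress these two hunks compute: weight each reporting layer by its byte total, then scale by the fraction of layers that have reported at all, so small early layers cannot dominate. The helper below is a simplified illustration, not the Supervisor's job machinery.

```python
def aggregate(layers: list[tuple[float, int]], total_layers: int) -> float:
    # layers: (progress, byte_total) per layer that has reported extra
    reported = [(p, t) for p, t in layers if t > 1]  # drop placeholder totals of 1
    if not reported:
        return 0.0
    total = sum(t for _, t in reported)
    progress = sum(p * (t / total) for p, t in reported)
    if len(layers) < total_layers:  # some layers have not reported sizes yet
        progress *= len(layers) / total_layers
    return progress

# e.g. 2 of 25 layers reported, both ~70% done -> roughly 70 * 2/25 = 5.6
```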
@@ -399,9 +413,8 @@ class DockerInterface(JobGroup, ABC):
 
         _LOGGER.info("Downloading docker image %s with tag %s.", image, version)
         try:
-            if self.sys_docker.config.registries:
-                # Try login if we have defined credentials
-                await self._docker_login(image)
+            # Get credentials for private registries to pass to aiodocker
+            credentials = self._get_credentials(image) or None
 
             curr_job_id = self.sys_jobs.current.uuid
 
@@ -417,13 +430,13 @@ class DockerInterface(JobGroup, ABC):
                 BusEvent.DOCKER_IMAGE_PULL_UPDATE, process_pull_image_log
             )
 
-            # Pull new image
-            docker_image = await self.sys_run_in_executor(
-                self.sys_docker.pull_image,
+            # Pull new image, passing credentials to aiodocker
+            docker_image = await self.sys_docker.pull_image(
                 self.sys_jobs.current.uuid,
                 image,
                 str(version),
                 platform=MAP_ARCH[image_arch],
+                auth=credentials,
             )
 
             # Tag latest
@@ -431,22 +444,37 @@ class DockerInterface(JobGroup, ABC):
                 _LOGGER.info(
                     "Tagging image %s with version %s as latest", image, version
                 )
-                await self.sys_run_in_executor(docker_image.tag, image, tag="latest")
+                await self.sys_docker.images.tag(
+                    docker_image["Id"], image, tag="latest"
+                )
         except docker.errors.APIError as err:
-            if err.status_code == 429:
+            if err.status_code == HTTPStatus.TOO_MANY_REQUESTS:
                 self.sys_resolution.create_issue(
                     IssueType.DOCKER_RATELIMIT,
                     ContextType.SYSTEM,
                     suggestions=[SuggestionType.REGISTRY_LOGIN],
                 )
-                _LOGGER.info(
-                    "Your IP address has made too many requests to Docker Hub which activated a rate limit. "
-                    "For more details see https://www.home-assistant.io/more-info/dockerhub-rate-limit"
-                )
+                raise DockerHubRateLimitExceeded(_LOGGER.error) from err
+            await async_capture_exception(err)
             raise DockerError(
                 f"Can't install {image}:{version!s}: {err}", _LOGGER.error
             ) from err
-        except (docker.errors.DockerException, requests.RequestException) as err:
+        except aiodocker.DockerError as err:
+            if err.status == HTTPStatus.TOO_MANY_REQUESTS:
+                self.sys_resolution.create_issue(
+                    IssueType.DOCKER_RATELIMIT,
+                    ContextType.SYSTEM,
+                    suggestions=[SuggestionType.REGISTRY_LOGIN],
+                )
+                raise DockerHubRateLimitExceeded(_LOGGER.error) from err
+            await async_capture_exception(err)
+            raise DockerError(
+                f"Can't install {image}:{version!s}: {err}", _LOGGER.error
+            ) from err
+        except (
+            docker.errors.DockerException,
+            requests.RequestException,
+        ) as err:
             await async_capture_exception(err)
             raise DockerError(
                 f"Unknown error with {image}:{version!s} -> {err!s}", _LOGGER.error
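A hedged sketch of the aiodocker streaming pull this path now relies on, assuming a connected `aiodocker.Docker` client as constructed in `DockerAPI` below; each yielded dict is one pull-log entry, and rate limiting surfaces as a `DockerError` with an HTTP status rather than a typed exception.

```python
import aiodocker
from http import HTTPStatus

async def pull(docker: aiodocker.Docker, repo: str, tag: str, auth: dict | None):
    try:
        async for entry in docker.images.pull(repo, tag=tag, auth=auth, stream=True):
            print(entry.get("status"))  # e.g. "Downloading", "Pull complete"
    except aiodocker.DockerError as err:
        if err.status == HTTPStatus.TOO_MANY_REQUESTS:
            raise RuntimeError("Docker Hub rate limit hit") from err
        raise
```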
@@ -455,14 +483,12 @@ class DockerInterface(JobGroup, ABC):
             if listener:
                 self.sys_bus.remove_listener(listener)
 
-        self._meta = docker_image.attrs
+        self._meta = docker_image
 
     async def exists(self) -> bool:
         """Return True if Docker image exists in local repository."""
-        with suppress(docker.errors.DockerException, requests.RequestException):
-            await self.sys_run_in_executor(
-                self.sys_docker.images.get, f"{self.image}:{self.version!s}"
-            )
+        with suppress(aiodocker.DockerError, requests.RequestException):
+            await self.sys_docker.images.inspect(f"{self.image}:{self.version!s}")
             return True
         return False
 
@@ -521,11 +547,11 @@ class DockerInterface(JobGroup, ABC):
             ),
         )
 
-        with suppress(docker.errors.DockerException, requests.RequestException):
+        with suppress(aiodocker.DockerError, requests.RequestException):
             if not self._meta and self.image:
-                self._meta = self.sys_docker.images.get(
+                self._meta = await self.sys_docker.images.inspect(
                     f"{self.image}:{version!s}"
-                ).attrs
+                )
 
         # Successful?
         if not self._meta:
@@ -593,14 +619,17 @@ class DockerInterface(JobGroup, ABC):
     )
     async def remove(self, *, remove_image: bool = True) -> None:
         """Remove Docker images."""
+        if not self.image or not self.version:
+            raise DockerError(
+                "Cannot determine image and/or version from metadata!", _LOGGER.error
+            )
+
         # Cleanup container
         with suppress(DockerError):
             await self.stop()
 
         if remove_image:
-            await self.sys_run_in_executor(
-                self.sys_docker.remove_image, self.image, self.version
-            )
+            await self.sys_docker.remove_image(self.image, self.version)
 
         self._meta = None
 
@@ -622,18 +651,16 @@ class DockerInterface(JobGroup, ABC):
         image_name = f"{expected_image}:{version!s}"
         if self.image == expected_image:
             try:
-                image: Image = await self.sys_run_in_executor(
-                    self.sys_docker.images.get, image_name
-                )
-            except (docker.errors.DockerException, requests.RequestException) as err:
+                image = await self.sys_docker.images.inspect(image_name)
+            except (aiodocker.DockerError, requests.RequestException) as err:
                 raise DockerError(
                     f"Could not get {image_name} for check due to: {err!s}",
                     _LOGGER.error,
                 ) from err
 
-            image_arch = f"{image.attrs['Os']}/{image.attrs['Architecture']}"
-            if "Variant" in image.attrs:
-                image_arch = f"{image_arch}/{image.attrs['Variant']}"
+            image_arch = f"{image['Os']}/{image['Architecture']}"
+            if "Variant" in image:
+                image_arch = f"{image_arch}/{image['Variant']}"
 
             # If we have an image and its the right arch, all set
             # It seems that newer Docker version return a variant for arm64 images.
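With aiodocker the inspect payload is a plain dict, so the platform string is assembled from its `Os`/`Architecture` keys plus an optional `Variant`, for example:

```python
image = {"Os": "linux", "Architecture": "arm64", "Variant": "v8"}  # sample payload
image_arch = f"{image['Os']}/{image['Architecture']}"
if "Variant" in image:
    image_arch = f"{image_arch}/{image['Variant']}"
assert image_arch == "linux/arm64/v8"
```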
@@ -695,11 +722,13 @@ class DockerInterface(JobGroup, ABC):
         version: AwesomeVersion | None = None,
     ) -> None:
         """Check if old version exists and cleanup."""
-        await self.sys_run_in_executor(
-            self.sys_docker.cleanup_old_images,
-            image or self.image,
-            version or self.version,
-            {old_image} if old_image else None,
+        if not (use_image := image or self.image):
+            raise DockerError("Cannot determine image from metadata!", _LOGGER.error)
+        if not (use_version := version or self.version):
+            raise DockerError("Cannot determine version from metadata!", _LOGGER.error)
+
+        await self.sys_docker.cleanup_old_images(
+            use_image, use_version, {old_image} if old_image else None
         )
 
     @Job(
@@ -751,10 +780,10 @@ class DockerInterface(JobGroup, ABC):
         """Return latest version of local image."""
         available_version: list[AwesomeVersion] = []
         try:
-            for image in await self.sys_run_in_executor(
-                self.sys_docker.images.list, self.image
+            for image in await self.sys_docker.images.list(
+                filters=f'{{"reference": ["{self.image}"]}}'
             ):
-                for tag in image.tags:
+                for tag in image["RepoTags"]:
                     version = AwesomeVersion(tag.partition(":")[2])
                     if version.strategy == AwesomeVersionStrategy.UNKNOWN:
                         continue
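The aiodocker image list takes the engine's filter syntax as a JSON string rather than a name argument; when the reference list is built dynamically, `json.dumps` keeps the quoting right (the image name below is an example, and `docker` is assumed to be a connected aiodocker client):

```python
import json

filters = json.dumps({"reference": ["ghcr.io/home-assistant/amd64-supervisor"]})
# -> '{"reference": ["ghcr.io/home-assistant/amd64-supervisor"]}'
# images = await docker.images.list(filters=filters)
```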
@@ -763,7 +792,7 @@ class DockerInterface(JobGroup, ABC):
             if not available_version:
                 raise ValueError()
 
-        except (docker.errors.DockerException, ValueError) as err:
+        except (aiodocker.DockerError, ValueError) as err:
             raise DockerNotFound(
                 f"No version found for {self.image}", _LOGGER.info
             ) from err
@@ -6,20 +6,24 @@ import asyncio
 from contextlib import suppress
 from dataclasses import dataclass
 from functools import partial
+from http import HTTPStatus
 from ipaddress import IPv4Address
+import json
 import logging
 import os
 from pathlib import Path
+import re
 from typing import Any, Final, Self, cast
 
+import aiodocker
+from aiodocker.images import DockerImages
+from aiohttp import ClientSession, ClientTimeout, UnixConnector
 import attr
 from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
 from docker import errors as docker_errors
 from docker.api.client import APIClient
 from docker.client import DockerClient
-from docker.errors import DockerException, ImageNotFound, NotFound
 from docker.models.containers import Container, ContainerCollection
-from docker.models.images import Image, ImageCollection
 from docker.models.networks import Network
 from docker.types.daemon import CancellableStream
 import requests
@@ -45,7 +49,7 @@ from ..exceptions import (
 )
 from ..utils.common import FileConfiguration
 from ..validate import SCHEMA_DOCKER_CONFIG
-from .const import LABEL_MANAGED
+from .const import DOCKER_HUB, IMAGE_WITH_HOST, LABEL_MANAGED
 from .monitor import DockerMonitor
 from .network import DockerNetwork
 
@@ -53,6 +57,7 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
 
 MIN_SUPPORTED_DOCKER: Final = AwesomeVersion("24.0.0")
 DOCKER_NETWORK_HOST: Final = "host"
+RE_IMPORT_IMAGE_STREAM = re.compile(r"(^Loaded image ID: |^Loaded image: )(.+)$")
 
 
 @attr.s(frozen=True)
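What `RE_IMPORT_IMAGE_STREAM` captures from `docker load`-style output: `group(2)` is the image reference or ID on either message form (the sample values below are made up).

```python
import re

RE_IMPORT_IMAGE_STREAM = re.compile(r"(^Loaded image ID: |^Loaded image: )(.+)$")

m = RE_IMPORT_IMAGE_STREAM.search("Loaded image: ghcr.io/home-assistant/amd64-supervisor:2025.11.2")
assert m and m.group(2) == "ghcr.io/home-assistant/amd64-supervisor:2025.11.2"
m = RE_IMPORT_IMAGE_STREAM.search("Loaded image ID: sha256:4d1dd63248")  # truncated sample digest
assert m and m.group(2) == "sha256:4d1dd63248"
```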
@@ -71,15 +76,25 @@ class DockerInfo:
     storage: str = attr.ib()
     logging: str = attr.ib()
     cgroup: str = attr.ib()
+    support_cpu_realtime: bool = attr.ib()
 
     @staticmethod
-    def new(data: dict[str, Any]):
+    async def new(data: dict[str, Any]) -> DockerInfo:
         """Create a object from docker info."""
+        # Check if CONFIG_RT_GROUP_SCHED is loaded (blocking I/O in executor)
+        cpu_rt_file_exists = await asyncio.get_running_loop().run_in_executor(
+            None, Path("/sys/fs/cgroup/cpu/cpu.rt_runtime_us").exists
+        )
+        cpu_rt_supported = (
+            cpu_rt_file_exists and os.environ.get(ENV_SUPERVISOR_CPU_RT) == "1"
+        )
+
         return DockerInfo(
             AwesomeVersion(data.get("ServerVersion", "0.0.0")),
             data.get("Driver", "unknown"),
             data.get("LoggingDriver", "unknown"),
             data.get("CgroupVersion", "1"),
+            cpu_rt_supported,
         )
 
     @property
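The pattern used here for the one-off blocking filesystem check, shown in isolation: run the synchronous callable in the default executor so the event loop is never blocked on disk I/O.

```python
import asyncio
from pathlib import Path

async def rt_sched_available() -> bool:
    # Path.exists() hits the filesystem, so it runs in the default executor
    return await asyncio.get_running_loop().run_in_executor(
        None, Path("/sys/fs/cgroup/cpu/cpu.rt_runtime_us").exists
    )
```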
@@ -90,23 +105,21 @@ class DockerInfo:
         except AwesomeVersionCompareException:
             return False
 
-    @property
-    def support_cpu_realtime(self) -> bool:
-        """Return true, if CONFIG_RT_GROUP_SCHED is loaded."""
-        if not Path("/sys/fs/cgroup/cpu/cpu.rt_runtime_us").exists():
-            return False
-        return bool(os.environ.get(ENV_SUPERVISOR_CPU_RT) == "1")
-
 
 @dataclass(frozen=True, slots=True)
 class PullProgressDetail:
     """Progress detail information for pull.
 
     Documentation lacking but both of these seem to be in bytes when populated.
+
+    Containerd-snapshot update - When leveraging this new feature, this information
+    becomes useless to us while extracting. It simply tells elapsed time using
+    current and units.
     """
 
     current: int | None = None
     total: int | None = None
+    units: str | None = None
 
     @classmethod
     def from_pull_log_dict(cls, value: dict[str, int]) -> PullProgressDetail:
@@ -194,6 +207,27 @@ class DockerConfig(FileConfiguration):
         """Return credentials for docker registries."""
         return self._data.get(ATTR_REGISTRIES, {})
 
+    def get_registry_for_image(self, image: str) -> str | None:
+        """Return the registry name if credentials are available for the image.
+
+        Matches the image against configured registries and returns the registry
+        name if found, or None if no matching credentials are configured.
+        """
+        if not self.registries:
+            return None
+
+        # Check if image uses a custom registry (e.g., ghcr.io/org/image)
+        matcher = IMAGE_WITH_HOST.match(image)
+        if matcher:
+            registry = matcher.group(1)
+            if registry in self.registries:
+                return registry
+        # If no registry prefix, check for Docker Hub credentials
+        elif DOCKER_HUB in self.registries:
+            return DOCKER_HUB
+
+        return None
+
 
 class DockerAPI(CoreSysAttributes):
     """Docker Supervisor wrapper.
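A standalone replica of the lookup logic above for illustration (the registry names and credential store are examples): note that an image on a host without configured credentials returns `None` rather than falling back to Docker Hub.

```python
import re

IMAGE_WITH_HOST = re.compile(r"^((?:[a-z0-9]+(?:-[a-z0-9]+)*\.)+[a-z]{2,})\/.+")
DOCKER_HUB = "hub.docker.com"

def get_registry_for_image(registries: dict, image: str) -> str | None:
    if not registries:
        return None
    if matcher := IMAGE_WITH_HOST.match(image):
        if (registry := matcher.group(1)) in registries:
            return registry
    elif DOCKER_HUB in registries:
        return DOCKER_HUB
    return None

regs = {"ghcr.io": {}, "hub.docker.com": {}}  # example credential store
assert get_registry_for_image(regs, "ghcr.io/org/image") == "ghcr.io"
assert get_registry_for_image(regs, "registry.other.io/x/y") is None
assert get_registry_for_image(regs, "homeassistant/amd64-supervisor") == "hub.docker.com"
```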
@@ -204,7 +238,15 @@ class DockerAPI(CoreSysAttributes):
     def __init__(self, coresys: CoreSys):
         """Initialize Docker base wrapper."""
         self.coresys = coresys
-        self._docker: DockerClient | None = None
+        # We keep both until we can fully refactor to aiodocker
+        self._dockerpy: DockerClient | None = None
+        self.docker: aiodocker.Docker = aiodocker.Docker(
+            url="unix://localhost",  # dummy hostname for URL composition
+            connector=(connector := UnixConnector(SOCKET_DOCKER.as_posix())),
+            session=ClientSession(connector=connector, timeout=ClientTimeout(900)),
+            api_version="auto",
+        )
+
         self._network: DockerNetwork | None = None
         self._info: DockerInfo | None = None
         self.config: DockerConfig = DockerConfig()
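A hedged sketch of that wiring in isolation: one `UnixConnector` is shared (the walrus binding above) between the `Docker` object and its `ClientSession`, so both talk over the Docker socket, while the `localhost` host in the URL is only used to compose request paths. The socket path below stands in for the Supervisor's `SOCKET_DOCKER` constant.

```python
import aiodocker
from aiohttp import ClientSession, ClientTimeout, UnixConnector

async def make_docker_client(socket_path: str = "/run/docker.sock") -> aiodocker.Docker:
    # Share one connector between the client and its session
    connector = UnixConnector(socket_path)
    return aiodocker.Docker(
        url="unix://localhost",
        connector=connector,
        session=ClientSession(connector=connector, timeout=ClientTimeout(900)),
        api_version="auto",
    )
```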
@@ -212,28 +254,28 @@ class DockerAPI(CoreSysAttributes):
 
     async def post_init(self) -> Self:
         """Post init actions that must be done in event loop."""
-        self._docker = await asyncio.get_running_loop().run_in_executor(
+        self._dockerpy = await asyncio.get_running_loop().run_in_executor(
             None,
             partial(
                 DockerClient,
-                base_url=f"unix:/{str(SOCKET_DOCKER)}",
+                base_url=f"unix:/{SOCKET_DOCKER.as_posix()}",
                 version="auto",
                 timeout=900,
             ),
         )
-        self._info = DockerInfo.new(self.docker.info())
+        self._info = await DockerInfo.new(self.dockerpy.info())
         await self.config.read_data()
-        self._network = await DockerNetwork(self.docker).post_init(
+        self._network = await DockerNetwork(self.dockerpy).post_init(
             self.config.enable_ipv6, self.config.mtu
         )
         return self
 
     @property
-    def docker(self) -> DockerClient:
+    def dockerpy(self) -> DockerClient:
         """Get docker API client."""
-        if not self._docker:
+        if not self._dockerpy:
             raise RuntimeError("Docker API Client not initialized!")
-        return self._docker
+        return self._dockerpy
 
     @property
     def network(self) -> DockerNetwork:
@@ -243,19 +285,19 @@ class DockerAPI(CoreSysAttributes):
         return self._network
 
     @property
-    def images(self) -> ImageCollection:
+    def images(self) -> DockerImages:
         """Return API images."""
         return self.docker.images
 
     @property
     def containers(self) -> ContainerCollection:
         """Return API containers."""
-        return self.docker.containers
+        return self.dockerpy.containers
 
     @property
     def api(self) -> APIClient:
         """Return API containers."""
-        return self.docker.api
+        return self.dockerpy.api
 
     @property
     def info(self) -> DockerInfo:
@@ -267,7 +309,7 @@ class DockerAPI(CoreSysAttributes):
     @property
     def events(self) -> CancellableStream:
         """Return docker event stream."""
-        return self.docker.events(decode=True)
+        return self.dockerpy.events(decode=True)
 
     @property
     def monitor(self) -> DockerMonitor:
@@ -383,7 +425,7 @@ class DockerAPI(CoreSysAttributes):
             with suppress(DockerError):
                 self.network.detach_default_bridge(container)
         else:
-            host_network: Network = self.docker.networks.get(DOCKER_NETWORK_HOST)
+            host_network: Network = self.dockerpy.networks.get(DOCKER_NETWORK_HOST)
 
             # Check if container is register on host
             # https://github.com/moby/moby/issues/23302
@@ -410,35 +452,33 @@ class DockerAPI(CoreSysAttributes):
 
         return container
 
-    def pull_image(
+    async def pull_image(
         self,
         job_id: str,
         repository: str,
         tag: str = "latest",
         platform: str | None = None,
-    ) -> Image:
+        auth: dict[str, str] | None = None,
+    ) -> dict[str, Any]:
         """Pull the specified image and return it.
 
         This mimics the high level API of images.pull but provides better error handling by raising
         based on a docker error on pull. Whereas the high level API ignores all errors on pull and
         raises only if the get fails afterwards. Additionally it fires progress reports for the pull
         on the bus so listeners can use that to update status for users.
 
-        Must be run in executor.
         """
-        pull_log = self.docker.api.pull(
-            repository, tag=tag, platform=platform, stream=True, decode=True
-        )
-        for e in pull_log:
+        async for e in self.images.pull(
+            repository, tag=tag, platform=platform, auth=auth, stream=True
+        ):
             entry = PullLogEntry.from_pull_log_dict(job_id, e)
             if entry.error:
                 raise entry.exception
-            self.sys_loop.call_soon_threadsafe(
-                self.sys_bus.fire_event, BusEvent.DOCKER_IMAGE_PULL_UPDATE, entry
-            )
+            await asyncio.gather(
+                *self.sys_bus.fire_event(BusEvent.DOCKER_IMAGE_PULL_UPDATE, entry)
+            )
 
         sep = "@" if tag.startswith("sha256:") else ":"
-        return self.images.get(f"{repository}{sep}{tag}")
+        return await self.images.inspect(f"{repository}{sep}{tag}")
 
     def run_command(
         self,
@@ -459,7 +499,7 @@ class DockerAPI(CoreSysAttributes):
         _LOGGER.info("Runing command '%s' on %s", command, image_with_tag)
         container = None
         try:
-            container = self.docker.containers.run(
+            container = self.dockerpy.containers.run(
                 image_with_tag,
                 command=command,
                 detach=True,
@@ -487,35 +527,35 @@ class DockerAPI(CoreSysAttributes):
         """Repair local docker overlayfs2 issues."""
         _LOGGER.info("Prune stale containers")
         try:
-            output = self.docker.api.prune_containers()
+            output = self.dockerpy.api.prune_containers()
             _LOGGER.debug("Containers prune: %s", output)
         except docker_errors.APIError as err:
             _LOGGER.warning("Error for containers prune: %s", err)
 
         _LOGGER.info("Prune stale images")
         try:
-            output = self.docker.api.prune_images(filters={"dangling": False})
+            output = self.dockerpy.api.prune_images(filters={"dangling": False})
             _LOGGER.debug("Images prune: %s", output)
         except docker_errors.APIError as err:
             _LOGGER.warning("Error for images prune: %s", err)
 
         _LOGGER.info("Prune stale builds")
         try:
-            output = self.docker.api.prune_builds()
+            output = self.dockerpy.api.prune_builds()
             _LOGGER.debug("Builds prune: %s", output)
         except docker_errors.APIError as err:
             _LOGGER.warning("Error for builds prune: %s", err)
 
         _LOGGER.info("Prune stale volumes")
         try:
-            output = self.docker.api.prune_builds()
+            output = self.dockerpy.api.prune_volumes()
             _LOGGER.debug("Volumes prune: %s", output)
         except docker_errors.APIError as err:
             _LOGGER.warning("Error for volumes prune: %s", err)
 
         _LOGGER.info("Prune stale networks")
         try:
-            output = self.docker.api.prune_networks()
+            output = self.dockerpy.api.prune_networks()
             _LOGGER.debug("Networks prune: %s", output)
         except docker_errors.APIError as err:
             _LOGGER.warning("Error for networks prune: %s", err)
@@ -537,11 +577,11 @@ class DockerAPI(CoreSysAttributes):
 
         Fix: https://github.com/moby/moby/issues/23302
         """
-        network: Network = self.docker.networks.get(network_name)
+        network: Network = self.dockerpy.networks.get(network_name)
 
         for cid, data in network.attrs.get("Containers", {}).items():
             try:
-                self.docker.containers.get(cid)
+                self.dockerpy.containers.get(cid)
                 continue
             except docker_errors.NotFound:
                 _LOGGER.debug(
@@ -556,22 +596,26 @@ class DockerAPI(CoreSysAttributes):
             with suppress(docker_errors.DockerException, requests.RequestException):
                 network.disconnect(data.get("Name", cid), force=True)
 
-    def container_is_initialized(
+    async def container_is_initialized(
         self, name: str, image: str, version: AwesomeVersion
     ) -> bool:
         """Return True if docker container exists in good state and is built from expected image."""
         try:
-            docker_container = self.containers.get(name)
-            docker_image = self.images.get(f"{image}:{version}")
-        except NotFound:
+            docker_container = await self.sys_run_in_executor(self.containers.get, name)
+            docker_image = await self.images.inspect(f"{image}:{version}")
+        except docker_errors.NotFound:
             return False
-        except (DockerException, requests.RequestException) as err:
+        except aiodocker.DockerError as err:
+            if err.status == HTTPStatus.NOT_FOUND:
+                return False
+            raise DockerError() from err
+        except (docker_errors.DockerException, requests.RequestException) as err:
             raise DockerError() from err
 
         # Check the image is correct and state is good
         return (
             docker_container.image is not None
-            and docker_container.image.id == docker_image.id
+            and docker_container.image.id == docker_image["Id"]
             and docker_container.status in ("exited", "running", "created")
         )
 
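Unlike docker-py, aiodocker raises a single `DockerError` carrying the HTTP status instead of a typed `NotFound`, so "missing" is detected by comparing `err.status`, as in the hunk above. In isolation:

```python
import aiodocker
from http import HTTPStatus

async def image_exists(docker: aiodocker.Docker, ref: str) -> bool:
    try:
        await docker.images.inspect(ref)
    except aiodocker.DockerError as err:
        if err.status == HTTPStatus.NOT_FOUND:
            return False  # missing image is an expected outcome
        raise
    return True
```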
@@ -581,18 +625,18 @@ class DockerAPI(CoreSysAttributes):
         """Stop/remove Docker container."""
         try:
             docker_container: Container = self.containers.get(name)
-        except NotFound:
+        except docker_errors.NotFound:
             raise DockerNotFound() from None
-        except (DockerException, requests.RequestException) as err:
+        except (docker_errors.DockerException, requests.RequestException) as err:
             raise DockerError() from err
 
         if docker_container.status == "running":
             _LOGGER.info("Stopping %s application", name)
-            with suppress(DockerException, requests.RequestException):
+            with suppress(docker_errors.DockerException, requests.RequestException):
                 docker_container.stop(timeout=timeout)
 
         if remove_container:
-            with suppress(DockerException, requests.RequestException):
+            with suppress(docker_errors.DockerException, requests.RequestException):
                 _LOGGER.info("Cleaning %s application", name)
                 docker_container.remove(force=True, v=True)
 
@@ -604,11 +648,11 @@ class DockerAPI(CoreSysAttributes):
         """Start Docker container."""
         try:
             docker_container: Container = self.containers.get(name)
-        except NotFound:
+        except docker_errors.NotFound:
             raise DockerNotFound(
                 f"{name} not found for starting up", _LOGGER.error
             ) from None
-        except (DockerException, requests.RequestException) as err:
+        except (docker_errors.DockerException, requests.RequestException) as err:
             raise DockerError(
                 f"Could not get {name} for starting up", _LOGGER.error
             ) from err
@@ -616,36 +660,36 @@ class DockerAPI(CoreSysAttributes):
         _LOGGER.info("Starting %s", name)
         try:
             docker_container.start()
-        except (DockerException, requests.RequestException) as err:
+        except (docker_errors.DockerException, requests.RequestException) as err:
             raise DockerError(f"Can't start {name}: {err}", _LOGGER.error) from err
 
     def restart_container(self, name: str, timeout: int) -> None:
         """Restart docker container."""
         try:
             container: Container = self.containers.get(name)
-        except NotFound:
+        except docker_errors.NotFound:
             raise DockerNotFound() from None
-        except (DockerException, requests.RequestException) as err:
+        except (docker_errors.DockerException, requests.RequestException) as err:
             raise DockerError() from err
 
         _LOGGER.info("Restarting %s", name)
         try:
             container.restart(timeout=timeout)
-        except (DockerException, requests.RequestException) as err:
+        except (docker_errors.DockerException, requests.RequestException) as err:
             raise DockerError(f"Can't restart {name}: {err}", _LOGGER.warning) from err
 
     def container_logs(self, name: str, tail: int = 100) -> bytes:
         """Return Docker logs of container."""
         try:
             docker_container: Container = self.containers.get(name)
-        except NotFound:
+        except docker_errors.NotFound:
             raise DockerNotFound() from None
-        except (DockerException, requests.RequestException) as err:
+        except (docker_errors.DockerException, requests.RequestException) as err:
             raise DockerError() from err
 
         try:
             return docker_container.logs(tail=tail, stdout=True, stderr=True)
-        except (DockerException, requests.RequestException) as err:
+        except (docker_errors.DockerException, requests.RequestException) as err:
             raise DockerError(
                 f"Can't grep logs from {name}: {err}", _LOGGER.warning
             ) from err
@@ -654,9 +698,9 @@ class DockerAPI(CoreSysAttributes):
         """Read and return stats from container."""
         try:
             docker_container: Container = self.containers.get(name)
-        except NotFound:
+        except docker_errors.NotFound:
             raise DockerNotFound() from None
-        except (DockerException, requests.RequestException) as err:
+        except (docker_errors.DockerException, requests.RequestException) as err:
             raise DockerError() from err
 
         # container is not running
@@ -665,7 +709,7 @@ class DockerAPI(CoreSysAttributes):
 
         try:
             return docker_container.stats(stream=False)
-        except (DockerException, requests.RequestException) as err:
+        except (docker_errors.DockerException, requests.RequestException) as err:
             raise DockerError(
                 f"Can't read stats from {name}: {err}", _LOGGER.error
             ) from err
@@ -674,61 +718,84 @@ class DockerAPI(CoreSysAttributes):
|
|||||||
"""Execute a command inside Docker container."""
|
"""Execute a command inside Docker container."""
|
||||||
try:
|
try:
|
||||||
docker_container: Container = self.containers.get(name)
|
docker_container: Container = self.containers.get(name)
|
||||||
except NotFound:
|
except docker_errors.NotFound:
|
||||||
raise DockerNotFound() from None
|
raise DockerNotFound() from None
|
||||||
except (DockerException, requests.RequestException) as err:
|
except (docker_errors.DockerException, requests.RequestException) as err:
|
||||||
raise DockerError() from err
|
raise DockerError() from err
|
||||||
|
|
||||||
# Execute
|
# Execute
|
||||||
try:
|
try:
|
||||||
code, output = docker_container.exec_run(command)
|
code, output = docker_container.exec_run(command)
|
||||||
except (DockerException, requests.RequestException) as err:
|
except (docker_errors.DockerException, requests.RequestException) as err:
|
||||||
raise DockerError() from err
|
raise DockerError() from err
|
||||||
|
|
||||||
return CommandReturn(code, output)
|
return CommandReturn(code, output)
|
||||||
|
|
||||||
def remove_image(
|
async def remove_image(
|
||||||
self, image: str, version: AwesomeVersion, latest: bool = True
|
self, image: str, version: AwesomeVersion, latest: bool = True
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Remove a Docker image by version and latest."""
|
"""Remove a Docker image by version and latest."""
|
||||||
try:
|
try:
|
||||||
if latest:
|
if latest:
|
||||||
_LOGGER.info("Removing image %s with latest", image)
|
_LOGGER.info("Removing image %s with latest", image)
|
||||||
with suppress(ImageNotFound):
|
try:
|
||||||
self.images.remove(image=f"{image}:latest", force=True)
|
await self.images.delete(f"{image}:latest", force=True)
|
||||||
|
except aiodocker.DockerError as err:
|
||||||
|
if err.status != HTTPStatus.NOT_FOUND:
|
+                    raise

             _LOGGER.info("Removing image %s with %s", image, version)
-            with suppress(ImageNotFound):
-                self.images.remove(image=f"{image}:{version!s}", force=True)
+            try:
+                await self.images.delete(f"{image}:{version!s}", force=True)
+            except aiodocker.DockerError as err:
+                if err.status != HTTPStatus.NOT_FOUND:
+                    raise

-        except (DockerException, requests.RequestException) as err:
+        except (aiodocker.DockerError, requests.RequestException) as err:
             raise DockerError(
                 f"Can't remove image {image}: {err}", _LOGGER.warning
             ) from err

-    def import_image(self, tar_file: Path) -> Image | None:
+    async def import_image(self, tar_file: Path) -> dict[str, Any] | None:
         """Import a tar file as image."""
         try:
             with tar_file.open("rb") as read_tar:
-                docker_image_list: list[Image] = self.images.load(read_tar)  # type: ignore
-
-            if len(docker_image_list) != 1:
-                _LOGGER.warning(
-                    "Unexpected image count %d while importing image from tar",
-                    len(docker_image_list),
-                )
-                return None
-            return docker_image_list[0]
-        except (DockerException, OSError) as err:
+                resp: list[dict[str, Any]] = self.images.import_image(read_tar)
+        except (aiodocker.DockerError, OSError) as err:
             raise DockerError(
                 f"Can't import image from tar: {err}", _LOGGER.error
             ) from err

+        docker_image_list: list[str] = []
+        for chunk in resp:
+            if "errorDetail" in chunk:
+                raise DockerError(
+                    f"Can't import image from tar: {chunk['errorDetail']['message']}",
+                    _LOGGER.error,
+                )
+            if "stream" in chunk:
+                if match := RE_IMPORT_IMAGE_STREAM.search(chunk["stream"]):
+                    docker_image_list.append(match.group(2))
+
+        if len(docker_image_list) != 1:
+            _LOGGER.warning(
+                "Unexpected image count %d while importing image from tar",
+                len(docker_image_list),
+            )
+            return None
+
+        try:
+            return await self.images.inspect(docker_image_list[0])
+        except (aiodocker.DockerError, requests.RequestException) as err:
+            raise DockerError(
+                f"Could not inspect imported image due to: {err!s}", _LOGGER.error
+            ) from err

     def export_image(self, image: str, version: AwesomeVersion, tar_file: Path) -> None:
         """Export current images into a tar file."""
         try:
             docker_image = self.api.get_image(f"{image}:{version}")
-        except (DockerException, requests.RequestException) as err:
+        except (docker_errors.DockerException, requests.RequestException) as err:
             raise DockerError(
                 f"Can't fetch image {image}: {err}", _LOGGER.error
             ) from err
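The new `import_image` no longer trusts the client library to hand back `Image` objects; it parses the daemon's status stream itself. Below is a minimal sketch of that parsing. The regex is a hypothetical stand-in for `RE_IMPORT_IMAGE_STREAM`, whose actual definition sits outside this diff; its group numbering is chosen so that `match.group(2)` yields the image reference, matching the usage above.

```python
import re

# Hypothetical stand-in for RE_IMPORT_IMAGE_STREAM; group(2) captures the
# image reference from daemon lines such as "Loaded image: local/addon:1.0".
RE_IMPORT_IMAGE_STREAM = re.compile(r"Loaded image( ID)?: (\S+)")


def parse_import_stream(chunks: list[dict]) -> list[str]:
    """Collect image references from a Docker load/import status stream."""
    images: list[str] = []
    for chunk in chunks:
        if "errorDetail" in chunk:  # the daemon reports failures in-band
            raise RuntimeError(chunk["errorDetail"]["message"])
        if "stream" in chunk and (
            match := RE_IMPORT_IMAGE_STREAM.search(chunk["stream"])
        ):
            images.append(match.group(2))
    return images


assert parse_import_stream(
    [{"stream": "Loaded image: local/amd64-addon-ssh:9.2.1\n"}]
) == ["local/amd64-addon-ssh:9.2.1"]
```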
@@ -745,7 +812,7 @@ class DockerAPI(CoreSysAttributes):

         _LOGGER.info("Export image %s done", image)

-    def cleanup_old_images(
+    async def cleanup_old_images(
         self,
         current_image: str,
         current_version: AwesomeVersion,
@@ -756,46 +823,57 @@ class DockerAPI(CoreSysAttributes):
"""Clean up old versions of an image."""
|
"""Clean up old versions of an image."""
|
||||||
image = f"{current_image}:{current_version!s}"
|
image = f"{current_image}:{current_version!s}"
|
||||||
try:
|
try:
|
||||||
keep = {cast(str, self.images.get(image).id)}
|
try:
|
||||||
except ImageNotFound:
|
image_attr = await self.images.inspect(image)
|
||||||
raise DockerNotFound(
|
except aiodocker.DockerError as err:
|
||||||
f"{current_image} not found for cleanup", _LOGGER.warning
|
if err.status == HTTPStatus.NOT_FOUND:
|
||||||
) from None
|
raise DockerNotFound(
|
||||||
except (DockerException, requests.RequestException) as err:
|
f"{current_image} not found for cleanup", _LOGGER.warning
|
||||||
|
) from None
|
||||||
|
raise
|
||||||
|
except (aiodocker.DockerError, requests.RequestException) as err:
|
||||||
raise DockerError(
|
raise DockerError(
|
||||||
f"Can't get {current_image} for cleanup", _LOGGER.warning
|
f"Can't get {current_image} for cleanup", _LOGGER.warning
|
||||||
) from err
|
) from err
|
||||||
|
keep = {cast(str, image_attr["Id"])}
|
||||||
|
|
||||||
if keep_images:
|
if keep_images:
|
||||||
keep_images -= {image}
|
keep_images -= {image}
|
||||||
try:
|
results = await asyncio.gather(
|
||||||
for image in keep_images:
|
*[self.images.inspect(image) for image in keep_images],
|
||||||
# If its not found, no need to preserve it from getting removed
|
return_exceptions=True,
|
||||||
with suppress(ImageNotFound):
|
)
|
||||||
keep.add(cast(str, self.images.get(image).id))
|
for result in results:
|
||||||
except (DockerException, requests.RequestException) as err:
|
# If its not found, no need to preserve it from getting removed
|
||||||
raise DockerError(
|
if (
|
||||||
f"Failed to get one or more images from {keep} during cleanup",
|
isinstance(result, aiodocker.DockerError)
|
||||||
_LOGGER.warning,
|
and result.status == HTTPStatus.NOT_FOUND
|
||||||
) from err
|
):
|
||||||
|
continue
|
||||||
|
if isinstance(result, BaseException):
|
||||||
|
raise DockerError(
|
||||||
|
f"Failed to get one or more images from {keep} during cleanup",
|
||||||
|
_LOGGER.warning,
|
||||||
|
) from result
|
||||||
|
keep.add(cast(str, result["Id"]))
|
||||||
|
|
||||||
# Cleanup old and current
|
# Cleanup old and current
|
||||||
image_names = list(
|
image_names = list(
|
||||||
old_images | {current_image} if old_images else {current_image}
|
old_images | {current_image} if old_images else {current_image}
|
||||||
)
|
)
|
||||||
try:
|
try:
|
||||||
# This API accepts a list of image names. Tested and confirmed working on docker==7.1.0
|
images_list = await self.images.list(
|
||||||
# Its typing does say only `str` though. Bit concerning, could an update break this?
|
filters=json.dumps({"reference": image_names})
|
||||||
images_list = self.images.list(name=image_names) # type: ignore
|
)
|
||||||
except (DockerException, requests.RequestException) as err:
|
except (aiodocker.DockerError, requests.RequestException) as err:
|
||||||
raise DockerError(
|
raise DockerError(
|
||||||
f"Corrupt docker overlayfs found: {err}", _LOGGER.warning
|
f"Corrupt docker overlayfs found: {err}", _LOGGER.warning
|
||||||
) from err
|
) from err
|
||||||
|
|
||||||
for docker_image in images_list:
|
for docker_image in images_list:
|
||||||
if docker_image.id in keep:
|
if docker_image["Id"] in keep:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
with suppress(DockerException, requests.RequestException):
|
with suppress(aiodocker.DockerError, requests.RequestException):
|
||||||
_LOGGER.info("Cleanup images: %s", docker_image.tags)
|
_LOGGER.info("Cleanup images: %s", docker_image["RepoTags"])
|
||||||
self.images.remove(docker_image.id, force=True)
|
await self.images.delete(docker_image["Id"], force=True)
|
||||||
|
|||||||
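The serial `images.get` loop becomes one concurrent `asyncio.gather` with `return_exceptions=True`, so a single missing image no longer aborts the whole lookup; note also that the list call now passes the Docker Engine `filters` parameter as a JSON-encoded object instead of relying on docker-py's `name=` convenience. A self-contained sketch of the gather pattern follows; `FakeDockerError` is a stand-in for `aiodocker.DockerError`, which carries an HTTP `status`.

```python
import asyncio


class FakeDockerError(Exception):
    """Stand-in for aiodocker.DockerError, which carries an HTTP status."""

    def __init__(self, status: int) -> None:
        self.status = status


async def inspect(name: str) -> dict:
    # Stand-in for DockerImages.inspect: unknown names raise a 404-style error.
    known = {"img-a": {"Id": "sha256:aaa"}}
    if name not in known:
        raise FakeDockerError(404)
    return known[name]


async def collect_keep_ids(names: list[str]) -> set[str]:
    results = await asyncio.gather(
        *[inspect(name) for name in names], return_exceptions=True
    )
    keep: set[str] = set()
    for result in results:
        # A 404 means the image is already gone; nothing to preserve.
        if isinstance(result, FakeDockerError) and result.status == 404:
            continue
        if isinstance(result, BaseException):
            raise result  # any other failure aborts the cleanup
        keep.add(result["Id"])
    return keep


assert asyncio.run(collect_keep_ids(["img-a", "missing"])) == {"sha256:aaa"}
```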
@@ -89,7 +89,7 @@ class DockerMonitor(CoreSysAttributes, Thread):
                     DockerContainerStateEvent(
                         name=attributes["name"],
                         state=container_state,
-                        id=event["id"],
+                        id=event["Actor"]["ID"],
                         time=event["time"],
                     ),
                 )
@@ -1,10 +1,12 @@
 """Init file for Supervisor Docker object."""

+import asyncio
 from collections.abc import Awaitable
 from ipaddress import IPv4Address
 import logging
 import os

+import aiodocker
 from awesomeversion.awesomeversion import AwesomeVersion
 import docker
 import requests
@@ -112,19 +114,18 @@ class DockerSupervisor(DockerInterface):
         name="docker_supervisor_update_start_tag",
         concurrency=JobConcurrency.GROUP_QUEUE,
     )
-    def update_start_tag(self, image: str, version: AwesomeVersion) -> Awaitable[None]:
+    async def update_start_tag(self, image: str, version: AwesomeVersion) -> None:
         """Update start tag to new version."""
-        return self.sys_run_in_executor(self._update_start_tag, image, version)
-
-    def _update_start_tag(self, image: str, version: AwesomeVersion) -> None:
-        """Update start tag to new version.
-
-        Need run inside executor.
-        """
         try:
-            docker_container = self.sys_docker.containers.get(self.name)
-            docker_image = self.sys_docker.images.get(f"{image}:{version!s}")
-        except (docker.errors.DockerException, requests.RequestException) as err:
+            docker_container = await self.sys_run_in_executor(
+                self.sys_docker.containers.get, self.name
+            )
+            docker_image = await self.sys_docker.images.inspect(f"{image}:{version!s}")
+        except (
+            aiodocker.DockerError,
+            docker.errors.DockerException,
+            requests.RequestException,
+        ) as err:
             raise DockerError(
                 f"Can't get image or container to fix start tag: {err}", _LOGGER.error
             ) from err
@@ -144,8 +145,14 @@ class DockerSupervisor(DockerInterface):
             # If version tag
             if start_tag != "latest":
                 continue
-            docker_image.tag(start_image, start_tag)
-            docker_image.tag(start_image, version.string)
+            await asyncio.gather(
+                self.sys_docker.images.tag(
+                    docker_image["Id"], start_image, tag=start_tag
+                ),
+                self.sys_docker.images.tag(
+                    docker_image["Id"], start_image, tag=version.string
+                ),
+            )

-        except (docker.errors.DockerException, requests.RequestException) as err:
+        except (aiodocker.DockerError, requests.RequestException) as err:
             raise DockerError(f"Can't fix start tag: {err}", _LOGGER.error) from err
@@ -639,9 +639,32 @@ class DockerLogOutOfOrder(DockerError):
 class DockerNoSpaceOnDevice(DockerError):
     """Raise if a docker pull fails due to available space."""

+    error_key = "docker_no_space_on_device"
+    message_template = "No space left on disk"
+
     def __init__(self, logger: Callable[..., None] | None = None) -> None:
         """Raise & log."""
-        super().__init__("No space left on disk", logger=logger)
+        super().__init__(None, logger=logger)
+
+
+class DockerHubRateLimitExceeded(DockerError):
+    """Raise for docker hub rate limit exceeded error."""
+
+    error_key = "dockerhub_rate_limit_exceeded"
+    message_template = (
+        "Your IP address has made too many requests to Docker Hub which activated a rate limit. "
+        "For more details see {dockerhub_rate_limit_url}"
+    )
+
+    def __init__(self, logger: Callable[..., None] | None = None) -> None:
+        """Raise & log."""
+        super().__init__(
+            None,
+            logger=logger,
+            extra_fields={
+                "dockerhub_rate_limit_url": "https://www.home-assistant.io/more-info/dockerhub-rate-limit"
+            },
+        )


 class DockerJobError(DockerError, JobException):
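These exceptions move from hard-coded messages to class-level `error_key`/`message_template` attributes with late-bound `extra_fields`. A minimal sketch of how such a base class could render the template; the real `HassioError` base is outside this diff and may differ, and the demo URL is illustrative only.

```python
from collections.abc import Callable


class DemoError(Exception):
    """Minimal sketch of the error_key/message_template pattern."""

    error_key: str | None = None
    message_template: str | None = None

    def __init__(
        self,
        message: str | None = None,
        logger: Callable[..., None] | None = None,
        extra_fields: dict[str, str] | None = None,
    ) -> None:
        self.extra_fields = extra_fields or {}
        if message is None and self.message_template is not None:
            # Late-bound fields let a frontend localize via error_key while
            # the logged message stays readable.
            message = self.message_template.format(**self.extra_fields)
        if logger is not None:
            logger(message)
        super().__init__(message)


class DemoRateLimit(DemoError):
    error_key = "dockerhub_rate_limit_exceeded"
    message_template = "Rate limited. For more details see {dockerhub_rate_limit_url}"

    def __init__(self, logger: Callable[..., None] | None = None) -> None:
        super().__init__(
            None,
            logger=logger,
            extra_fields={"dockerhub_rate_limit_url": "https://example.invalid/info"},
        )


assert "https://example.invalid/info" in str(DemoRateLimit())
```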
@@ -9,7 +9,7 @@ from contextvars import Context, ContextVar, Token
 from dataclasses import dataclass
 from datetime import datetime
 import logging
-from typing import Any, Self
+from typing import Any, Self, cast
 from uuid import uuid4

 from attr.validators import gt, lt
@@ -196,7 +196,7 @@ class SupervisorJob:
         self,
         progress: float | None = None,
         stage: str | None = None,
-        extra: dict[str, Any] | None = DEFAULT,  # type: ignore
+        extra: dict[str, Any] | None | type[DEFAULT] = DEFAULT,
         done: bool | None = None,
     ) -> None:
         """Update multiple fields with one on change event."""
@@ -207,8 +207,8 @@ class SupervisorJob:
             self.progress = progress
         if stage is not None:
             self.stage = stage
-        if extra != DEFAULT:
-            self.extra = extra
+        if extra is not DEFAULT:
+            self.extra = cast(dict[str, Any] | None, extra)

         # Done has special event. use that to trigger on change if included
         # If not then just use any other field to trigger
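The change from `!= DEFAULT` to `is not DEFAULT` is the classic sentinel fix: equality comparison invokes `__eq__` on whatever value the caller passed, and an ordinary value could even compare equal to the sentinel, while an identity check cannot give a false positive. Expressing the sentinel as a class also lets `type[DEFAULT]` type it without `# type: ignore`. A compact illustration:

```python
from typing import Any, cast


class DEFAULT:
    """Sentinel: the class object itself is passed, never an instance."""


def update(extra: dict[str, Any] | None | type[DEFAULT] = DEFAULT) -> str:
    # `is not` checks identity, so only the literal DEFAULT object is skipped;
    # None and {} are treated as real values the caller wants to set.
    if extra is not DEFAULT:
        value = cast(dict[str, Any] | None, extra)
        return f"set to {value!r}"
    return "left unchanged"


assert update() == "left unchanged"
assert update(None) == "set to None"
assert update({"a": 1}) == "set to {'a': 1}"
```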
@@ -306,19 +306,21 @@ class JobManager(FileConfiguration, CoreSysAttributes):
         reference: str | None = None,
         initial_stage: str | None = None,
         internal: bool = False,
-        parent_id: str | None = DEFAULT,  # type: ignore
+        parent_id: str | None | type[DEFAULT] = DEFAULT,
         child_job_syncs: list[ChildJobSyncFilter] | None = None,
     ) -> SupervisorJob:
         """Create a new job."""
-        job = SupervisorJob(
-            name,
-            reference=reference,
-            stage=initial_stage,
-            on_change=self._on_job_change,
-            internal=internal,
-            child_job_syncs=child_job_syncs,
-            **({} if parent_id == DEFAULT else {"parent_id": parent_id}),  # type: ignore
-        )
+        kwargs: dict[str, Any] = {
+            "reference": reference,
+            "stage": initial_stage,
+            "on_change": self._on_job_change,
+            "internal": internal,
+            "child_job_syncs": child_job_syncs,
+        }
+        if parent_id is not DEFAULT:
+            kwargs["parent_id"] = parent_id
+
+        job = SupervisorJob(name, **kwargs)

         # Shouldn't happen but inability to find a parent for progress reporting
         # shouldn't raise and break the active job
@@ -34,6 +34,7 @@ class JobCondition(StrEnum):
     PLUGINS_UPDATED = "plugins_updated"
     RUNNING = "running"
     SUPERVISOR_UPDATED = "supervisor_updated"
+    ARCHITECTURE_SUPPORTED = "architecture_supported"


 class JobConcurrency(StrEnum):
@@ -441,6 +441,14 @@ class Job(CoreSysAttributes):
                     raise JobConditionException(
                         f"'{method_name}' blocked from execution, supervisor needs to be updated first"
                     )
+                if (
+                    JobCondition.ARCHITECTURE_SUPPORTED in used_conditions
+                    and UnsupportedReason.SYSTEM_ARCHITECTURE
+                    in coresys.sys_resolution.unsupported
+                ):
+                    raise JobConditionException(
+                        f"'{method_name}' blocked from execution, unsupported system architecture"
+                    )

                 if JobCondition.PLUGINS_UPDATED in used_conditions and (
                     out_of_date := [
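The decorator treats conditions as data and checks them before the wrapped method body runs. A reduced sketch of that shape; the names and signature are illustrative, not the Supervisor's actual internals.

```python
class JobConditionException(Exception):
    """Raised when a declared condition blocks execution."""


def check_conditions(
    used_conditions: set[str], unsupported: set[str], method_name: str
) -> None:
    # Conditions are plain data; the wrapper raises before the method runs.
    if (
        "architecture_supported" in used_conditions
        and "system_architecture" in unsupported
    ):
        raise JobConditionException(
            f"'{method_name}' blocked from execution, unsupported system architecture"
        )


check_conditions(set(), {"system_architecture"}, "update")  # passes: condition unused
try:
    check_conditions({"architecture_supported"}, {"system_architecture"}, "update")
except JobConditionException as err:
    assert "unsupported system architecture" in str(err)
```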
@@ -64,6 +64,19 @@ def filter_data(coresys: CoreSys, event: Event, hint: Hint) -> Event | None:

     # Not full startup - missing information
     if coresys.core.state in (CoreState.INITIALIZE, CoreState.SETUP):
+        # During SETUP, we have basic system info available for better debugging
+        if coresys.core.state == CoreState.SETUP:
+            event.setdefault("contexts", {}).update(
+                {
+                    "versions": {
+                        "docker": coresys.docker.info.version,
+                        "supervisor": coresys.supervisor.version,
+                    },
+                    "host": {
+                        "machine": coresys.machine,
+                    },
+                }
+            )
         return event

     # List installed addons
@@ -1,5 +1,6 @@
 """A collection of tasks."""

+from contextlib import suppress
 from datetime import datetime, timedelta
 import logging
 from typing import cast
@@ -13,6 +14,7 @@ from ..exceptions import (
     BackupFileNotFoundError,
     HomeAssistantError,
     ObserverError,
+    SupervisorUpdateError,
 )
 from ..homeassistant.const import LANDINGPAGE, WSType
 from ..jobs.const import JobConcurrency
@@ -161,6 +163,7 @@ class Tasks(CoreSysAttributes):
             JobCondition.INTERNET_HOST,
             JobCondition.OS_SUPPORTED,
             JobCondition.RUNNING,
+            JobCondition.ARCHITECTURE_SUPPORTED,
         ],
         concurrency=JobConcurrency.REJECT,
     )
@@ -173,7 +176,11 @@ class Tasks(CoreSysAttributes):
             "Found new Supervisor version %s, updating",
             self.sys_supervisor.latest_version,
         )
-        await self.sys_supervisor.update()
+
+        # Errors are logged by the exceptions, we can't really do something
+        # if an update fails here.
+        with suppress(SupervisorUpdateError):
+            await self.sys_supervisor.update()

     async def _watchdog_homeassistant_api(self):
         """Create scheduler task for monitoring running state of API.
@@ -135,7 +135,7 @@ class Mount(CoreSysAttributes, ABC):
     @property
     def state(self) -> UnitActiveState | None:
         """Get state of mount."""
-        return self._state
+        return UnitActiveState(self._state) if self._state is not None else None

     @cached_property
     def local_where(self) -> Path:
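The mount fix coerces the raw string held in `_state` into the `UnitActiveState` enum at the property boundary, so callers always receive a typed value. The pattern in isolation, assuming a `StrEnum` like the ones used for systemd states (the member list below is an illustrative subset):

```python
from enum import StrEnum


class UnitActiveState(StrEnum):
    """Illustrative subset of systemd unit states."""

    ACTIVE = "active"
    INACTIVE = "inactive"
    FAILED = "failed"


raw_state: str | None = "active"  # e.g. what a D-Bus property handed back

# Coerce at the boundary: callers get the enum (or None), and an unexpected
# string fails loudly with ValueError instead of leaking through untyped.
state = UnitActiveState(raw_state) if raw_state is not None else None
assert state is UnitActiveState.ACTIVE
```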
@@ -23,4 +23,5 @@ PLUGIN_UPDATE_CONDITIONS = [
     JobCondition.HEALTHY,
     JobCondition.INTERNET_HOST,
     JobCondition.SUPERVISOR_UPDATED,
+    JobCondition.ARCHITECTURE_SUPPORTED,
 ]
@@ -58,6 +58,7 @@ class UnsupportedReason(StrEnum):
     SYSTEMD_JOURNAL = "systemd_journal"
     SYSTEMD_RESOLVED = "systemd_resolved"
     VIRTUALIZATION_IMAGE = "virtualization_image"
+    SYSTEM_ARCHITECTURE = "system_architecture"


 class UnhealthyReason(StrEnum):
@@ -5,8 +5,6 @@ from ...coresys import CoreSys
 from ..const import UnsupportedReason
 from .base import EvaluateBase

-SUPPORTED_OS = ["Debian GNU/Linux 12 (bookworm)"]
-

 def setup(coresys: CoreSys) -> EvaluateBase:
     """Initialize evaluation-setup function."""
@@ -33,6 +31,4 @@ class EvaluateOperatingSystem(EvaluateBase):

     async def evaluate(self) -> bool:
         """Run evaluation."""
-        if self.sys_os.available:
-            return False
-        return self.sys_host.info.operating_system not in SUPPORTED_OS
+        return not self.sys_os.available
38 supervisor/resolution/evaluations/system_architecture.py (new file)
@@ -0,0 +1,38 @@
+"""Evaluation class for system architecture support."""
+
+from ...const import CoreState
+from ...coresys import CoreSys
+from ..const import UnsupportedReason
+from .base import EvaluateBase
+
+
+def setup(coresys: CoreSys) -> EvaluateBase:
+    """Initialize evaluation-setup function."""
+    return EvaluateSystemArchitecture(coresys)
+
+
+class EvaluateSystemArchitecture(EvaluateBase):
+    """Evaluate if the current Supervisor architecture is supported."""
+
+    @property
+    def reason(self) -> UnsupportedReason:
+        """Return a UnsupportedReason enum."""
+        return UnsupportedReason.SYSTEM_ARCHITECTURE
+
+    @property
+    def on_failure(self) -> str:
+        """Return a string that is printed when self.evaluate is True."""
+        return "System architecture is no longer supported. Move to a supported system architecture."
+
+    @property
+    def states(self) -> list[CoreState]:
+        """Return a list of valid states when this evaluation can run."""
+        return [CoreState.INITIALIZE]
+
+    async def evaluate(self):
+        """Run evaluation."""
+        return self.sys_host.info.sys_arch.supervisor in {
+            "i386",
+            "armhf",
+            "armv7",
+        }
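The new evaluation follows the standard contract: `setup()` builds the evaluator and `evaluate()` returns True when the system should be flagged unsupported, here by set membership on the 32-bit architectures. A miniature of just that logic, with the surrounding base class stripped away:

```python
import asyncio


class MiniArchitectureEvaluation:
    """Toy version of the evaluation contract: True means unsupported."""

    def __init__(self, supervisor_arch: str) -> None:
        self.supervisor_arch = supervisor_arch

    async def evaluate(self) -> bool:
        # 32-bit Supervisor architectures are flagged as unsupported.
        return self.supervisor_arch in {"i386", "armhf", "armv7"}


assert asyncio.run(MiniArchitectureEvaluation("armv7").evaluate()) is True
assert asyncio.run(MiniArchitectureEvaluation("amd64").evaluate()) is False
```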
@@ -242,9 +242,10 @@ class Updater(FileConfiguration, CoreSysAttributes):
     @Job(
         name="updater_fetch_data",
         conditions=[
+            JobCondition.ARCHITECTURE_SUPPORTED,
             JobCondition.INTERNET_SYSTEM,
-            JobCondition.OS_SUPPORTED,
             JobCondition.HOME_ASSISTANT_CORE_SUPPORTED,
+            JobCondition.OS_SUPPORTED,
         ],
         on_condition=UpdaterJobError,
         throttle_period=timedelta(seconds=30),
@@ -7,13 +7,7 @@ from collections.abc import Awaitable, Callable
 import logging
 from typing import Any, Protocol, cast

-from dbus_fast import (
-    ErrorType,
-    InvalidIntrospectionError,
-    Message,
-    MessageType,
-    Variant,
-)
+from dbus_fast import ErrorType, InvalidIntrospectionError, Message, MessageType
 from dbus_fast.aio.message_bus import MessageBus
 from dbus_fast.aio.proxy_object import ProxyInterface, ProxyObject
 from dbus_fast.errors import DBusError as DBusFastDBusError
@@ -265,7 +259,7 @@ class DBus:
         """

         async def sync_property_change(
-            prop_interface: str, changed: dict[str, Variant], invalidated: list[str]
+            prop_interface: str, changed: dict[str, Any], invalidated: list[str]
         ) -> None:
             """Sync property changes to cache."""
             if interface != prop_interface:
@@ -5,12 +5,20 @@ from collections.abc import AsyncGenerator
 from datetime import UTC, datetime
 from functools import wraps
 import json
+import re

 from aiohttp import ClientResponse

 from supervisor.exceptions import MalformedBinaryEntryError
 from supervisor.host.const import LogFormatter

+_RE_ANSI_CSI_COLORS_PATTERN = re.compile(r"\x1B\[[0-9;]*m")
+
+
+def _strip_ansi_colors(message: str) -> str:
+    """Remove ANSI color codes from a message string."""
+    return _RE_ANSI_CSI_COLORS_PATTERN.sub("", message)
+
+
 def formatter(required_fields: list[str]):
     """Decorate journal entry formatters with list of required fields.
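The pattern `\x1B\[[0-9;]*m` matches only CSI SGR sequences (the `...m` color and style codes), leaving other escape sequences alone. A quick demonstration with the same regex:

```python
import re

# Same shape as _RE_ANSI_CSI_COLORS_PATTERN above: ESC [ <params> m, i.e. the
# SGR (color/style) subset of CSI escape sequences.
ANSI_SGR = re.compile(r"\x1B\[[0-9;]*m")


def strip_colors(message: str) -> str:
    return ANSI_SGR.sub("", message)


assert strip_colors("\x1b[32mINFO\x1b[0m started") == "INFO started"
# Non-SGR escapes (here: cursor-up, ESC [ 1 A) are deliberately left intact.
assert strip_colors("\x1b[1Adone") == "\x1b[1Adone"
```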
@@ -31,9 +39,9 @@ def formatter(required_fields: list[str]):


 @formatter(["MESSAGE"])
-def journal_plain_formatter(entries: dict[str, str]) -> str:
+def journal_plain_formatter(entries: dict[str, str], no_colors: bool = False) -> str:
     """Format parsed journal entries as a plain message."""
-    return entries["MESSAGE"]
+    return _strip_ansi_colors(entries["MESSAGE"]) if no_colors else entries["MESSAGE"]


 @formatter(
@@ -45,7 +53,7 @@ def journal_plain_formatter(entries: dict[str, str]) -> str:
         "MESSAGE",
     ]
 )
-def journal_verbose_formatter(entries: dict[str, str]) -> str:
+def journal_verbose_formatter(entries: dict[str, str], no_colors: bool = False) -> str:
     """Format parsed journal entries to a journalctl-like format."""
     ts = datetime.fromtimestamp(
         int(entries["__REALTIME_TIMESTAMP"]) / 1e6, UTC
@@ -58,14 +66,24 @@ def journal_verbose_formatter(entries: dict[str, str]) -> str:
         else entries.get("SYSLOG_IDENTIFIER", "_UNKNOWN_")
     )

-    return f"{ts} {entries.get('_HOSTNAME', '')} {identifier}: {entries.get('MESSAGE', '')}"
+    message = (
+        _strip_ansi_colors(entries.get("MESSAGE", ""))
+        if no_colors
+        else entries.get("MESSAGE", "")
+    )
+
+    return f"{ts} {entries.get('_HOSTNAME', '')} {identifier}: {message}"


 async def journal_logs_reader(
-    journal_logs: ClientResponse, log_formatter: LogFormatter = LogFormatter.PLAIN
+    journal_logs: ClientResponse,
+    log_formatter: LogFormatter = LogFormatter.PLAIN,
+    no_colors: bool = False,
 ) -> AsyncGenerator[tuple[str | None, str]]:
     """Read logs from systemd journal line by line, formatted using the given formatter.

+    Optionally strip ANSI color codes from the entries' messages.
+
     Returns a generator of (cursor, formatted_entry) tuples.
     """
     match log_formatter:
@@ -84,7 +102,10 @@ async def journal_logs_reader(
             # at EOF (likely race between at_eof and EOF check in readuntil)
             if line == b"\n" or not line:
                 if entries:
-                    yield entries.get("__CURSOR"), formatter_(entries)
+                    yield (
+                        entries.get("__CURSOR"),
+                        formatter_(entries, no_colors=no_colors),
+                    )
                 entries = {}
                 continue
@@ -3,22 +3,25 @@
 import asyncio
 from datetime import timedelta
 import errno
+from http import HTTPStatus
 from pathlib import Path
-from unittest.mock import MagicMock, PropertyMock, patch
+from unittest.mock import MagicMock, PropertyMock, call, patch

+import aiodocker
 from awesomeversion import AwesomeVersion
-from docker.errors import DockerException, ImageNotFound, NotFound
+from docker.errors import APIError, DockerException, NotFound
 import pytest
 from securetar import SecureTarFile

 from supervisor.addons.addon import Addon
 from supervisor.addons.const import AddonBackupMode
 from supervisor.addons.model import AddonModel
+from supervisor.config import CoreConfig
 from supervisor.const import AddonBoot, AddonState, BusEvent
 from supervisor.coresys import CoreSys
 from supervisor.docker.addon import DockerAddon
 from supervisor.docker.const import ContainerState
-from supervisor.docker.manager import CommandReturn
+from supervisor.docker.manager import CommandReturn, DockerAPI
 from supervisor.docker.monitor import DockerContainerStateEvent
 from supervisor.exceptions import AddonsError, AddonsJobError, AudioUpdateError
 from supervisor.hardware.helper import HwHelper
@@ -861,16 +864,14 @@ async def test_addon_loads_wrong_image(

     container.remove.assert_called_with(force=True, v=True)
     # one for removing the addon, one for removing the addon builder
-    assert coresys.docker.images.remove.call_count == 2
-
-    assert coresys.docker.images.remove.call_args_list[0].kwargs == {
-        "image": "local/aarch64-addon-ssh:latest",
-        "force": True,
-    }
-    assert coresys.docker.images.remove.call_args_list[1].kwargs == {
-        "image": "local/aarch64-addon-ssh:9.2.1",
-        "force": True,
-    }
+    assert coresys.docker.images.delete.call_count == 2
+
+    assert coresys.docker.images.delete.call_args_list[0] == call(
+        "local/aarch64-addon-ssh:latest", force=True
+    )
+    assert coresys.docker.images.delete.call_args_list[1] == call(
+        "local/aarch64-addon-ssh:9.2.1", force=True
+    )
     mock_run_command.assert_called_once()
     assert mock_run_command.call_args.args[0] == "docker.io/library/docker"
     assert mock_run_command.call_args.kwargs["version"] == "1.0.0-cli"
@@ -894,7 +895,9 @@ async def test_addon_loads_missing_image(
     mock_amd64_arch_supported,
 ):
     """Test addon corrects a missing image on load."""
-    coresys.docker.images.get.side_effect = ImageNotFound("missing")
+    coresys.docker.images.inspect.side_effect = aiodocker.DockerError(
+        HTTPStatus.NOT_FOUND, {"message": "missing"}
+    )

     with (
         patch("pathlib.Path.is_file", return_value=True),
@@ -926,41 +929,51 @@ async def test_addon_loads_missing_image(
     assert install_addon_ssh.image == "local/amd64-addon-ssh"


+@pytest.mark.parametrize(
+    "pull_image_exc",
+    [APIError("error"), aiodocker.DockerError(400, {"message": "error"})],
+)
+@pytest.mark.usefixtures("container", "mock_amd64_arch_supported")
 async def test_addon_load_succeeds_with_docker_errors(
     coresys: CoreSys,
     install_addon_ssh: Addon,
-    container: MagicMock,
     caplog: pytest.LogCaptureFixture,
-    mock_amd64_arch_supported,
+    pull_image_exc: Exception,
 ):
     """Docker errors while building/pulling an image during load should not raise and fail setup."""
     # Build env invalid failure
-    coresys.docker.images.get.side_effect = ImageNotFound("missing")
+    coresys.docker.images.inspect.side_effect = aiodocker.DockerError(
+        HTTPStatus.NOT_FOUND, {"message": "missing"}
+    )
     caplog.clear()
     await install_addon_ssh.load()
     assert "Invalid build environment" in caplog.text

     # Image build failure
-    coresys.docker.images.build.side_effect = DockerException()
     caplog.clear()
     with (
         patch("pathlib.Path.is_file", return_value=True),
         patch.object(
-            type(coresys.config),
-            "local_to_extern_path",
-            return_value="/addon/path/on/host",
+            CoreConfig, "local_to_extern_path", return_value="/addon/path/on/host"
+        ),
+        patch.object(
+            DockerAPI,
+            "run_command",
+            return_value=MagicMock(exit_code=1, output=b"error"),
         ),
     ):
         await install_addon_ssh.load()
-    assert "Can't build local/amd64-addon-ssh:9.2.1" in caplog.text
+    assert (
+        "Can't build local/amd64-addon-ssh:9.2.1: Docker build failed for local/amd64-addon-ssh:9.2.1 (exit code 1). Build output:\nerror"
+        in caplog.text
+    )

     # Image pull failure
     install_addon_ssh.data["image"] = "test/amd64-addon-ssh"
-    coresys.docker.images.build.reset_mock(side_effect=True)
-    coresys.docker.pull_image.side_effect = DockerException()
     caplog.clear()
-    await install_addon_ssh.load()
-    assert "Unknown error with test/amd64-addon-ssh:9.2.1" in caplog.text
+    with patch.object(DockerAPI, "pull_image", side_effect=pull_image_exc):
+        await install_addon_ssh.load()
+        assert "Can't install test/amd64-addon-ssh:9.2.1:" in caplog.text


 async def test_addon_manual_only_boot(coresys: CoreSys, install_addon_example: Addon):
@@ -1,5 +1,8 @@
 """Test addon build."""

+import base64
+import json
+from pathlib import Path
 from unittest.mock import PropertyMock, patch

 from awesomeversion import AwesomeVersion
@@ -7,6 +10,7 @@ from awesomeversion import AwesomeVersion
 from supervisor.addons.addon import Addon
 from supervisor.addons.build import AddonBuild
 from supervisor.coresys import CoreSys
+from supervisor.docker.const import DOCKER_HUB

 from tests.common import is_in_list

@@ -29,7 +33,7 @@ async def test_platform_set(coresys: CoreSys, install_addon_ssh: Addon):
         ),
     ):
         args = await coresys.run_in_executor(
-            build.get_docker_args, AwesomeVersion("latest"), "test-image:latest"
+            build.get_docker_args, AwesomeVersion("latest"), "test-image:latest", None
         )

     assert is_in_list(["--platform", "linux/amd64"], args["command"])
@@ -53,7 +57,7 @@ async def test_dockerfile_evaluation(coresys: CoreSys, install_addon_ssh: Addon)
         ),
     ):
         args = await coresys.run_in_executor(
-            build.get_docker_args, AwesomeVersion("latest"), "test-image:latest"
+            build.get_docker_args, AwesomeVersion("latest"), "test-image:latest", None
         )

     assert is_in_list(["--file", "Dockerfile"], args["command"])
@@ -81,7 +85,7 @@ async def test_dockerfile_evaluation_arch(coresys: CoreSys, install_addon_ssh: A
         ),
     ):
         args = await coresys.run_in_executor(
-            build.get_docker_args, AwesomeVersion("latest"), "test-image:latest"
+            build.get_docker_args, AwesomeVersion("latest"), "test-image:latest", None
        )

     assert is_in_list(["--file", "Dockerfile.aarch64"], args["command"])
@@ -117,3 +121,158 @@ async def test_build_invalid(coresys: CoreSys, install_addon_ssh: Addon):
         ),
     ):
         assert not await build.is_valid()
+
+
+async def test_docker_config_no_registries(coresys: CoreSys, install_addon_ssh: Addon):
+    """Test docker config generation when no registries configured."""
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
+
+    # No registries configured by default
+    assert build.get_docker_config_json() is None
+
+
+async def test_docker_config_no_matching_registry(
+    coresys: CoreSys, install_addon_ssh: Addon
+):
+    """Test docker config generation when registry doesn't match base image."""
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
+
+    # Configure a registry that doesn't match the base image
+    # pylint: disable-next=protected-access
+    coresys.docker.config._data["registries"] = {
+        "some.other.registry": {"username": "user", "password": "pass"}
+    }
+
+    with (
+        patch.object(
+            type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
+        ),
+        patch.object(
+            type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
+        ),
+    ):
+        # Base image is ghcr.io/home-assistant/... which doesn't match
+        assert build.get_docker_config_json() is None
+
+
+async def test_docker_config_matching_registry(
+    coresys: CoreSys, install_addon_ssh: Addon
+):
+    """Test docker config generation when registry matches base image."""
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
+
+    # Configure ghcr.io registry which matches the default base image
+    # pylint: disable-next=protected-access
+    coresys.docker.config._data["registries"] = {
+        "ghcr.io": {"username": "testuser", "password": "testpass"}
+    }
+
+    with (
+        patch.object(
+            type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
+        ),
+        patch.object(
+            type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
+        ),
+    ):
+        config_json = build.get_docker_config_json()
+        assert config_json is not None
+
+        config = json.loads(config_json)
+        assert "auths" in config
+        assert "ghcr.io" in config["auths"]
+
+        # Verify base64-encoded credentials
+        expected_auth = base64.b64encode(b"testuser:testpass").decode()
+        assert config["auths"]["ghcr.io"]["auth"] == expected_auth
+
+
+async def test_docker_config_docker_hub(coresys: CoreSys, install_addon_ssh: Addon):
+    """Test docker config generation for Docker Hub registry."""
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
+
+    # Configure Docker Hub registry
+    # pylint: disable-next=protected-access
+    coresys.docker.config._data["registries"] = {
+        DOCKER_HUB: {"username": "hubuser", "password": "hubpass"}
+    }
+
+    # Mock base_image to return a Docker Hub image (no registry prefix)
+    with patch.object(
+        type(build),
+        "base_image",
+        new=PropertyMock(return_value="library/alpine:latest"),
+    ):
+        config_json = build.get_docker_config_json()
+        assert config_json is not None
+
+        config = json.loads(config_json)
+        # Docker Hub uses special URL as key
+        assert "https://index.docker.io/v1/" in config["auths"]
+
+        expected_auth = base64.b64encode(b"hubuser:hubpass").decode()
+        assert config["auths"]["https://index.docker.io/v1/"]["auth"] == expected_auth
+
+
+async def test_docker_args_with_config_path(coresys: CoreSys, install_addon_ssh: Addon):
+    """Test docker args include config volume when path provided."""
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
+
+    with (
+        patch.object(
+            type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
+        ),
+        patch.object(
+            type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
+        ),
+        patch.object(
+            type(coresys.config),
+            "local_to_extern_path",
+            side_effect=lambda p: f"/extern{p}",
+        ),
+    ):
+        config_path = Path("/data/supervisor/tmp/config.json")
+        args = await coresys.run_in_executor(
+            build.get_docker_args,
+            AwesomeVersion("latest"),
+            "test-image:latest",
+            config_path,
+        )
+
+    # Check that config is mounted
+    assert "/extern/data/supervisor/tmp/config.json" in args["volumes"]
+    assert (
+        args["volumes"]["/extern/data/supervisor/tmp/config.json"]["bind"]
+        == "/root/.docker/config.json"
+    )
+    assert args["volumes"]["/extern/data/supervisor/tmp/config.json"]["mode"] == "ro"
+
+
+async def test_docker_args_without_config_path(
+    coresys: CoreSys, install_addon_ssh: Addon
+):
+    """Test docker args don't include config volume when no path provided."""
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
+
+    with (
+        patch.object(
+            type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
+        ),
+        patch.object(
+            type(coresys.arch), "default", new=PropertyMock(return_value="amd64")
+        ),
+        patch.object(
+            type(coresys.config),
+            "local_to_extern_path",
+            return_value="/addon/path/on/host",
+        ),
+    ):
+        args = await coresys.run_in_executor(
+            build.get_docker_args, AwesomeVersion("latest"), "test-image:latest", None
+        )
+
+    # Only docker socket and addon path should be mounted
+    assert len(args["volumes"]) == 2
+    # Verify no docker config mount
+    for bind in args["volumes"].values():
+        assert bind["bind"] != "/root/.docker/config.json"
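These tests pin down the `config.json` layout that `get_docker_config_json` is expected to produce: an `auths` map keyed by registry host, with base64-encoded `user:password` credentials, and Docker Hub addressed through its legacy `https://index.docker.io/v1/` key. A hedged sketch of a generator that satisfies exactly those assertions; treating `"docker.io"` as the value behind the `DOCKER_HUB` constant is an assumption, since the constant's definition is not shown in this diff.

```python
import base64
import json

# Docker Hub's legacy auth key, as asserted in the tests above.
DOCKER_HUB_AUTH_KEY = "https://index.docker.io/v1/"


def docker_config_json(registry: str, username: str, password: str) -> str:
    """Build a minimal Docker client config.json for one registry."""
    # Assumption: "docker.io" stands in for the DOCKER_HUB constant.
    key = DOCKER_HUB_AUTH_KEY if registry == "docker.io" else registry
    auth = base64.b64encode(f"{username}:{password}".encode()).decode()
    return json.dumps({"auths": {key: {"auth": auth}}})


config = json.loads(docker_config_json("ghcr.io", "testuser", "testpass"))
assert config["auths"]["ghcr.io"]["auth"] == base64.b64encode(
    b"testuser:testpass"
).decode()
```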
@@ -4,7 +4,7 @@ import asyncio
 from collections.abc import AsyncGenerator, Generator
 from copy import deepcopy
 from pathlib import Path
-from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch
+from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, call, patch

 from awesomeversion import AwesomeVersion
 import pytest
@@ -514,19 +514,13 @@ async def test_shared_image_kept_on_uninstall(
     latest = f"{install_addon_example.image}:latest"

     await coresys.addons.uninstall("local_example2")
-    coresys.docker.images.remove.assert_not_called()
+    coresys.docker.images.delete.assert_not_called()
     assert not coresys.addons.get("local_example2", local_only=True)

     await coresys.addons.uninstall("local_example")
-    assert coresys.docker.images.remove.call_count == 2
-    assert coresys.docker.images.remove.call_args_list[0].kwargs == {
-        "image": latest,
-        "force": True,
-    }
-    assert coresys.docker.images.remove.call_args_list[1].kwargs == {
-        "image": image,
-        "force": True,
-    }
+    assert coresys.docker.images.delete.call_count == 2
+    assert coresys.docker.images.delete.call_args_list[0] == call(latest, force=True)
+    assert coresys.docker.images.delete.call_args_list[1] == call(image, force=True)
     assert not coresys.addons.get("local_example", local_only=True)
@@ -554,19 +548,17 @@ async def test_shared_image_kept_on_update(
     assert example_2.version == "1.2.0"
     assert install_addon_example_image.version == "1.2.0"

-    image_new = MagicMock()
-    image_new.id = "image_new"
-    image_old = MagicMock()
-    image_old.id = "image_old"
-    docker.images.get.side_effect = [image_new, image_old]
+    image_new = {"Id": "image_new", "RepoTags": ["image_new:latest"]}
+    image_old = {"Id": "image_old", "RepoTags": ["image_old:latest"]}
+    docker.images.inspect.side_effect = [image_new, image_old]
     docker.images.list.return_value = [image_new, image_old]

     with patch.object(DockerAPI, "pull_image", return_value=image_new):
         await coresys.addons.update("local_example2")
-    docker.images.remove.assert_not_called()
+    docker.images.delete.assert_not_called()
     assert example_2.version == "1.3.0"

-    docker.images.get.side_effect = [image_new]
+    docker.images.inspect.side_effect = [image_new]
     await coresys.addons.update("local_example_image")
-    docker.images.remove.assert_called_once_with("image_old", force=True)
+    docker.images.delete.assert_called_once_with("image_old", force=True)
     assert install_addon_example_image.version == "1.3.0"
@@ -1,95 +1 @@
 """Test for API calls."""
-
-from unittest.mock import AsyncMock, MagicMock
-
-from aiohttp.test_utils import TestClient
-
-from supervisor.coresys import CoreSys
-from supervisor.host.const import LogFormat
-
-DEFAULT_LOG_RANGE = "entries=:-99:100"
-DEFAULT_LOG_RANGE_FOLLOW = "entries=:-99:18446744073709551615"
-
-
-async def common_test_api_advanced_logs(
-    path_prefix: str,
-    syslog_identifier: str,
-    api_client: TestClient,
-    journald_logs: MagicMock,
-    coresys: CoreSys,
-    os_available: None,
-):
-    """Template for tests of endpoints using advanced logs."""
-    resp = await api_client.get(f"{path_prefix}/logs")
-    assert resp.status == 200
-    assert resp.content_type == "text/plain"
-
-    journald_logs.assert_called_once_with(
-        params={"SYSLOG_IDENTIFIER": syslog_identifier},
-        range_header=DEFAULT_LOG_RANGE,
-        accept=LogFormat.JOURNAL,
-    )
-
-    journald_logs.reset_mock()
-
-    resp = await api_client.get(f"{path_prefix}/logs/follow")
-    assert resp.status == 200
-    assert resp.content_type == "text/plain"
-
-    journald_logs.assert_called_once_with(
-        params={"SYSLOG_IDENTIFIER": syslog_identifier, "follow": ""},
-        range_header=DEFAULT_LOG_RANGE_FOLLOW,
-        accept=LogFormat.JOURNAL,
-    )
-
-    journald_logs.reset_mock()
-
-    mock_response = MagicMock()
-    mock_response.text = AsyncMock(
-        return_value='{"CONTAINER_LOG_EPOCH": "12345"}\n{"CONTAINER_LOG_EPOCH": "12345"}\n'
-    )
-    journald_logs.return_value.__aenter__.return_value = mock_response
-
-    resp = await api_client.get(f"{path_prefix}/logs/latest")
-    assert resp.status == 200
-
-    assert journald_logs.call_count == 2
-
-    # Check the first call for getting epoch
-    epoch_call = journald_logs.call_args_list[0]
-    assert epoch_call[1]["params"] == {"CONTAINER_NAME": syslog_identifier}
-    assert epoch_call[1]["range_header"] == "entries=:-1:2"
-
-    # Check the second call for getting logs with the epoch
-    logs_call = journald_logs.call_args_list[1]
-    assert logs_call[1]["params"]["SYSLOG_IDENTIFIER"] == syslog_identifier
-    assert logs_call[1]["params"]["CONTAINER_LOG_EPOCH"] == "12345"
-    assert logs_call[1]["range_header"] == "entries=:0:18446744073709551615"
-
-    journald_logs.reset_mock()
-
-    resp = await api_client.get(f"{path_prefix}/logs/boots/0")
-    assert resp.status == 200
-    assert resp.content_type == "text/plain"
-
-    journald_logs.assert_called_once_with(
-        params={"SYSLOG_IDENTIFIER": syslog_identifier, "_BOOT_ID": "ccc"},
-        range_header=DEFAULT_LOG_RANGE,
-        accept=LogFormat.JOURNAL,
-    )
-
-    journald_logs.reset_mock()
-
-    resp = await api_client.get(f"{path_prefix}/logs/boots/0/follow")
-    assert resp.status == 200
-    assert resp.content_type == "text/plain"
-
-    journald_logs.assert_called_once_with(
-        params={
-            "SYSLOG_IDENTIFIER": syslog_identifier,
-            "_BOOT_ID": "ccc",
-            "follow": "",
-        },
-        range_header=DEFAULT_LOG_RANGE_FOLLOW,
-        accept=LogFormat.JOURNAL,
-    )
149
tests/api/conftest.py
Normal file
149
tests/api/conftest.py
Normal file
@@ -0,0 +1,149 @@
|
|||||||
|
"""Fixtures for API tests."""
|
||||||
|
|
||||||
|
from collections.abc import Awaitable, Callable
|
||||||
|
from unittest.mock import ANY, AsyncMock, MagicMock
|
||||||
|
|
||||||
|
from aiohttp.test_utils import TestClient
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from supervisor.coresys import CoreSys
|
||||||
|
from supervisor.host.const import LogFormat, LogFormatter
|
||||||
|
|
||||||
|
DEFAULT_LOG_RANGE = "entries=:-99:100"
|
||||||
|
DEFAULT_LOG_RANGE_FOLLOW = "entries=:-99:18446744073709551615"
|
||||||
|
|
||||||
|
|
||||||
|
async def _common_test_api_advanced_logs(
|
||||||
|
path_prefix: str,
|
||||||
|
syslog_identifier: str,
|
||||||
|
api_client: TestClient,
|
||||||
|
journald_logs: MagicMock,
|
||||||
|
coresys: CoreSys,
|
||||||
|
os_available: None,
|
||||||
|
journal_logs_reader: MagicMock,
|
||||||
|
):
|
||||||
|
"""Template for tests of endpoints using advanced logs."""
|
||||||
|
resp = await api_client.get(f"{path_prefix}/logs")
|
||||||
|
assert resp.status == 200
|
||||||
|
assert resp.content_type == "text/plain"
|
||||||
|
|
||||||
|
journald_logs.assert_called_once_with(
|
||||||
|
params={"SYSLOG_IDENTIFIER": syslog_identifier},
|
||||||
|
range_header=DEFAULT_LOG_RANGE,
|
||||||
|
accept=LogFormat.JOURNAL,
|
||||||
|
)
|
||||||
|
journal_logs_reader.assert_called_with(ANY, LogFormatter.PLAIN, False)
|
||||||
|
|
||||||
|
journald_logs.reset_mock()
|
||||||
|
+    journal_logs_reader.reset_mock()
+
+    resp = await api_client.get(f"{path_prefix}/logs?no_colors")
+    assert resp.status == 200
+    assert resp.content_type == "text/plain"
+
+    journald_logs.assert_called_once_with(
+        params={"SYSLOG_IDENTIFIER": syslog_identifier},
+        range_header=DEFAULT_LOG_RANGE,
+        accept=LogFormat.JOURNAL,
+    )
+    journal_logs_reader.assert_called_with(ANY, LogFormatter.PLAIN, True)
+
+    journald_logs.reset_mock()
+    journal_logs_reader.reset_mock()
+
+    resp = await api_client.get(f"{path_prefix}/logs/follow")
+    assert resp.status == 200
+    assert resp.content_type == "text/plain"
+
+    journald_logs.assert_called_once_with(
+        params={"SYSLOG_IDENTIFIER": syslog_identifier, "follow": ""},
+        range_header=DEFAULT_LOG_RANGE_FOLLOW,
+        accept=LogFormat.JOURNAL,
+    )
+    journal_logs_reader.assert_called_with(ANY, LogFormatter.PLAIN, False)
+
+    journald_logs.reset_mock()
+    journal_logs_reader.reset_mock()
+
+    mock_response = MagicMock()
+    mock_response.text = AsyncMock(
+        return_value='{"CONTAINER_LOG_EPOCH": "12345"}\n{"CONTAINER_LOG_EPOCH": "12345"}\n'
+    )
+    journald_logs.return_value.__aenter__.return_value = mock_response
+
+    resp = await api_client.get(f"{path_prefix}/logs/latest")
+    assert resp.status == 200
+
+    assert journald_logs.call_count == 2
+
+    # Check the first call for getting epoch
+    epoch_call = journald_logs.call_args_list[0]
+    assert epoch_call[1]["params"] == {"CONTAINER_NAME": syslog_identifier}
+    assert epoch_call[1]["range_header"] == "entries=:-1:2"
+
+    # Check the second call for getting logs with the epoch
+    logs_call = journald_logs.call_args_list[1]
+    assert logs_call[1]["params"]["SYSLOG_IDENTIFIER"] == syslog_identifier
+    assert logs_call[1]["params"]["CONTAINER_LOG_EPOCH"] == "12345"
+    assert logs_call[1]["range_header"] == "entries=:0:18446744073709551615"
+    journal_logs_reader.assert_called_with(ANY, LogFormatter.PLAIN, True)
+
+    journald_logs.reset_mock()
+    journal_logs_reader.reset_mock()
+
+    resp = await api_client.get(f"{path_prefix}/logs/boots/0")
+    assert resp.status == 200
+    assert resp.content_type == "text/plain"
+
+    journald_logs.assert_called_once_with(
+        params={"SYSLOG_IDENTIFIER": syslog_identifier, "_BOOT_ID": "ccc"},
+        range_header=DEFAULT_LOG_RANGE,
+        accept=LogFormat.JOURNAL,
+    )
+
+    journald_logs.reset_mock()
+
+    resp = await api_client.get(f"{path_prefix}/logs/boots/0/follow")
+    assert resp.status == 200
+    assert resp.content_type == "text/plain"
+
+    journald_logs.assert_called_once_with(
+        params={
+            "SYSLOG_IDENTIFIER": syslog_identifier,
+            "_BOOT_ID": "ccc",
+            "follow": "",
+        },
+        range_header=DEFAULT_LOG_RANGE_FOLLOW,
+        accept=LogFormat.JOURNAL,
+    )
+
+
+@pytest.fixture
+async def advanced_logs_tester(
+    api_client: TestClient,
+    journald_logs: MagicMock,
+    coresys: CoreSys,
+    os_available,
+    journal_logs_reader: MagicMock,
+) -> Callable[[str, str], Awaitable[None]]:
+    """Fixture that returns a function to test advanced logs endpoints.
+
+    This allows tests to avoid explicitly passing all the required fixtures.
+
+    Usage:
+        async def test_my_logs(advanced_logs_tester):
+            await advanced_logs_tester("/path/prefix", "syslog_identifier")
+    """
+
+    async def test_logs(path_prefix: str, syslog_identifier: str):
+        await _common_test_api_advanced_logs(
+            path_prefix,
+            syslog_identifier,
+            api_client,
+            journald_logs,
+            coresys,
+            os_available,
+            journal_logs_reader,
+        )
+
+    return test_logs
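Side note on the /logs/latest assertions above: the endpoint performs a two-step handshake with the systemd journal gateway. It first reads the last two journal entries for the container (range_header="entries=:-1:2") to learn the CONTAINER_LOG_EPOCH of the most recent container start, then fetches every entry tagged with that epoch ("entries=:0:18446744073709551615", i.e. from the first matching entry up to the maximum uint64 entry count). A minimal sketch of that flow, where journal_gateway and fetch_latest_logs are illustrative names rather than Supervisor's actual API:

import json

MAX_UINT64 = 18_446_744_073_709_551_615  # "all entries" for the range header

async def fetch_latest_logs(journal_gateway, identifier: str) -> str:
    # 1) Read the last two journal entries for the container to learn the
    #    CONTAINER_LOG_EPOCH of the most recent container start.
    async with journal_gateway(
        params={"CONTAINER_NAME": identifier},
        range_header="entries=:-1:2",
    ) as resp:
        last = (await resp.text()).strip().splitlines()[-1]
        epoch = json.loads(last)["CONTAINER_LOG_EPOCH"]

    # 2) Fetch every entry belonging to that epoch, i.e. the latest run only.
    async with journal_gateway(
        params={"SYSLOG_IDENTIFIER": identifier, "CONTAINER_LOG_EPOCH": epoch},
        range_header=f"entries=:0:{MAX_UINT64}",
    ) as resp:
        return await resp.text()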
@@ -20,7 +20,6 @@ from supervisor.exceptions import HassioError
 from supervisor.store.repository import Repository
 
 from ..const import TEST_ADDON_SLUG
-from . import common_test_api_advanced_logs
 
 
 def _create_test_event(name: str, state: ContainerState) -> DockerContainerStateEvent:
@@ -72,21 +71,11 @@ async def test_addons_info_not_installed(
 
 
 async def test_api_addon_logs(
-    api_client: TestClient,
-    journald_logs: MagicMock,
-    coresys: CoreSys,
-    os_available,
+    advanced_logs_tester,
     install_addon_ssh: Addon,
 ):
     """Test addon logs."""
-    await common_test_api_advanced_logs(
-        "/addons/local_ssh",
-        "addon_local_ssh",
-        api_client,
-        journald_logs,
-        coresys,
-        os_available,
-    )
+    await advanced_logs_tester("/addons/local_ssh", "addon_local_ssh")
 
 
 async def test_api_addon_logs_not_installed(api_client: TestClient):
@@ -1,18 +1,6 @@
 """Test audio api."""
 
-from unittest.mock import MagicMock
-
-from aiohttp.test_utils import TestClient
-
-from supervisor.coresys import CoreSys
-
-from tests.api import common_test_api_advanced_logs
-
 
-async def test_api_audio_logs(
-    api_client: TestClient, journald_logs: MagicMock, coresys: CoreSys, os_available
-):
+async def test_api_audio_logs(advanced_logs_tester) -> None:
     """Test audio logs."""
-    await common_test_api_advanced_logs(
-        "/audio", "hassio_audio", api_client, journald_logs, coresys, os_available
-    )
+    await advanced_logs_tester("/audio", "hassio_audio")
@@ -1,13 +1,12 @@
 """Test DNS API."""
 
-from unittest.mock import MagicMock, patch
+from unittest.mock import patch
 
 from aiohttp.test_utils import TestClient
 
 from supervisor.coresys import CoreSys
 from supervisor.dbus.resolved import Resolved
 
-from tests.api import common_test_api_advanced_logs
 from tests.dbus_service_mocks.base import DBusServiceMock
 from tests.dbus_service_mocks.resolved import Resolved as ResolvedService
 
@@ -66,15 +65,6 @@ async def test_options(api_client: TestClient, coresys: CoreSys):
     restart.assert_called_once()
 
 
-async def test_api_dns_logs(
-    api_client: TestClient, journald_logs: MagicMock, coresys: CoreSys, os_available
-):
+async def test_api_dns_logs(advanced_logs_tester):
     """Test dns logs."""
-    await common_test_api_advanced_logs(
-        "/dns",
-        "hassio_dns",
-        api_client,
-        journald_logs,
-        coresys,
-        os_available,
-    )
+    await advanced_logs_tester("/dns", "hassio_dns")
@@ -4,6 +4,11 @@ from aiohttp.test_utils import TestClient
 import pytest
 
 from supervisor.coresys import CoreSys
+from supervisor.resolution.const import ContextType, IssueType, SuggestionType
+from supervisor.resolution.data import Issue, Suggestion
+
+from tests.dbus_service_mocks.agent_system import System as SystemService
+from tests.dbus_service_mocks.base import DBusServiceMock
 
 
 @pytest.mark.asyncio
@@ -84,3 +89,79 @@ async def test_registry_not_found(api_client: TestClient):
     assert resp.status == 404
     body = await resp.json()
     assert body["message"] == "Hostname bad does not exist in registries"
+
+
+@pytest.mark.parametrize("os_available", ["17.0.rc1"], indirect=True)
+async def test_api_migrate_docker_storage_driver(
+    api_client: TestClient,
+    coresys: CoreSys,
+    os_agent_services: dict[str, DBusServiceMock],
+    os_available,
+):
+    """Test Docker storage driver migration."""
+    system_service: SystemService = os_agent_services["agent_system"]
+    system_service.MigrateDockerStorageDriver.calls.clear()
+
+    resp = await api_client.post(
+        "/docker/migrate-storage-driver",
+        json={"storage_driver": "overlayfs"},
+    )
+    assert resp.status == 200
+
+    assert system_service.MigrateDockerStorageDriver.calls == [("overlayfs",)]
+    assert (
+        Issue(IssueType.REBOOT_REQUIRED, ContextType.SYSTEM)
+        in coresys.resolution.issues
+    )
+    assert (
+        Suggestion(SuggestionType.EXECUTE_REBOOT, ContextType.SYSTEM)
+        in coresys.resolution.suggestions
+    )
+
+    # Test migration back to overlay2 (graph driver)
+    system_service.MigrateDockerStorageDriver.calls.clear()
+    resp = await api_client.post(
+        "/docker/migrate-storage-driver",
+        json={"storage_driver": "overlay2"},
+    )
+    assert resp.status == 200
+    assert system_service.MigrateDockerStorageDriver.calls == [("overlay2",)]
+
+
+@pytest.mark.parametrize("os_available", ["17.0.rc1"], indirect=True)
+async def test_api_migrate_docker_storage_driver_invalid_backend(
+    api_client: TestClient,
+    os_available,
+):
+    """Test 400 is returned for invalid storage driver."""
+    resp = await api_client.post(
+        "/docker/migrate-storage-driver",
+        json={"storage_driver": "invalid"},
+    )
+    assert resp.status == 400
+
+
+async def test_api_migrate_docker_storage_driver_not_os(
+    api_client: TestClient,
+    coresys: CoreSys,
+):
+    """Test 404 is returned if not running on HAOS."""
+    resp = await api_client.post(
+        "/docker/migrate-storage-driver",
+        json={"storage_driver": "overlayfs"},
+    )
+    assert resp.status == 404
+
+
+@pytest.mark.parametrize("os_available", ["16.2"], indirect=True)
+async def test_api_migrate_docker_storage_driver_old_os(
+    api_client: TestClient,
+    coresys: CoreSys,
+    os_available,
+):
+    """Test 404 is returned if OS is older than 17.0."""
+    resp = await api_client.post(
+        "/docker/migrate-storage-driver",
+        json={"storage_driver": "overlayfs"},
+    )
+    assert resp.status == 404
@@ -18,26 +18,18 @@ from supervisor.homeassistant.const import WSEvent
 from supervisor.homeassistant.core import HomeAssistantCore
 from supervisor.homeassistant.module import HomeAssistant
 
-from tests.api import common_test_api_advanced_logs
-from tests.common import load_json_fixture
+from tests.common import AsyncIterator, load_json_fixture
 
 
 @pytest.mark.parametrize("legacy_route", [True, False])
 async def test_api_core_logs(
-    api_client: TestClient,
-    journald_logs: MagicMock,
-    coresys: CoreSys,
-    os_available,
+    advanced_logs_tester: AsyncMock,
     legacy_route: bool,
 ):
     """Test core logs."""
-    await common_test_api_advanced_logs(
+    await advanced_logs_tester(
         f"/{'homeassistant' if legacy_route else 'core'}",
         "homeassistant",
-        api_client,
-        journald_logs,
-        coresys,
-        os_available,
     )
 
 
@@ -283,9 +275,9 @@ async def test_api_progress_updates_home_assistant_update(
     """Test progress updates sent to Home Assistant for updates."""
     coresys.hardware.disk.get_disk_free_space = lambda x: 5000
     coresys.core.set_state(CoreState.RUNNING)
-    coresys.docker.docker.api.pull.return_value = load_json_fixture(
-        "docker_pull_image_log.json"
-    )
+    logs = load_json_fixture("docker_pull_image_log.json")
+    coresys.docker.images.pull.return_value = AsyncIterator(logs)
    coresys.homeassistant.version = AwesomeVersion("2025.8.0")
 
     with (
@@ -331,29 +323,29 @@ async def test_api_progress_updates_home_assistant_update(
         },
         {
             "stage": None,
-            "progress": 1.2,
+            "progress": 1.7,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 2.8,
+            "progress": 4.0,
             "done": False,
         },
     ]
     assert events[-5:] == [
         {
             "stage": None,
-            "progress": 97.2,
+            "progress": 98.2,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 98.4,
+            "progress": 98.3,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 99.4,
+            "progress": 99.3,
             "done": False,
         },
         {
@@ -272,7 +272,7 @@ async def test_advaced_logs_query_parameters(
         range_header=DEFAULT_RANGE,
         accept=LogFormat.JOURNAL,
     )
-    journal_logs_reader.assert_called_with(ANY, LogFormatter.VERBOSE)
+    journal_logs_reader.assert_called_with(ANY, LogFormatter.VERBOSE, False)
 
     journal_logs_reader.reset_mock()
     journald_logs.reset_mock()
@@ -290,7 +290,19 @@ async def test_advaced_logs_query_parameters(
         range_header="entries=:-52:53",
         accept=LogFormat.JOURNAL,
     )
-    journal_logs_reader.assert_called_with(ANY, LogFormatter.VERBOSE)
+    journal_logs_reader.assert_called_with(ANY, LogFormatter.VERBOSE, False)
+
+    journal_logs_reader.reset_mock()
+    journald_logs.reset_mock()
+
+    # Check no_colors query parameter
+    await api_client.get("/host/logs?no_colors")
+    journald_logs.assert_called_once_with(
+        params={"SYSLOG_IDENTIFIER": coresys.host.logs.default_identifiers},
+        range_header=DEFAULT_RANGE,
+        accept=LogFormat.JOURNAL,
+    )
+    journal_logs_reader.assert_called_with(ANY, LogFormatter.VERBOSE, True)
 
 
 async def test_advanced_logs_boot_id_offset(
@@ -343,24 +355,24 @@ async def test_advanced_logs_formatters(
     """Test advanced logs formatters varying on Accept header."""
 
     await api_client.get("/host/logs")
-    journal_logs_reader.assert_called_once_with(ANY, LogFormatter.VERBOSE)
+    journal_logs_reader.assert_called_once_with(ANY, LogFormatter.VERBOSE, False)
 
     journal_logs_reader.reset_mock()
 
     headers = {"Accept": "text/x-log"}
     await api_client.get("/host/logs", headers=headers)
-    journal_logs_reader.assert_called_once_with(ANY, LogFormatter.VERBOSE)
+    journal_logs_reader.assert_called_once_with(ANY, LogFormatter.VERBOSE, False)
 
     journal_logs_reader.reset_mock()
 
     await api_client.get("/host/logs/identifiers/test")
-    journal_logs_reader.assert_called_once_with(ANY, LogFormatter.PLAIN)
+    journal_logs_reader.assert_called_once_with(ANY, LogFormatter.PLAIN, False)
 
     journal_logs_reader.reset_mock()
 
     headers = {"Accept": "text/x-log"}
     await api_client.get("/host/logs/identifiers/test", headers=headers)
-    journal_logs_reader.assert_called_once_with(ANY, LogFormatter.VERBOSE)
+    journal_logs_reader.assert_called_once_with(ANY, LogFormatter.VERBOSE, False)
 
 
 async def test_advanced_logs_errors(coresys: CoreSys, api_client: TestClient):
@@ -1,23 +1,6 @@
 """Test multicast api."""
 
-from unittest.mock import MagicMock
-
-from aiohttp.test_utils import TestClient
-
-from supervisor.coresys import CoreSys
-
-from tests.api import common_test_api_advanced_logs
-
 
-async def test_api_multicast_logs(
-    api_client: TestClient, journald_logs: MagicMock, coresys: CoreSys, os_available
-):
+async def test_api_multicast_logs(advanced_logs_tester):
     """Test multicast logs."""
-    await common_test_api_advanced_logs(
-        "/multicast",
-        "hassio_multicast",
-        api_client,
-        journald_logs,
-        coresys,
-        os_available,
-    )
+    await advanced_logs_tester("/multicast", "hassio_multicast")
@@ -24,7 +24,7 @@ from supervisor.homeassistant.module import HomeAssistant
 from supervisor.store.addon import AddonStore
 from supervisor.store.repository import Repository
 
-from tests.common import load_json_fixture
+from tests.common import AsyncIterator, load_json_fixture
 from tests.const import TEST_ADDON_SLUG
 
 REPO_URL = "https://github.com/awesome-developer/awesome-repo"
@@ -732,9 +732,10 @@ async def test_api_progress_updates_addon_install_update(
     """Test progress updates sent to Home Assistant for installs/updates."""
     coresys.hardware.disk.get_disk_free_space = lambda x: 5000
     coresys.core.set_state(CoreState.RUNNING)
-    coresys.docker.docker.api.pull.return_value = load_json_fixture(
-        "docker_pull_image_log.json"
-    )
+    logs = load_json_fixture("docker_pull_image_log.json")
+    coresys.docker.images.pull.return_value = AsyncIterator(logs)
+
     coresys.arch._supported_arch = ["amd64"]  # pylint: disable=protected-access
     install_addon_example.data_store["version"] = AwesomeVersion("2.0.0")
 
@@ -772,29 +773,29 @@ async def test_api_progress_updates_addon_install_update(
         },
         {
             "stage": None,
-            "progress": 1.2,
+            "progress": 1.7,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 2.8,
+            "progress": 4.0,
             "done": False,
         },
     ]
     assert events[-5:] == [
         {
             "stage": None,
-            "progress": 97.2,
+            "progress": 98.2,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 98.4,
+            "progress": 98.3,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 99.4,
+            "progress": 99.3,
             "done": False,
         },
         {
@@ -18,8 +18,7 @@ from supervisor.store.repository import Repository
 from supervisor.supervisor import Supervisor
 from supervisor.updater import Updater
 
-from tests.api import common_test_api_advanced_logs
-from tests.common import load_json_fixture
+from tests.common import AsyncIterator, load_json_fixture
 from tests.dbus_service_mocks.base import DBusServiceMock
 from tests.dbus_service_mocks.os_agent import OSAgent as OSAgentService
 
@@ -155,18 +154,9 @@ async def test_api_supervisor_options_diagnostics(
     assert coresys.dbus.agent.diagnostics is False
 
 
-async def test_api_supervisor_logs(
-    api_client: TestClient, journald_logs: MagicMock, coresys: CoreSys, os_available
-):
+async def test_api_supervisor_logs(advanced_logs_tester):
     """Test supervisor logs."""
-    await common_test_api_advanced_logs(
-        "/supervisor",
-        "hassio_supervisor",
-        api_client,
-        journald_logs,
-        coresys,
-        os_available,
-    )
+    await advanced_logs_tester("/supervisor", "hassio_supervisor")
 
 
 async def test_api_supervisor_fallback(
@@ -332,9 +322,9 @@ async def test_api_progress_updates_supervisor_update(
     """Test progress updates sent to Home Assistant for updates."""
     coresys.hardware.disk.get_disk_free_space = lambda x: 5000
     coresys.core.set_state(CoreState.RUNNING)
-    coresys.docker.docker.api.pull.return_value = load_json_fixture(
-        "docker_pull_image_log.json"
-    )
+    logs = load_json_fixture("docker_pull_image_log.json")
+    coresys.docker.images.pull.return_value = AsyncIterator(logs)
 
     with (
         patch.object(
@@ -381,29 +371,29 @@ async def test_api_progress_updates_supervisor_update(
         },
         {
             "stage": None,
-            "progress": 1.2,
+            "progress": 1.7,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 2.8,
+            "progress": 4.0,
             "done": False,
         },
     ]
     assert events[-5:] == [
         {
             "stage": None,
-            "progress": 97.2,
+            "progress": 98.2,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 98.4,
+            "progress": 98.3,
             "done": False,
         },
         {
             "stage": None,
-            "progress": 99.4,
+            "progress": 99.3,
             "done": False,
         },
         {
@@ -1,13 +1,14 @@
 """Common test functions."""
 
 import asyncio
+from collections.abc import Sequence
 from datetime import datetime
 from functools import partial
 from importlib import import_module
 from inspect import getclosurevars
 import json
 from pathlib import Path
-from typing import Any
+from typing import Any, Self
 
 from dbus_fast.aio.message_bus import MessageBus
 
@@ -145,3 +146,22 @@ class MockResponse:
 
     async def __aexit__(self, exc_type, exc, tb):
         """Exit the context manager."""
+
+
+class AsyncIterator:
+    """Make list/fixture into async iterator for test mocks."""
+
+    def __init__(self, seq: Sequence[Any]) -> None:
+        """Initialize with sequence."""
+        self.iter = iter(seq)
+
+    def __aiter__(self) -> Self:
+        """Implement aiter."""
+        return self
+
+    async def __anext__(self) -> Any:
+        """Return next in sequence."""
+        try:
+            return next(self.iter)
+        except StopIteration:
+            raise StopAsyncIteration() from None
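A minimal usage sketch for the AsyncIterator helper added above (assuming it is imported from tests.common): it wraps a plain sequence so a MagicMock can stand in for APIs that are consumed with `async for`, such as aiodocker's streaming image pull.

import asyncio
from unittest.mock import MagicMock

from tests.common import AsyncIterator

async def consume(images) -> list:
    # Mirrors how the code under test drains a streamed pull.
    return [entry async for entry in images.pull("test", tag="1.2.3", stream=True)]

mock_images = MagicMock()
mock_images.pull.return_value = AsyncIterator([{"status": "Pulling fs layer"}])
assert asyncio.run(consume(mock_images)) == [{"status": "Pulling fs layer"}]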
@@ -9,6 +9,7 @@ import subprocess
 from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch
 from uuid import uuid4
 
+from aiodocker.docker import DockerImages
 from aiohttp import ClientSession, web
 from aiohttp.test_utils import TestClient
 from awesomeversion import AwesomeVersion
@@ -55,6 +56,7 @@ from supervisor.store.repository import Repository
 from supervisor.utils.dt import utcnow
 
 from .common import (
+    AsyncIterator,
     MockResponse,
     load_binary_fixture,
     load_fixture,
@@ -112,40 +114,46 @@ async def supervisor_name() -> None:
 @pytest.fixture
 async def docker() -> DockerAPI:
     """Mock DockerAPI."""
-    images = [MagicMock(tags=["ghcr.io/home-assistant/amd64-hassio-supervisor:latest"])]
-    image = MagicMock()
-    image.attrs = {"Os": "linux", "Architecture": "amd64"}
+    image_inspect = {
+        "Os": "linux",
+        "Architecture": "amd64",
+        "Id": "test123",
+        "RepoTags": ["ghcr.io/home-assistant/amd64-hassio-supervisor:latest"],
+    }
 
     with (
         patch("supervisor.docker.manager.DockerClient", return_value=MagicMock()),
-        patch("supervisor.docker.manager.DockerAPI.images", return_value=MagicMock()),
         patch(
             "supervisor.docker.manager.DockerAPI.containers", return_value=MagicMock()
         ),
-        patch(
-            "supervisor.docker.manager.DockerAPI.api",
-            return_value=(api_mock := MagicMock()),
-        ),
-        patch("supervisor.docker.manager.DockerAPI.images.get", return_value=image),
-        patch("supervisor.docker.manager.DockerAPI.images.list", return_value=images),
-        patch(
-            "supervisor.docker.manager.DockerAPI.info",
-            return_value=MagicMock(),
-        ),
+        patch("supervisor.docker.manager.DockerAPI.api", return_value=MagicMock()),
+        patch("supervisor.docker.manager.DockerAPI.info", return_value=MagicMock()),
         patch("supervisor.docker.manager.DockerAPI.unload"),
+        patch("supervisor.docker.manager.aiodocker.Docker", return_value=MagicMock()),
+        patch(
+            "supervisor.docker.manager.DockerAPI.images",
+            new=PropertyMock(
+                return_value=(docker_images := MagicMock(spec=DockerImages))
+            ),
+        ),
     ):
         docker_obj = await DockerAPI(MagicMock()).post_init()
         docker_obj.config._data = {"registries": {}}
         with patch("supervisor.docker.monitor.DockerMonitor.load"):
             await docker_obj.load()
 
+        docker_images.inspect.return_value = image_inspect
+        docker_images.list.return_value = [image_inspect]
+        docker_images.import_image.return_value = [
+            {"stream": "Loaded image: test:latest\n"}
+        ]
+
+        docker_images.pull.return_value = AsyncIterator([{}])
+
         docker_obj.info.logging = "journald"
         docker_obj.info.storage = "overlay2"
         docker_obj.info.version = AwesomeVersion("1.0.0")
 
-        # Need an iterable for logs
-        api_mock.pull.return_value = []
-
         yield docker_obj
 
 
@@ -838,11 +846,9 @@ async def container(docker: DockerAPI) -> MagicMock:
     """Mock attrs and status for container on attach."""
     docker.containers.get.return_value = addon = MagicMock()
    docker.containers.create.return_value = addon
-    docker.images.build.return_value = (addon, "")
     addon.status = "stopped"
     addon.attrs = {"State": {"ExitCode": 0}}
-    with patch.object(DockerAPI, "pull_image", return_value=addon):
-        yield addon
+    yield addon
 
 
 @pytest.fixture
@@ -184,3 +184,20 @@ async def test_interface_becomes_unmanaged(
     assert wireless.is_connected is False
     assert eth0.connection is None
     assert connection.is_connected is False
+
+
+async def test_unknown_device_type(
+    device_eth0_service: DeviceService, dbus_session_bus: MessageBus
+):
+    """Test unknown device types are handled gracefully."""
+    interface = NetworkInterface("/org/freedesktop/NetworkManager/Devices/1")
+    await interface.connect(dbus_session_bus)
+
+    # Emit an unknown device type (e.g., 1000 which doesn't exist in the enum)
+    device_eth0_service.emit_properties_changed({"DeviceType": 1000})
+    await device_eth0_service.ping()
+
+    # Should return UNKNOWN instead of crashing
+    assert interface.type == DeviceType.UNKNOWN
+    # Wireless should be None since it's not a wireless device
+    assert interface.wireless is None
@@ -41,51 +41,51 @@ async def test_dbus_resolved_info(
     assert resolved.dns_over_tls == DNSOverTLSEnabled.NO
 
     assert len(resolved.dns) == 2
-    assert resolved.dns[0] == [0, 2, inet_aton("127.0.0.1")]
-    assert resolved.dns[1] == [0, 10, inet_pton(AF_INET6, "::1")]
+    assert resolved.dns[0] == (0, 2, inet_aton("127.0.0.1"))
+    assert resolved.dns[1] == (0, 10, inet_pton(AF_INET6, "::1"))
     assert len(resolved.dns_ex) == 2
-    assert resolved.dns_ex[0] == [0, 2, inet_aton("127.0.0.1"), 0, ""]
-    assert resolved.dns_ex[1] == [0, 10, inet_pton(AF_INET6, "::1"), 0, ""]
+    assert resolved.dns_ex[0] == (0, 2, inet_aton("127.0.0.1"), 0, "")
+    assert resolved.dns_ex[1] == (0, 10, inet_pton(AF_INET6, "::1"), 0, "")
 
     assert len(resolved.fallback_dns) == 2
-    assert resolved.fallback_dns[0] == [0, 2, inet_aton("1.1.1.1")]
-    assert resolved.fallback_dns[1] == [
+    assert resolved.fallback_dns[0] == (0, 2, inet_aton("1.1.1.1"))
+    assert resolved.fallback_dns[1] == (
         0,
         10,
         inet_pton(AF_INET6, "2606:4700:4700::1111"),
-    ]
+    )
     assert len(resolved.fallback_dns_ex) == 2
-    assert resolved.fallback_dns_ex[0] == [
+    assert resolved.fallback_dns_ex[0] == (
         0,
         2,
         inet_aton("1.1.1.1"),
         0,
         "cloudflare-dns.com",
-    ]
-    assert resolved.fallback_dns_ex[1] == [
+    )
+    assert resolved.fallback_dns_ex[1] == (
         0,
         10,
         inet_pton(AF_INET6, "2606:4700:4700::1111"),
         0,
         "cloudflare-dns.com",
-    ]
+    )
 
-    assert resolved.current_dns_server == [0, 2, inet_aton("127.0.0.1")]
-    assert resolved.current_dns_server_ex == [
+    assert resolved.current_dns_server == (0, 2, inet_aton("127.0.0.1"))
+    assert resolved.current_dns_server_ex == (
         0,
         2,
         inet_aton("127.0.0.1"),
         0,
         "",
-    ]
+    )
 
     assert len(resolved.domains) == 1
-    assert resolved.domains[0] == [0, "local.hass.io", False]
+    assert resolved.domains[0] == (0, "local.hass.io", False)
 
-    assert resolved.transaction_statistics == [0, 100000]
-    assert resolved.cache_statistics == [10, 50000, 10000]
+    assert resolved.transaction_statistics == (0, 100000)
+    assert resolved.cache_statistics == (10, 50000, 10000)
     assert resolved.dnssec == DNSSECValidation.NO
-    assert resolved.dnssec_statistics == [0, 0, 0, 0]
+    assert resolved.dnssec_statistics == (0, 0, 0, 0)
     assert resolved.dnssec_supported is False
     assert resolved.dnssec_negative_trust_anchors == [
         "168.192.in-addr.arpa",
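The list-to-tuple switch running through these dbus expectations presumably tracks a dbus-fast update that returns D-Bus struct values as tuples. Since Python never equates a list with a tuple, the old list-form expectations would silently stop matching; a two-line illustration:

# A list never compares equal to a tuple, even with identical elements.
assert [0, 2, bytes([127, 0, 0, 1])] != (0, 2, bytes([127, 0, 0, 1]))
assert (0, 2, bytes([127, 0, 0, 1])) == (0, 2, b"\x7f\x00\x00\x01")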
@@ -185,10 +185,10 @@ async def test_start_transient_unit(
         "tmp-test.mount",
         "fail",
         [
-            ["Description", Variant("s", "Test")],
-            ["What", Variant("s", "//homeassistant/config")],
-            ["Type", Variant("s", "cifs")],
-            ["Options", Variant("s", "username=homeassistant,password=password")],
+            ("Description", Variant("s", "Test")),
+            ("What", Variant("s", "//homeassistant/config")),
+            ("Type", Variant("s", "cifs")),
+            ("Options", Variant("s", "username=homeassistant,password=password")),
         ],
         [],
     )
@@ -1,6 +1,6 @@
 """Mock of OS Agent System dbus service."""
 
-from dbus_fast import DBusError
+from dbus_fast import DBusError, ErrorType
 
 from .base import DBusServiceMock, dbus_method
 
@@ -21,6 +21,7 @@ class System(DBusServiceMock):
     object_path = "/io/hass/os/System"
     interface = "io.hass.os.System"
     response_schedule_wipe_device: bool | DBusError = True
+    response_migrate_docker_storage_driver: None | DBusError = None
 
     @dbus_method()
     def ScheduleWipeDevice(self) -> "b":
@@ -28,3 +29,14 @@ class System(DBusServiceMock):
         if isinstance(self.response_schedule_wipe_device, DBusError):
             raise self.response_schedule_wipe_device  # pylint: disable=raising-bad-type
         return self.response_schedule_wipe_device
+
+    @dbus_method()
+    def MigrateDockerStorageDriver(self, backend: "s") -> None:
+        """Migrate Docker storage driver."""
+        if isinstance(self.response_migrate_docker_storage_driver, DBusError):
+            raise self.response_migrate_docker_storage_driver  # pylint: disable=raising-bad-type
+        if backend not in ("overlayfs", "overlay2"):
+            raise DBusError(
+                ErrorType.FAILED,
+                f"unsupported driver: {backend} (only 'overlayfs' and 'overlay2' are supported)",
+            )
@@ -45,8 +45,8 @@ class Resolved(DBusServiceMock):
     def DNS(self) -> "a(iiay)":
         """Get DNS."""
         return [
-            [0, 2, bytes([127, 0, 0, 1])],
-            [
+            (0, 2, bytes([127, 0, 0, 1])),
+            (
                 0,
                 10,
                 bytes(
@@ -69,15 +69,15 @@ class Resolved(DBusServiceMock):
                         0x1,
                     ]
                 ),
-            ],
+            ),
         ]
 
     @dbus_property(access=PropertyAccess.READ)
     def DNSEx(self) -> "a(iiayqs)":
         """Get DNSEx."""
         return [
-            [0, 2, bytes([127, 0, 0, 1]), 0, ""],
-            [
+            (0, 2, bytes([127, 0, 0, 1]), 0, ""),
+            (
                 0,
                 10,
                 bytes(
@@ -102,15 +102,15 @@ class Resolved(DBusServiceMock):
                 ),
                 0,
                 "",
-            ],
+            ),
         ]
 
     @dbus_property(access=PropertyAccess.READ)
     def FallbackDNS(self) -> "a(iiay)":
         """Get FallbackDNS."""
         return [
-            [0, 2, bytes([1, 1, 1, 1])],
-            [
+            (0, 2, bytes([1, 1, 1, 1])),
+            (
                 0,
                 10,
                 bytes(
@@ -133,15 +133,15 @@ class Resolved(DBusServiceMock):
                         0x11,
                     ]
                 ),
-            ],
+            ),
         ]
 
     @dbus_property(access=PropertyAccess.READ)
     def FallbackDNSEx(self) -> "a(iiayqs)":
         """Get FallbackDNSEx."""
         return [
-            [0, 2, bytes([1, 1, 1, 1]), 0, "cloudflare-dns.com"],
-            [
+            (0, 2, bytes([1, 1, 1, 1]), 0, "cloudflare-dns.com"),
+            (
                 0,
                 10,
                 bytes(
@@ -166,33 +166,33 @@ class Resolved(DBusServiceMock):
                 ),
                 0,
                 "cloudflare-dns.com",
-            ],
+            ),
         ]
 
     @dbus_property(access=PropertyAccess.READ)
     def CurrentDNSServer(self) -> "(iiay)":
         """Get CurrentDNSServer."""
-        return [0, 2, bytes([127, 0, 0, 1])]
+        return (0, 2, bytes([127, 0, 0, 1]))
 
     @dbus_property(access=PropertyAccess.READ)
     def CurrentDNSServerEx(self) -> "(iiayqs)":
         """Get CurrentDNSServerEx."""
-        return [0, 2, bytes([127, 0, 0, 1]), 0, ""]
+        return (0, 2, bytes([127, 0, 0, 1]), 0, "")
 
     @dbus_property(access=PropertyAccess.READ)
     def Domains(self) -> "a(isb)":
         """Get Domains."""
-        return [[0, "local.hass.io", False]]
+        return [(0, "local.hass.io", False)]
 
     @dbus_property(access=PropertyAccess.READ)
     def TransactionStatistics(self) -> "(tt)":
         """Get TransactionStatistics."""
-        return [0, 100000]
+        return (0, 100000)
 
     @dbus_property(access=PropertyAccess.READ)
     def CacheStatistics(self) -> "(ttt)":
         """Get CacheStatistics."""
-        return [10, 50000, 10000]
+        return (10, 50000, 10000)
 
     @dbus_property(access=PropertyAccess.READ)
     def DNSSEC(self) -> "s":
@@ -202,7 +202,7 @@ class Resolved(DBusServiceMock):
     @dbus_property(access=PropertyAccess.READ)
     def DNSSECStatistics(self) -> "(tttt)":
         """Get DNSSECStatistics."""
-        return [0, 0, 0, 0]
+        return (0, 0, 0, 0)
 
     @dbus_property(access=PropertyAccess.READ)
     def DNSSECSupported(self) -> "b":
@@ -2,7 +2,8 @@
 
 # pylint: disable=protected-access
 from supervisor.coresys import CoreSys
-from supervisor.docker.interface import DOCKER_HUB, DockerInterface
+from supervisor.docker.const import DOCKER_HUB
+from supervisor.docker.interface import DockerInterface
 
 
 def test_no_credentials(coresys: CoreSys, test_docker_interface: DockerInterface):
@@ -5,10 +5,10 @@ from pathlib import Path
 from typing import Any
 from unittest.mock import ANY, AsyncMock, MagicMock, Mock, PropertyMock, call, patch
 
+import aiodocker
 from awesomeversion import AwesomeVersion
 from docker.errors import DockerException, NotFound
 from docker.models.containers import Container
-from docker.models.images import Image
 import pytest
 from requests import RequestException
 
@@ -16,7 +16,7 @@ from supervisor.addons.manager import Addon
 from supervisor.const import BusEvent, CoreState, CpuArch
 from supervisor.coresys import CoreSys
 from supervisor.docker.const import ContainerState
-from supervisor.docker.interface import DockerInterface
+from supervisor.docker.interface import DOCKER_HUB, DockerInterface
 from supervisor.docker.manager import PullLogEntry, PullProgressDetail
 from supervisor.docker.monitor import DockerContainerStateEvent
 from supervisor.exceptions import (
@@ -26,9 +26,12 @@ from supervisor.exceptions import (
     DockerNotFound,
     DockerRequestError,
 )
-from supervisor.jobs import JobSchedulerOptions, SupervisorJob
+from supervisor.homeassistant.const import WSEvent, WSType
+from supervisor.jobs import ChildJobSyncFilter, JobSchedulerOptions, SupervisorJob
+from supervisor.jobs.decorator import Job
+from supervisor.supervisor import Supervisor
 
-from tests.common import load_json_fixture
+from tests.common import AsyncIterator, load_json_fixture
 
 
 @pytest.mark.parametrize(
@@ -48,35 +51,68 @@ async def test_docker_image_platform(
     platform: str,
 ):
     """Test platform set correctly from arch."""
-    with patch.object(
-        coresys.docker.images, "get", return_value=Mock(id="test:1.2.3")
-    ) as get:
-        await test_docker_interface.install(
-            AwesomeVersion("1.2.3"), "test", arch=cpu_arch
-        )
-        coresys.docker.docker.api.pull.assert_called_once_with(
-            "test", tag="1.2.3", platform=platform, stream=True, decode=True
-        )
-        get.assert_called_once_with("test:1.2.3")
+    coresys.docker.images.inspect.return_value = {"Id": "test:1.2.3"}
+    await test_docker_interface.install(AwesomeVersion("1.2.3"), "test", arch=cpu_arch)
+    coresys.docker.images.pull.assert_called_once_with(
+        "test", tag="1.2.3", platform=platform, auth=None, stream=True
+    )
+    coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
 
 
 async def test_docker_image_default_platform(
     coresys: CoreSys, test_docker_interface: DockerInterface
 ):
     """Test platform set using supervisor arch when omitted."""
+    coresys.docker.images.inspect.return_value = {"Id": "test:1.2.3"}
     with (
         patch.object(
             type(coresys.supervisor), "arch", PropertyMock(return_value="i386")
         ),
-        patch.object(
-            coresys.docker.images, "get", return_value=Mock(id="test:1.2.3")
-        ) as get,
     ):
         await test_docker_interface.install(AwesomeVersion("1.2.3"), "test")
-        coresys.docker.docker.api.pull.assert_called_once_with(
-            "test", tag="1.2.3", platform="linux/386", stream=True, decode=True
+        coresys.docker.images.pull.assert_called_once_with(
+            "test", tag="1.2.3", platform="linux/386", auth=None, stream=True
         )
-        get.assert_called_once_with("test:1.2.3")
+        coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
+
+
+@pytest.mark.parametrize(
+    "image,registry_key",
+    [
+        ("homeassistant/amd64-supervisor", DOCKER_HUB),
+        ("ghcr.io/home-assistant/amd64-supervisor", "ghcr.io"),
+    ],
+)
+async def test_private_registry_credentials_passed_to_pull(
+    coresys: CoreSys,
+    test_docker_interface: DockerInterface,
+    image: str,
+    registry_key: str,
+):
+    """Test credentials for private registries are passed to aiodocker pull."""
+    coresys.docker.images.inspect.return_value = {"Id": f"{image}:1.2.3"}
+
+    # Configure registry credentials
+    coresys.docker.config._data["registries"] = {  # pylint: disable=protected-access
+        registry_key: {"username": "testuser", "password": "testpass"}
+    }
+
+    with patch.object(
+        type(coresys.supervisor), "arch", PropertyMock(return_value="amd64")
+    ):
+        await test_docker_interface.install(
+            AwesomeVersion("1.2.3"), image, arch=CpuArch.AMD64
+        )
+
+    # Verify credentials were passed to aiodocker
+    expected_auth = {"username": "testuser", "password": "testpass"}
+    if registry_key != DOCKER_HUB:
+        expected_auth["registry"] = registry_key
+
+    coresys.docker.images.pull.assert_called_once_with(
+        image, tag="1.2.3", platform="linux/amd64", auth=expected_auth, stream=True
+    )
 
 
 @pytest.mark.parametrize(
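A hedged sketch of the credential selection the new test above encodes: credentials are looked up per registry, and a "registry" key is added to the auth dict only for non-Docker-Hub registries so aiodocker targets that host. The helper name and the host-detection heuristic are assumptions based on the test, not a copy of Supervisor's implementation:

def build_pull_auth(registries: dict, image: str) -> dict | None:
    # Docker convention: the image references a registry host only if its
    # first path component contains a "." or ":"; otherwise it is Docker Hub.
    first = image.split("/")[0]
    registry = first if "." in first or ":" in first else DOCKER_HUB
    if registry not in registries:
        return None  # anonymous pull
    auth = dict(registries[registry])  # {"username": ..., "password": ...}
    if registry != DOCKER_HUB:
        auth["registry"] = registry
    return auth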
@@ -207,57 +243,40 @@ async def test_attach_existing_container(
 
 async def test_attach_container_failure(coresys: CoreSys):
     """Test attach fails to find container but finds image."""
-    container_collection = MagicMock()
-    container_collection.get.side_effect = DockerException()
-    image_collection = MagicMock()
-    image_config = {"Image": "sha256:abc123"}
-    image_collection.get.return_value = Image({"Config": image_config})
-    with (
-        patch(
-            "supervisor.docker.manager.DockerAPI.containers",
-            new=PropertyMock(return_value=container_collection),
-        ),
-        patch(
-            "supervisor.docker.manager.DockerAPI.images",
-            new=PropertyMock(return_value=image_collection),
-        ),
-        patch.object(type(coresys.bus), "fire_event") as fire_event,
-    ):
+    coresys.docker.containers.get.side_effect = DockerException()
+    coresys.docker.images.inspect.return_value.setdefault("Config", {})["Image"] = (
+        "sha256:abc123"
+    )
+    with patch.object(type(coresys.bus), "fire_event") as fire_event:
         await coresys.homeassistant.core.instance.attach(AwesomeVersion("2022.7.3"))
         assert not [
             event
             for event in fire_event.call_args_list
             if event.args[0] == BusEvent.DOCKER_CONTAINER_STATE_CHANGE
         ]
-        assert coresys.homeassistant.core.instance.meta_config == image_config
+        assert (
+            coresys.homeassistant.core.instance.meta_config["Image"] == "sha256:abc123"
+        )
 
 
 async def test_attach_total_failure(coresys: CoreSys):
     """Test attach fails to find container or image."""
-    container_collection = MagicMock()
-    container_collection.get.side_effect = DockerException()
-    image_collection = MagicMock()
-    image_collection.get.side_effect = DockerException()
-    with (
-        patch(
-            "supervisor.docker.manager.DockerAPI.containers",
-            new=PropertyMock(return_value=container_collection),
-        ),
-        patch(
-            "supervisor.docker.manager.DockerAPI.images",
-            new=PropertyMock(return_value=image_collection),
-        ),
-        pytest.raises(DockerError),
-    ):
+    coresys.docker.containers.get.side_effect = DockerException
+    coresys.docker.images.inspect.side_effect = aiodocker.DockerError(
+        400, {"message": ""}
+    )
+    with pytest.raises(DockerError):
         await coresys.homeassistant.core.instance.attach(AwesomeVersion("2022.7.3"))
 
 
-@pytest.mark.parametrize("err", [DockerException(), RequestException()])
+@pytest.mark.parametrize(
+    "err", [aiodocker.DockerError(400, {"message": ""}), RequestException()]
+)
 async def test_image_pull_fail(
     coresys: CoreSys, capture_exception: Mock, err: Exception
 ):
     """Test failure to pull image."""
-    coresys.docker.images.get.side_effect = err
+    coresys.docker.images.inspect.side_effect = err
     with pytest.raises(DockerError):
         await coresys.homeassistant.core.instance.install(
             AwesomeVersion("2022.7.3"), arch=CpuArch.AMD64
@@ -289,15 +308,16 @@ async def test_install_fires_progress_events(
     coresys: CoreSys, test_docker_interface: DockerInterface
 ):
     """Test progress events are fired during an install for listeners."""
 
     # This is from a sample pull. Filtered log to just one per unique status for test
-    coresys.docker.docker.api.pull.return_value = [
+    logs = [
         {
             "status": "Pulling from home-assistant/odroid-n2-homeassistant",
             "id": "2025.7.2",
         },
         {"status": "Already exists", "progressDetail": {}, "id": "6e771e15690e"},
         {"status": "Pulling fs layer", "progressDetail": {}, "id": "1578b14a573c"},
-        {"status": "Waiting", "progressDetail": {}, "id": "2488d0e401e1"},
+        {"status": "Waiting", "progressDetail": {}, "id": "1578b14a573c"},
         {
             "status": "Downloading",
             "progressDetail": {"current": 1378, "total": 1486},
@@ -312,7 +332,11 @@ async def test_install_fires_progress_events(
             "id": "1578b14a573c",
         },
         {"status": "Pull complete", "progressDetail": {}, "id": "1578b14a573c"},
-        {"status": "Verifying Checksum", "progressDetail": {}, "id": "6a1e931d8f88"},
+        {
+            "status": "Verifying Checksum",
+            "progressDetail": {},
+            "id": "6a1e931d8f88",
+        },
         {
             "status": "Digest: sha256:490080d7da0f385928022927990e04f604615f7b8c622ef3e58253d0f089881d"
         },
@@ -320,6 +344,7 @@ async def test_install_fires_progress_events(
             "status": "Status: Downloaded newer image for ghcr.io/home-assistant/odroid-n2-homeassistant:2025.7.2"
         },
     ]
+    coresys.docker.images.pull.return_value = AsyncIterator(logs)
 
     events: list[PullLogEntry] = []
 
@@ -334,10 +359,10 @@ async def test_install_fires_progress_events(
         ),
     ):
         await test_docker_interface.install(AwesomeVersion("1.2.3"), "test")
-        coresys.docker.docker.api.pull.assert_called_once_with(
-            "test", tag="1.2.3", platform="linux/386", stream=True, decode=True
+        coresys.docker.images.pull.assert_called_once_with(
+            "test", tag="1.2.3", platform="linux/386", auth=None, stream=True
         )
-        coresys.docker.images.get.assert_called_once_with("test:1.2.3")
+        coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
 
     await asyncio.sleep(1)
     assert events == [
@@ -362,7 +387,7 @@ async def test_install_fires_progress_events(
             job_id=ANY,
             status="Waiting",
             progress_detail=PullProgressDetail(),
-            id="2488d0e401e1",
+            id="1578b14a573c",
         ),
         PullLogEntry(
             job_id=ANY,
@@ -415,10 +440,11 @@ async def test_install_progress_rounding_does_not_cause_misses(
|
|||||||
 ):
     """Test extremely close progress events do not create rounding issues."""
     coresys.core.set_state(CoreState.RUNNING)

     # Current numbers chosen to create a rounding issue with original code
     # Where a progress update came in with a value between the actual previous
     # value and what it was rounded to. It should not raise an out of order exception
-    coresys.docker.docker.api.pull.return_value = [
+    logs = [
         {
             "status": "Pulling from home-assistant/odroid-n2-homeassistant",
             "id": "2025.7.1",
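For context on the rounding hazard this test guards against: if a job stores a rounded progress value but validates each new raw update against that stored value, a later and strictly larger raw update can still appear to go backwards. A small illustration with hypothetical numbers:

    # Stored progress is rounded up; the next raw update lands between the
    # true previous value and its rounded form.
    previous_raw = 49.96
    stored = round(previous_raw, 1)   # 50.0
    next_raw = 49.98                  # real progress moved forward

    assert next_raw > previous_raw    # monotonic in reality
    assert next_raw < stored          # naive check against the rounded value
                                      # would wrongly flag "out of order"
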
@@ -458,29 +484,25 @@ async def test_install_progress_rounding_does_not_cause_misses(
"status": "Status: Downloaded newer image for ghcr.io/home-assistant/odroid-n2-homeassistant:2025.7.1"
|
"status": "Status: Downloaded newer image for ghcr.io/home-assistant/odroid-n2-homeassistant:2025.7.1"
|
||||||
},
|
},
|
||||||
]
|
]
|
||||||
|
coresys.docker.images.pull.return_value = AsyncIterator(logs)
|
||||||
|
|
||||||
with (
|
# Schedule job so we can listen for the end. Then we can assert against the WS mock
|
||||||
patch.object(
|
event = asyncio.Event()
|
||||||
type(coresys.supervisor), "arch", PropertyMock(return_value="i386")
|
job, install_task = coresys.jobs.schedule_job(
|
||||||
),
|
test_docker_interface.install,
|
||||||
):
|
JobSchedulerOptions(),
|
||||||
# Schedule job so we can listen for the end. Then we can assert against the WS mock
|
AwesomeVersion("1.2.3"),
|
||||||
event = asyncio.Event()
|
"test",
|
||||||
job, install_task = coresys.jobs.schedule_job(
|
)
|
||||||
test_docker_interface.install,
|
|
||||||
JobSchedulerOptions(),
|
|
||||||
AwesomeVersion("1.2.3"),
|
|
||||||
"test",
|
|
||||||
)
|
|
||||||
|
|
||||||
async def listen_for_job_end(reference: SupervisorJob):
|
async def listen_for_job_end(reference: SupervisorJob):
|
||||||
if reference.uuid != job.uuid:
|
if reference.uuid != job.uuid:
|
||||||
return
|
return
|
||||||
event.set()
|
event.set()
|
||||||
|
|
||||||
coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, listen_for_job_end)
|
coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, listen_for_job_end)
|
||||||
await install_task
|
await install_task
|
||||||
await event.wait()
|
await event.wait()
|
||||||
|
|
||||||
capture_exception.assert_not_called()
|
capture_exception.assert_not_called()
|
||||||
|
|
||||||
@@ -513,11 +535,13 @@ async def test_install_raises_on_pull_error(
     exc_msg: str,
 ):
     """Test exceptions raised from errors in pull log."""
-    coresys.docker.docker.api.pull.return_value = [
+    logs = [
         {
             "status": "Pulling from home-assistant/odroid-n2-homeassistant",
             "id": "2025.7.2",
         },
+        {"status": "Pulling fs layer", "progressDetail": {}, "id": "1578b14a573c"},
         {
             "status": "Downloading",
             "progressDetail": {"current": 1378, "total": 1486},
@@ -526,6 +550,7 @@ async def test_install_raises_on_pull_error(
         },
         error_log,
     ]
+    coresys.docker.images.pull.return_value = AsyncIterator(logs)

     with pytest.raises(exc_type, match=exc_msg):
         await test_docker_interface.install(AwesomeVersion("1.2.3"), "test")
@@ -539,11 +564,11 @@ async def test_install_progress_handles_download_restart(
 ):
     """Test install handles docker progress events that include a download restart."""
     coresys.core.set_state(CoreState.RUNNING)

     # Fixture emulates a download restart as it docker logs it
     # A log out of order exception should not be raised
-    coresys.docker.docker.api.pull.return_value = load_json_fixture(
-        "docker_pull_image_log_restart.json"
-    )
+    logs = load_json_fixture("docker_pull_image_log_restart.json")
+    coresys.docker.images.pull.return_value = AsyncIterator(logs)

     with (
         patch.object(
@@ -571,22 +596,45 @@ async def test_install_progress_handles_download_restart(
         capture_exception.assert_not_called()


+@pytest.mark.parametrize(
+    "extract_log",
+    [
+        {
+            "status": "Extracting",
+            "progressDetail": {"current": 96, "total": 96},
+            "progress": "[==================================================>] 96B/96B",
+            "id": "02a6e69d8d00",
+        },
+        {
+            "status": "Extracting",
+            "progressDetail": {"current": 1, "units": "s"},
+            "progress": "1 s",
+            "id": "02a6e69d8d00",
+        },
+    ],
+    ids=["normal_extract_log", "containerd_snapshot_extract_log"],
+)
 async def test_install_progress_handles_layers_skipping_download(
     coresys: CoreSys,
     test_docker_interface: DockerInterface,
     capture_exception: Mock,
+    extract_log: dict[str, Any],
 ):
     """Test install handles small layers that skip downloading phase and go directly to download complete.

     Reproduces the real-world scenario from Supervisor issue #6286:
     - Small layer (02a6e69d8d00) completes Download complete at 10:14:08 without ever Downloading
     - Normal layer (3f4a84073184) starts Downloading at 10:14:09 with progress updates

+    Under containerd snapshotter this presumably can still occur and Supervisor will have even less info
+    since extract logs don't have a total. Supervisor should generally just ignore these and set progress
+    from the larger images that take all the time.
     """
     coresys.core.set_state(CoreState.RUNNING)

     # Reproduce EXACT sequence from SupervisorNoUpdateProgressLogs.txt:
     # Small layer (02a6e69d8d00) completes BEFORE normal layer (3f4a84073184) starts downloading
-    coresys.docker.docker.api.pull.return_value = [
+    logs = [
         {"status": "Pulling from test/image", "id": "latest"},
         # Small layer that skips downloading (02a6e69d8d00 in logs, 96 bytes)
         {"status": "Pulling fs layer", "progressDetail": {}, "id": "02a6e69d8d00"},
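A note on the second parametrized case: under the containerd snapshotter, Docker reports extraction progress as elapsed seconds ("units": "s") with no byte total, so there is nothing to scale a percentage from. A hedged sketch of how a consumer might treat such events (the helper name and exact policy are hypothetical, not Supervisor's actual code):

    def layer_progress(detail: dict) -> float | None:
        """Return 0-100 progress for a layer, or None when it cannot be derived.

        Containerd-style extract events carry elapsed seconds ("units": "s")
        and no "total", so they yield no percentage and are best ignored.
        """
        total = detail.get("total")
        if not total or detail.get("units") == "s":
            return None  # let byte-based layers drive overall progress
        return min(100.0, detail.get("current", 0) / total * 100)
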
@@ -624,16 +672,12 @@ async def test_install_progress_handles_layers_skipping_download(
         },
         {"status": "Pull complete", "progressDetail": {}, "id": "3f4a84073184"},
         # Small layer finally extracts (10:14:58 in logs)
-        {
-            "status": "Extracting",
-            "progressDetail": {"current": 96, "total": 96},
-            "progress": "[==================================================>] 96B/96B",
-            "id": "02a6e69d8d00",
-        },
+        extract_log,
         {"status": "Pull complete", "progressDetail": {}, "id": "02a6e69d8d00"},
         {"status": "Digest: sha256:test"},
         {"status": "Status: Downloaded newer image for test/image:latest"},
     ]
+    coresys.docker.images.pull.return_value = AsyncIterator(logs)

     # Capture immutable snapshots of install job progress using job.as_dict()
     # This solves the mutable object problem - we snapshot state at call time
@@ -665,13 +709,243 @@ async def test_install_progress_handles_layers_skipping_download(
     await install_task
     await event.wait()

-    # First update from layer download should have rather low progress ((260937/25445459) / 2 ~ 0.5%)
-    assert install_job_snapshots[0]["progress"] < 1
+    # First update from layer download should have rather low progress ((260937/25371463) ~= 1%)
+    assert install_job_snapshots[0]["progress"] < 2

-    # Total 8 events should lead to a progress update on the install job
-    assert len(install_job_snapshots) == 8
+    # Total 7 events should lead to a progress update on the install job:
+    # 3 Downloading events + Download complete (70%) + Extracting + Pull complete (100%) + stage change
+    # Note: The small placeholder layer ({1,1}) is excluded from progress calculation
+    assert len(install_job_snapshots) == 7

     # Job should complete successfully
     assert job.done is True
     assert job.progress == 100
     capture_exception.assert_not_called()


+async def test_missing_total_handled_gracefully(
+    coresys: CoreSys,
+    test_docker_interface: DockerInterface,
+    ha_ws_client: AsyncMock,
+    capture_exception: Mock,
+):
+    """Test missing 'total' fields in progress details handled gracefully."""
+    coresys.core.set_state(CoreState.RUNNING)
+
+    # Progress details with missing 'total' fields observed in real-world pulls
+    logs = [
+        {
+            "status": "Pulling from home-assistant/odroid-n2-homeassistant",
+            "id": "2025.7.1",
+        },
+        {"status": "Pulling fs layer", "progressDetail": {}, "id": "1e214cd6d7d0"},
+        {
+            "status": "Downloading",
+            "progressDetail": {"current": 436480882},
+            "progress": "[===================================================] 436.5MB/436.5MB",
+            "id": "1e214cd6d7d0",
+        },
+        {"status": "Verifying Checksum", "progressDetail": {}, "id": "1e214cd6d7d0"},
+        {"status": "Download complete", "progressDetail": {}, "id": "1e214cd6d7d0"},
+        {
+            "status": "Extracting",
+            "progressDetail": {"current": 436480882},
+            "progress": "[===================================================] 436.5MB/436.5MB",
+            "id": "1e214cd6d7d0",
+        },
+        {"status": "Pull complete", "progressDetail": {}, "id": "1e214cd6d7d0"},
+        {
+            "status": "Digest: sha256:7d97da645f232f82a768d0a537e452536719d56d484d419836e53dbe3e4ec736"
+        },
+        {
+            "status": "Status: Downloaded newer image for ghcr.io/home-assistant/odroid-n2-homeassistant:2025.7.1"
+        },
+    ]
+    coresys.docker.images.pull.return_value = AsyncIterator(logs)
+
+    # Schedule job so we can listen for the end. Then we can assert against the WS mock
+    event = asyncio.Event()
+    job, install_task = coresys.jobs.schedule_job(
+        test_docker_interface.install,
+        JobSchedulerOptions(),
+        AwesomeVersion("1.2.3"),
+        "test",
+    )
+
+    async def listen_for_job_end(reference: SupervisorJob):
+        if reference.uuid != job.uuid:
+            return
+        event.set()
+
+    coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, listen_for_job_end)
+    await install_task
+    await event.wait()
+
+    capture_exception.assert_not_called()
+
+
+async def test_install_progress_containerd_snapshot(
+    coresys: CoreSys, ha_ws_client: AsyncMock
+):
+    """Test install handles docker progress events using containerd snapshotter."""
+    coresys.core.set_state(CoreState.RUNNING)
+
+    class TestDockerInterface(DockerInterface):
+        """Test interface for events."""
+
+        @property
+        def name(self) -> str:
+            """Name of test interface."""
+            return "test_interface"
+
+        @Job(
+            name="mock_docker_interface_install",
+            child_job_syncs=[
+                ChildJobSyncFilter("docker_interface_install", progress_allocation=1.0)
+            ],
+        )
+        async def mock_install(self) -> None:
+            """Mock install."""
+            await super().install(
+                AwesomeVersion("1.2.3"), image="test", arch=CpuArch.I386
+            )
+
+    # Fixture emulates log as received when using containerd snapshotter
+    # Should not error but progress gets choppier once extraction starts
+    logs = load_json_fixture("docker_pull_image_log_containerd_snapshot.json")
+    coresys.docker.images.pull.return_value = AsyncIterator(logs)
+    test_docker_interface = TestDockerInterface(coresys)
+
+    with patch.object(Supervisor, "arch", PropertyMock(return_value="i386")):
+        await test_docker_interface.mock_install()
+        coresys.docker.images.pull.assert_called_once_with(
+            "test", tag="1.2.3", platform="linux/386", auth=None, stream=True
+        )
+        coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
+
+    await asyncio.sleep(1)
+
+    def job_event(progress: float, done: bool = False):
+        return {
+            "type": WSType.SUPERVISOR_EVENT,
+            "data": {
+                "event": WSEvent.JOB,
+                "data": {
+                    "name": "mock_docker_interface_install",
+                    "reference": "test_interface",
+                    "uuid": ANY,
+                    "progress": progress,
+                    "stage": None,
+                    "done": done,
+                    "parent_id": None,
+                    "errors": [],
+                    "created": ANY,
+                    "extra": None,
+                },
+            },
+        }
+
+    # Get progress values from the events
+    job_events = [
+        c.args[0]
+        for c in ha_ws_client.async_send_command.call_args_list
+        if c.args[0].get("data", {}).get("event") == WSEvent.JOB
+        and c.args[0].get("data", {}).get("data", {}).get("name")
+        == "mock_docker_interface_install"
+    ]
+    progress_values = [e["data"]["data"]["progress"] for e in job_events]
+
+    # Should have multiple progress updates (not just 0 and 100)
+    assert len(progress_values) >= 10, (
+        f"Expected >=10 progress updates, got {len(progress_values)}"
+    )
+
+    # Progress should be monotonically increasing
+    for i in range(1, len(progress_values)):
+        assert progress_values[i] >= progress_values[i - 1], (
+            f"Progress decreased at index {i}: {progress_values[i - 1]} -> {progress_values[i]}"
+        )
+
+    # Should start at 0 and end at 100
+    assert progress_values[0] == 0
+    assert progress_values[-1] == 100
+
+    # Should have progress values in the downloading phase (< 70%)
+    # Note: with layer scaling, early progress may be lower than before
+    downloading_progress = [p for p in progress_values if 0 < p < 70]
+    assert len(downloading_progress) > 0, (
+        "Expected progress updates during downloading phase"
+    )
+
+
+async def test_install_progress_containerd_snapshotter_real_world(
+    coresys: CoreSys, ha_ws_client: AsyncMock
+):
+    """Test install handles real-world containerd snapshotter events.
+
+    This test uses real pull events captured from a Home Assistant Core update
+    where some layers skip the Downloading phase entirely (going directly from
+    "Pulling fs layer" to "Download complete"). This causes the bug where progress
+    jumps from 0 to 100 without intermediate updates.
+
+    Root cause: _update_install_job_status() returns early if ANY layer has
+    extra=None. Layers that skip Downloading don't get extra until Download complete,
+    so progress cannot be calculated until ALL layers reach Download complete.
+    """
+    coresys.core.set_state(CoreState.RUNNING)
+
+    class TestDockerInterface(DockerInterface):
+        """Test interface for events."""
+
+        @property
+        def name(self) -> str:
+            """Name of test interface."""
+            return "test_interface"
+
+        @Job(
+            name="mock_docker_interface_install_realworld",
+            child_job_syncs=[
+                ChildJobSyncFilter("docker_interface_install", progress_allocation=1.0)
+            ],
+        )
+        async def mock_install(self) -> None:
+            """Mock install."""
+            await super().install(
+                AwesomeVersion("1.2.3"), image="test", arch=CpuArch.I386
+            )
+
+    # Real-world fixture: 12 layers, 262 Downloading events
+    # Some layers skip Downloading entirely (small layers with containerd snapshotter)
+    logs = load_json_fixture("docker_pull_image_log_containerd_snapshotter_real.json")
+    coresys.docker.images.pull.return_value = AsyncIterator(logs)
+    test_docker_interface = TestDockerInterface(coresys)
+
+    with patch.object(Supervisor, "arch", PropertyMock(return_value="i386")):
+        await test_docker_interface.mock_install()
+
+    await asyncio.sleep(1)
+
+    # Get progress events for the parent job (what UI sees)
+    job_events = [
+        c.args[0]
+        for c in ha_ws_client.async_send_command.call_args_list
+        if c.args[0].get("data", {}).get("event") == WSEvent.JOB
+        and c.args[0].get("data", {}).get("data", {}).get("name")
+        == "mock_docker_interface_install_realworld"
+    ]
+    progress_values = [e["data"]["data"]["progress"] for e in job_events]
+
+    # We should have intermediate progress updates, not just 0 and 100
+    assert len(progress_values) > 3, (
+        f"BUG: Progress jumped 0->100 without intermediate updates. "
+        f"Got {len(progress_values)} updates: {progress_values}. "
+        f"Expected intermediate progress during the 262 Downloading events."
+    )
+
+    # Progress should be monotonically increasing
+    for i in range(1, len(progress_values)):
+        assert progress_values[i] >= progress_values[i - 1]
+
+    # Should see progress in downloading phase (0-70%)
+    downloading_progress = [p for p in progress_values if 0 < p < 70]
+    assert len(downloading_progress) > 0
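Both containerd tests above recover the UI-visible progress series by filtering the mocked Home Assistant WebSocket calls and then checking monotonicity. As a self-contained illustration of that pattern, using only unittest.mock and placeholder event names rather than Supervisor's real types:

    from unittest.mock import MagicMock

    ws = MagicMock()
    # Pretend the code under test pushed three job updates over the websocket.
    for progress in (0, 42.5, 100):
        ws.async_send_command({"data": {"event": "job", "data": {"progress": progress}}})

    progress_values = [
        c.args[0]["data"]["data"]["progress"]
        for c in ws.async_send_command.call_args_list
        if c.args[0].get("data", {}).get("event") == "job"
    ]
    assert progress_values == [0, 42.5, 100]
    # The same monotonicity check the tests perform:
    assert all(a <= b for a, b in zip(progress_values, progress_values[1:]))
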
@@ -1,9 +1,10 @@
"""Test Docker manager."""
|
"""Test Docker manager."""
|
||||||
|
|
||||||
import asyncio
|
import asyncio
|
||||||
|
from pathlib import Path
|
||||||
from unittest.mock import MagicMock, patch
|
from unittest.mock import MagicMock, patch
|
||||||
|
|
||||||
from docker.errors import DockerException
|
from docker.errors import APIError, DockerException, NotFound
|
||||||
import pytest
|
import pytest
|
||||||
from requests import RequestException
|
from requests import RequestException
|
||||||
|
|
||||||
@@ -20,7 +21,7 @@ async def test_run_command_success(docker: DockerAPI):
mock_container.logs.return_value = b"command output"
|
mock_container.logs.return_value = b"command output"
|
||||||
|
|
||||||
# Mock docker containers.run to return our mock container
|
# Mock docker containers.run to return our mock container
|
||||||
docker.docker.containers.run.return_value = mock_container
|
docker.dockerpy.containers.run.return_value = mock_container
|
||||||
|
|
||||||
# Execute the command
|
# Execute the command
|
||||||
result = docker.run_command(
|
result = docker.run_command(
|
||||||
@@ -33,7 +34,7 @@ async def test_run_command_success(docker: DockerAPI):
assert result.output == b"command output"
|
assert result.output == b"command output"
|
||||||
|
|
||||||
# Verify docker.containers.run was called correctly
|
# Verify docker.containers.run was called correctly
|
||||||
docker.docker.containers.run.assert_called_once_with(
|
docker.dockerpy.containers.run.assert_called_once_with(
|
||||||
"alpine:3.18",
|
"alpine:3.18",
|
||||||
command="echo hello",
|
command="echo hello",
|
||||||
detach=True,
|
detach=True,
|
||||||
@@ -55,7 +56,7 @@ async def test_run_command_with_defaults(docker: DockerAPI):
mock_container.logs.return_value = b"error output"
|
mock_container.logs.return_value = b"error output"
|
||||||
|
|
||||||
# Mock docker containers.run to return our mock container
|
# Mock docker containers.run to return our mock container
|
||||||
docker.docker.containers.run.return_value = mock_container
|
docker.dockerpy.containers.run.return_value = mock_container
|
||||||
|
|
||||||
# Execute the command with minimal parameters
|
# Execute the command with minimal parameters
|
||||||
result = docker.run_command(image="ubuntu")
|
result = docker.run_command(image="ubuntu")
|
||||||
@@ -66,7 +67,7 @@ async def test_run_command_with_defaults(docker: DockerAPI):
assert result.output == b"error output"
|
assert result.output == b"error output"
|
||||||
|
|
||||||
# Verify docker.containers.run was called with defaults
|
# Verify docker.containers.run was called with defaults
|
||||||
docker.docker.containers.run.assert_called_once_with(
|
docker.dockerpy.containers.run.assert_called_once_with(
|
||||||
"ubuntu:latest", # default tag
|
"ubuntu:latest", # default tag
|
||||||
command=None, # default command
|
command=None, # default command
|
||||||
detach=True,
|
detach=True,
|
||||||
@@ -81,7 +82,7 @@ async def test_run_command_with_defaults(docker: DockerAPI):
 async def test_run_command_docker_exception(docker: DockerAPI):
     """Test command execution when Docker raises an exception."""
     # Mock docker containers.run to raise DockerException
-    docker.docker.containers.run.side_effect = DockerException("Docker error")
+    docker.dockerpy.containers.run.side_effect = DockerException("Docker error")

     # Execute the command and expect DockerError
     with pytest.raises(DockerError, match="Can't execute command: Docker error"):
@@ -91,7 +92,7 @@ async def test_run_command_docker_exception(docker: DockerAPI):
 async def test_run_command_request_exception(docker: DockerAPI):
     """Test command execution when requests raises an exception."""
     # Mock docker containers.run to raise RequestException
-    docker.docker.containers.run.side_effect = RequestException("Connection error")
+    docker.dockerpy.containers.run.side_effect = RequestException("Connection error")

     # Execute the command and expect DockerError
     with pytest.raises(DockerError, match="Can't execute command: Connection error"):
@@ -104,7 +105,7 @@ async def test_run_command_cleanup_on_exception(docker: DockerAPI):
     mock_container = MagicMock()

     # Mock docker.containers.run to return container, but container.wait to raise exception
-    docker.docker.containers.run.return_value = mock_container
+    docker.dockerpy.containers.run.return_value = mock_container
     mock_container.wait.side_effect = DockerException("Wait failed")

     # Execute the command and expect DockerError
@@ -123,7 +124,7 @@ async def test_run_command_custom_stdout_stderr(docker: DockerAPI):
mock_container.logs.return_value = b"output"
|
mock_container.logs.return_value = b"output"
|
||||||
|
|
||||||
# Mock docker containers.run to return our mock container
|
# Mock docker containers.run to return our mock container
|
||||||
docker.docker.containers.run.return_value = mock_container
|
docker.dockerpy.containers.run.return_value = mock_container
|
||||||
|
|
||||||
# Execute the command with custom stdout/stderr
|
# Execute the command with custom stdout/stderr
|
||||||
result = docker.run_command(
|
result = docker.run_command(
|
||||||
@@ -150,7 +151,7 @@ async def test_run_container_with_cidfile(
     cidfile_path = coresys.config.path_cid_files / f"{container_name}.cid"
     extern_cidfile_path = coresys.config.path_extern_cid_files / f"{container_name}.cid"

-    docker.docker.containers.run.return_value = mock_container
+    docker.dockerpy.containers.run.return_value = mock_container

     # Mock container creation
     with patch.object(
@@ -351,3 +352,101 @@ async def test_run_container_with_leftover_cidfile_directory(
     assert cidfile_path.read_text() == mock_container.id

     assert result == mock_container
+
+
+async def test_repair(coresys: CoreSys, caplog: pytest.LogCaptureFixture):
+    """Test repair API."""
+    coresys.docker.dockerpy.networks.get.side_effect = [
+        hassio := MagicMock(
+            attrs={
+                "Containers": {
+                    "good": {"Name": "good"},
+                    "corrupt": {"Name": "corrupt"},
+                    "fail": {"Name": "fail"},
+                }
+            }
+        ),
+        host := MagicMock(attrs={"Containers": {}}),
+    ]
+    coresys.docker.dockerpy.containers.get.side_effect = [
+        MagicMock(),
+        NotFound("corrupt"),
+        DockerException("fail"),
+    ]
+
+    await coresys.run_in_executor(coresys.docker.repair)
+
+    coresys.docker.dockerpy.api.prune_containers.assert_called_once()
+    coresys.docker.dockerpy.api.prune_images.assert_called_once_with(
+        filters={"dangling": False}
+    )
+    coresys.docker.dockerpy.api.prune_builds.assert_called_once()
+    coresys.docker.dockerpy.api.prune_volumes.assert_called_once()
+    coresys.docker.dockerpy.api.prune_networks.assert_called_once()
+    hassio.disconnect.assert_called_once_with("corrupt", force=True)
+    host.disconnect.assert_not_called()
+    assert "Docker fatal error on container fail on hassio" in caplog.text
+
+
+async def test_repair_failures(coresys: CoreSys, caplog: pytest.LogCaptureFixture):
+    """Test repair proceeds best it can through failures."""
+    coresys.docker.dockerpy.api.prune_containers.side_effect = APIError("fail")
+    coresys.docker.dockerpy.api.prune_images.side_effect = APIError("fail")
+    coresys.docker.dockerpy.api.prune_builds.side_effect = APIError("fail")
+    coresys.docker.dockerpy.api.prune_volumes.side_effect = APIError("fail")
+    coresys.docker.dockerpy.api.prune_networks.side_effect = APIError("fail")
+    coresys.docker.dockerpy.networks.get.side_effect = NotFound("missing")
+
+    await coresys.run_in_executor(coresys.docker.repair)
+
+    assert "Error for containers prune: fail" in caplog.text
+    assert "Error for images prune: fail" in caplog.text
+    assert "Error for builds prune: fail" in caplog.text
+    assert "Error for volumes prune: fail" in caplog.text
+    assert "Error for networks prune: fail" in caplog.text
+    assert "Error for networks hassio prune: missing" in caplog.text
+    assert "Error for networks host prune: missing" in caplog.text
+
+
+@pytest.mark.parametrize("log_starter", [("Loaded image ID"), ("Loaded image")])
+async def test_import_image(coresys: CoreSys, tmp_path: Path, log_starter: str):
+    """Test importing an image into docker."""
+    (test_tar := tmp_path / "test.tar").touch()
+    coresys.docker.images.import_image.return_value = [
+        {"stream": f"{log_starter}: imported"}
+    ]
+    coresys.docker.images.inspect.return_value = {"Id": "imported"}
+
+    image = await coresys.docker.import_image(test_tar)
+
+    assert image["Id"] == "imported"
+    coresys.docker.images.inspect.assert_called_once_with("imported")
+
+
+async def test_import_image_error(coresys: CoreSys, tmp_path: Path):
+    """Test failure importing an image into docker."""
+    (test_tar := tmp_path / "test.tar").touch()
+    coresys.docker.images.import_image.return_value = [
+        {"errorDetail": {"message": "fail"}}
+    ]
+
+    with pytest.raises(DockerError, match="Can't import image from tar: fail"):
+        await coresys.docker.import_image(test_tar)
+
+    coresys.docker.images.inspect.assert_not_called()
+
+
+async def test_import_multiple_images_in_tar(
+    coresys: CoreSys, tmp_path: Path, caplog: pytest.LogCaptureFixture
+):
+    """Test importing an image into docker."""
+    (test_tar := tmp_path / "test.tar").touch()
+    coresys.docker.images.import_image.return_value = [
+        {"stream": "Loaded image: imported-1"},
+        {"stream": "Loaded image: imported-2"},
+    ]
+
+    assert await coresys.docker.import_image(test_tar) is None
+
+    assert "Unexpected image count 2 while importing image from tar" in caplog.text
+    coresys.docker.images.inspect.assert_not_called()
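For orientation, the repair tests above suggest the rough shape of the repair flow: prune each Docker resource type while tolerating per-step API errors, then sanity-check the containers attached to the hassio and host networks. A non-authoritative sketch under those assumptions (the real implementation may differ):

    def repair_sketch(dockerpy, logger):
        """Rough sketch of the pruning flow the tests above exercise.

        `dockerpy` stands in for a docker-py client; the error strings mirror
        the asserted log lines.
        """
        for name, prune in (
            ("containers", dockerpy.api.prune_containers),
            ("images", lambda: dockerpy.api.prune_images(filters={"dangling": False})),
            ("builds", dockerpy.api.prune_builds),
            ("volumes", dockerpy.api.prune_volumes),
            ("networks", dockerpy.api.prune_networks),
        ):
            try:
                prune()
            except Exception as err:  # docker.errors.APIError in practice
                logger.warning("Error for %s prune: %s", name, err)
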
@@ -88,7 +88,7 @@ async def test_events(
 ):
     """Test events created from docker events."""
     event["Actor"]["Attributes"]["name"] = "some_container"
-    event["id"] = "abc123"
+    event["Actor"]["ID"] = "abc123"
     event["time"] = 123
     with (
         patch(
@@ -131,12 +131,12 @@ async def test_unlabeled_container(coresys: CoreSys):
             new=PropertyMock(
                 return_value=[
                     {
-                        "id": "abc123",
                         "time": 123,
                         "Type": "container",
                         "Action": "die",
                         "Actor": {
-                            "Attributes": {"name": "homeassistant", "exitCode": "137"}
+                            "ID": "abc123",
+                            "Attributes": {"name": "homeassistant", "exitCode": "137"},
                         },
                     }
                 ]
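The change above tracks how the Docker events API actually shapes its payloads: the container identifier the monitor consumes lives under Actor.ID, not in a top-level id field. A minimal illustration of the assumed payload shape:

    # Assumed shape of a docker "die" event as consumed by the monitor above.
    event = {
        "time": 123,
        "Type": "container",
        "Action": "die",
        "Actor": {
            "ID": "abc123",
            "Attributes": {"name": "homeassistant", "exitCode": "137"},
        },
    }
    container_id = event["Actor"]["ID"]  # not event["id"]
    assert container_id == "abc123"
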
196 tests/fixtures/docker_pull_image_log_containerd_snapshot.json vendored Normal file
@@ -0,0 +1,196 @@
+[
+  {
+    "status": "Pulling from home-assistant/home-assistant",
+    "id": "2025.12.0.dev202511080235"
+  },
+  { "status": "Pulling fs layer", "progressDetail": {}, "id": "eafecc6b43cc" },
+  { "status": "Pulling fs layer", "progressDetail": {}, "id": "333270549f95" },
+  {
+    "status": "Downloading",
+    "progressDetail": { "current": 1048576, "total": 21863319 },
+    "progress": "[==\u003e ] 1.049MB/21.86MB",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Downloading",
+    "progressDetail": { "current": 1048576, "total": 21179924 },
+    "progress": "[==\u003e ] 1.049MB/21.18MB",
+    "id": "333270549f95"
+  },
+  {
+    "status": "Downloading",
+    "progressDetail": { "current": 4194304, "total": 21863319 },
+    "progress": "[=========\u003e ] 4.194MB/21.86MB",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Downloading",
+    "progressDetail": { "current": 2097152, "total": 21179924 },
+    "progress": "[====\u003e ] 2.097MB/21.18MB",
+    "id": "333270549f95"
+  },
+  {
+    "status": "Downloading",
+    "progressDetail": { "current": 7340032, "total": 21863319 },
+    "progress": "[================\u003e ] 7.34MB/21.86MB",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Downloading",
+    "progressDetail": { "current": 4194304, "total": 21179924 },
+    "progress": "[=========\u003e ] 4.194MB/21.18MB",
+    "id": "333270549f95"
+  },
+  {
+    "status": "Downloading",
+    "progressDetail": { "current": 13631488, "total": 21863319 },
+    "progress": "[===============================\u003e ] 13.63MB/21.86MB",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Downloading",
+    "progressDetail": { "current": 8388608, "total": 21179924 },
+    "progress": "[===================\u003e ] 8.389MB/21.18MB",
+    "id": "333270549f95"
+  },
+  {
+    "status": "Downloading",
+    "progressDetail": { "current": 17825792, "total": 21863319 },
+    "progress": "[========================================\u003e ] 17.83MB/21.86MB",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Downloading",
+    "progressDetail": { "current": 12582912, "total": 21179924 },
+    "progress": "[=============================\u003e ] 12.58MB/21.18MB",
+    "id": "333270549f95"
+  },
+  {
+    "status": "Downloading",
+    "progressDetail": { "current": 21863319, "total": 21863319 },
+    "progress": "[==================================================\u003e] 21.86MB/21.86MB",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Downloading",
+    "progressDetail": { "current": 16777216, "total": 21179924 },
+    "progress": "[=======================================\u003e ] 16.78MB/21.18MB",
+    "id": "333270549f95"
+  },
+  {
+    "status": "Downloading",
+    "progressDetail": { "current": 21179924, "total": 21179924 },
+    "progress": "[==================================================\u003e] 21.18MB/21.18MB",
+    "id": "333270549f95"
+  },
+  {
+    "status": "Download complete",
+    "progressDetail": { "hidecounts": true },
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Download complete",
+    "progressDetail": { "hidecounts": true },
+    "id": "333270549f95"
+  },
+  {
+    "status": "Extracting",
+    "progressDetail": { "current": 1, "units": "s" },
+    "progress": "1 s",
+    "id": "333270549f95"
+  },
+  {
+    "status": "Extracting",
+    "progressDetail": { "current": 1, "units": "s" },
+    "progress": "1 s",
+    "id": "333270549f95"
+  },
+  {
+    "status": "Pull complete",
+    "progressDetail": { "hidecounts": true },
+    "id": "333270549f95"
+  },
+  {
+    "status": "Extracting",
+    "progressDetail": { "current": 1, "units": "s" },
+    "progress": "1 s",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Extracting",
+    "progressDetail": { "current": 1, "units": "s" },
+    "progress": "1 s",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Extracting",
+    "progressDetail": { "current": 2, "units": "s" },
+    "progress": "2 s",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Extracting",
+    "progressDetail": { "current": 2, "units": "s" },
+    "progress": "2 s",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Extracting",
+    "progressDetail": { "current": 3, "units": "s" },
+    "progress": "3 s",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Extracting",
+    "progressDetail": { "current": 3, "units": "s" },
+    "progress": "3 s",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Extracting",
+    "progressDetail": { "current": 4, "units": "s" },
+    "progress": "4 s",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Extracting",
+    "progressDetail": { "current": 4, "units": "s" },
+    "progress": "4 s",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Extracting",
+    "progressDetail": { "current": 5, "units": "s" },
+    "progress": "5 s",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Extracting",
+    "progressDetail": { "current": 5, "units": "s" },
+    "progress": "5 s",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Extracting",
+    "progressDetail": { "current": 6, "units": "s" },
+    "progress": "6 s",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Extracting",
+    "progressDetail": { "current": 6, "units": "s" },
+    "progress": "6 s",
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Pull complete",
+    "progressDetail": { "hidecounts": true },
+    "id": "eafecc6b43cc"
+  },
+  {
+    "status": "Digest: sha256:bfc9efc13552c0c228f3d9d35987331cce68b43c9bc79c80a57eeadadd44cccf"
+  },
+  {
+    "status": "Status: Downloaded newer image for ghcr.io/home-assistant/home-assistant:2025.12.0.dev202511080235"
+  }
+]
5649 tests/fixtures/docker_pull_image_log_containerd_snapshotter_real.json vendored Normal file
File diff suppressed because it is too large
@@ -1,11 +1,14 @@
"""Test Home Assistant core."""
|
"""Test Home Assistant core."""
|
||||||
|
|
||||||
from datetime import datetime, timedelta
|
from datetime import datetime, timedelta
|
||||||
from unittest.mock import ANY, MagicMock, Mock, PropertyMock, patch
|
from http import HTTPStatus
|
||||||
|
from unittest.mock import ANY, MagicMock, Mock, PropertyMock, call, patch
|
||||||
|
|
||||||
|
import aiodocker
|
||||||
from awesomeversion import AwesomeVersion
|
from awesomeversion import AwesomeVersion
|
||||||
from docker.errors import APIError, DockerException, ImageNotFound, NotFound
|
from docker.errors import APIError, DockerException, NotFound
|
||||||
import pytest
|
import pytest
|
||||||
|
from requests import RequestException
|
||||||
from time_machine import travel
|
from time_machine import travel
|
||||||
|
|
||||||
from supervisor.const import CpuArch
|
from supervisor.const import CpuArch
|
||||||
@@ -23,8 +26,12 @@ from supervisor.exceptions import (
 from supervisor.homeassistant.api import APIState
 from supervisor.homeassistant.core import HomeAssistantCore
 from supervisor.homeassistant.module import HomeAssistant
+from supervisor.resolution.const import ContextType, IssueType
+from supervisor.resolution.data import Issue
 from supervisor.updater import Updater

+from tests.common import AsyncIterator


 async def test_update_fails_if_out_of_date(coresys: CoreSys):
     """Test update of Home Assistant fails when supervisor or plugin is out of date."""
@@ -52,11 +59,23 @@ async def test_update_fails_if_out_of_date(coresys: CoreSys):
         await coresys.homeassistant.core.update()


-async def test_install_landingpage_docker_error(
-    coresys: CoreSys, capture_exception: Mock, caplog: pytest.LogCaptureFixture
+@pytest.mark.parametrize(
+    "err",
+    [
+        aiodocker.DockerError(HTTPStatus.TOO_MANY_REQUESTS, {"message": "ratelimit"}),
+        APIError("ratelimit", MagicMock(status_code=HTTPStatus.TOO_MANY_REQUESTS)),
+    ],
+)
+async def test_install_landingpage_docker_ratelimit_error(
+    coresys: CoreSys,
+    capture_exception: Mock,
+    caplog: pytest.LogCaptureFixture,
+    err: Exception,
 ):
-    """Test install landing page fails due to docker error."""
+    """Test install landing page fails due to docker ratelimit error."""
     coresys.security.force = True
+    coresys.docker.images.pull.side_effect = [err, AsyncIterator([{}])]
+
     with (
         patch.object(DockerHomeAssistant, "attach", side_effect=DockerError),
         patch.object(
@@ -69,19 +88,35 @@
         ),
         patch("supervisor.homeassistant.core.asyncio.sleep") as sleep,
     ):
-        coresys.docker.images.get.side_effect = [APIError("fail"), MagicMock()]
         await coresys.homeassistant.core.install_landingpage()
         sleep.assert_awaited_once_with(30)

     assert "Failed to install landingpage, retrying after 30sec" in caplog.text
     capture_exception.assert_not_called()
+    assert (
+        Issue(IssueType.DOCKER_RATELIMIT, ContextType.SYSTEM)
+        in coresys.resolution.issues
+    )


+@pytest.mark.parametrize(
+    "err",
+    [
+        aiodocker.DockerError(HTTPStatus.INTERNAL_SERVER_ERROR, {"message": "fail"}),
+        APIError("fail"),
+        DockerException(),
+        RequestException(),
+        OSError(),
+    ],
+)
 async def test_install_landingpage_other_error(
-    coresys: CoreSys, capture_exception: Mock, caplog: pytest.LogCaptureFixture
+    coresys: CoreSys,
+    capture_exception: Mock,
+    caplog: pytest.LogCaptureFixture,
+    err: Exception,
 ):
     """Test install landing page fails due to other error."""
-    coresys.docker.images.get.side_effect = [(err := OSError()), MagicMock()]
+    coresys.docker.images.inspect.side_effect = [err, MagicMock()]

     with (
         patch.object(DockerHomeAssistant, "attach", side_effect=DockerError),
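The parametrization above pins down one behavior: a registry 429 response, whether surfaced by aiodocker or docker-py, should produce a DOCKER_RATELIMIT issue while the install retries. A hedged sketch of that classification logic (the helper name is hypothetical; only the exception attributes used here are real library APIs):

    from http import HTTPStatus

    def is_ratelimit(err: Exception) -> bool:
        """Classify a pull failure as a registry rate limit (hypothetical helper)."""
        status = getattr(err, "status", None)  # aiodocker.DockerError carries .status
        if status is None and getattr(err, "response", None) is not None:
            status = err.response.status_code  # docker.errors.APIError wraps a response
        return status == HTTPStatus.TOO_MANY_REQUESTS
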
@@ -102,11 +137,23 @@ async def test_install_landingpage_other_error(
     capture_exception.assert_called_once_with(err)


-async def test_install_docker_error(
-    coresys: CoreSys, capture_exception: Mock, caplog: pytest.LogCaptureFixture
+@pytest.mark.parametrize(
+    "err",
+    [
+        aiodocker.DockerError(HTTPStatus.TOO_MANY_REQUESTS, {"message": "ratelimit"}),
+        APIError("ratelimit", MagicMock(status_code=HTTPStatus.TOO_MANY_REQUESTS)),
+    ],
+)
+async def test_install_docker_ratelimit_error(
+    coresys: CoreSys,
+    capture_exception: Mock,
+    caplog: pytest.LogCaptureFixture,
+    err: Exception,
 ):
-    """Test install fails due to docker error."""
+    """Test install fails due to docker ratelimit error."""
     coresys.security.force = True
+    coresys.docker.images.pull.side_effect = [err, AsyncIterator([{}])]

     with (
         patch.object(HomeAssistantCore, "start"),
         patch.object(DockerHomeAssistant, "cleanup"),
@@ -123,19 +170,35 @@
         ),
         patch("supervisor.homeassistant.core.asyncio.sleep") as sleep,
     ):
-        coresys.docker.images.get.side_effect = [APIError("fail"), MagicMock()]
         await coresys.homeassistant.core.install()
         sleep.assert_awaited_once_with(30)

     assert "Error on Home Assistant installation. Retrying in 30sec" in caplog.text
     capture_exception.assert_not_called()
+    assert (
+        Issue(IssueType.DOCKER_RATELIMIT, ContextType.SYSTEM)
+        in coresys.resolution.issues
+    )


+@pytest.mark.parametrize(
+    "err",
+    [
+        aiodocker.DockerError(HTTPStatus.INTERNAL_SERVER_ERROR, {"message": "fail"}),
+        APIError("fail"),
+        DockerException(),
+        RequestException(),
+        OSError(),
+    ],
+)
 async def test_install_other_error(
-    coresys: CoreSys, capture_exception: Mock, caplog: pytest.LogCaptureFixture
+    coresys: CoreSys,
+    capture_exception: Mock,
+    caplog: pytest.LogCaptureFixture,
+    err: Exception,
 ):
     """Test install fails due to other error."""
-    coresys.docker.images.get.side_effect = [(err := OSError()), MagicMock()]
+    coresys.docker.images.inspect.side_effect = [err, MagicMock()]

     with (
         patch.object(HomeAssistantCore, "start"),
@@ -161,21 +224,29 @@ async def test_install_other_error(


 @pytest.mark.parametrize(
-    "container_exists,image_exists", [(False, True), (True, False), (True, True)]
+    ("container_exc", "image_exc", "remove_calls"),
+    [
+        (NotFound("missing"), None, []),
+        (
+            None,
+            aiodocker.DockerError(404, {"message": "missing"}),
+            [call(force=True, v=True)],
+        ),
+        (None, None, [call(force=True, v=True)]),
+    ],
 )
+@pytest.mark.usefixtures("path_extern")
 async def test_start(
-    coresys: CoreSys, container_exists: bool, image_exists: bool, path_extern
+    coresys: CoreSys,
+    container_exc: DockerException | None,
+    image_exc: aiodocker.DockerError | None,
+    remove_calls: list[call],
 ):
     """Test starting Home Assistant."""
-    if image_exists:
-        coresys.docker.images.get.return_value.id = "123"
-    else:
-        coresys.docker.images.get.side_effect = ImageNotFound("missing")
-
-    if container_exists:
-        coresys.docker.containers.get.return_value.image.id = "123"
-    else:
-        coresys.docker.containers.get.side_effect = NotFound("missing")
+    coresys.docker.images.inspect.return_value = {"Id": "123"}
+    coresys.docker.images.inspect.side_effect = image_exc
+    coresys.docker.containers.get.return_value.id = "123"
+    coresys.docker.containers.get.side_effect = container_exc

     with (
         patch.object(
@@ -198,18 +269,14 @@ async def test_start(
assert run.call_args.kwargs["hostname"] == "homeassistant"
|
assert run.call_args.kwargs["hostname"] == "homeassistant"
|
||||||
|
|
||||||
coresys.docker.containers.get.return_value.stop.assert_not_called()
|
coresys.docker.containers.get.return_value.stop.assert_not_called()
|
||||||
if container_exists:
|
assert (
|
||||||
coresys.docker.containers.get.return_value.remove.assert_called_once_with(
|
coresys.docker.containers.get.return_value.remove.call_args_list == remove_calls
|
||||||
force=True,
|
)
|
||||||
v=True,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
coresys.docker.containers.get.return_value.remove.assert_not_called()
|
|
||||||
|
|
||||||
|
|
||||||
async def test_start_existing_container(coresys: CoreSys, path_extern):
|
async def test_start_existing_container(coresys: CoreSys, path_extern):
|
||||||
"""Test starting Home Assistant when container exists and is viable."""
|
"""Test starting Home Assistant when container exists and is viable."""
|
||||||
coresys.docker.images.get.return_value.id = "123"
|
coresys.docker.images.inspect.return_value = {"Id": "123"}
|
||||||
coresys.docker.containers.get.return_value.image.id = "123"
|
coresys.docker.containers.get.return_value.image.id = "123"
|
||||||
coresys.docker.containers.get.return_value.status = "exited"
|
coresys.docker.containers.get.return_value.status = "exited"
|
||||||
|
|
||||||
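The rewritten assertion compares remove.call_args_list against a parametrized list of unittest.mock.call objects, which folds the "never called" and "called once with force=True, v=True" cases into one expression. A self-contained illustration of why that works:

    from unittest.mock import MagicMock, call

    remover = MagicMock()
    remover(force=True, v=True)

    # call() objects compare structurally, so an entire call history can be
    # asserted with a single list equality.
    assert remover.call_args_list == [call(force=True, v=True)]

    untouched = MagicMock()
    assert untouched.call_args_list == []  # the "not called" case
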
@@ -394,24 +461,33 @@ async def test_core_loads_wrong_image_for_machine(
"""Test core is loaded with wrong image for machine."""
|
"""Test core is loaded with wrong image for machine."""
|
||||||
coresys.homeassistant.set_image("ghcr.io/home-assistant/odroid-n2-homeassistant")
|
coresys.homeassistant.set_image("ghcr.io/home-assistant/odroid-n2-homeassistant")
|
||||||
coresys.homeassistant.version = AwesomeVersion("2024.4.0")
|
coresys.homeassistant.version = AwesomeVersion("2024.4.0")
|
||||||
container.attrs["Config"] = {"Labels": {"io.hass.version": "2024.4.0"}}
|
|
||||||
|
|
||||||
await coresys.homeassistant.core.load()
|
with patch.object(
|
||||||
|
DockerAPI,
|
||||||
|
"pull_image",
|
||||||
|
return_value={
|
||||||
|
"Id": "abc123",
|
||||||
|
"Config": {"Labels": {"io.hass.version": "2024.4.0"}},
|
||||||
|
},
|
||||||
|
) as pull_image:
|
||||||
|
container.attrs |= pull_image.return_value
|
||||||
|
await coresys.homeassistant.core.load()
|
||||||
|
pull_image.assert_called_once_with(
|
||||||
|
ANY,
|
||||||
|
"ghcr.io/home-assistant/qemux86-64-homeassistant",
|
||||||
|
"2024.4.0",
|
||||||
|
platform="linux/amd64",
|
||||||
|
auth=None,
|
||||||
|
)
|
||||||
|
|
||||||
container.remove.assert_called_once_with(force=True, v=True)
|
container.remove.assert_called_once_with(force=True, v=True)
|
||||||
assert coresys.docker.images.remove.call_args_list[0].kwargs == {
|
assert coresys.docker.images.delete.call_args_list[0] == call(
|
||||||
"image": "ghcr.io/home-assistant/odroid-n2-homeassistant:latest",
|
"ghcr.io/home-assistant/odroid-n2-homeassistant:latest",
|
||||||
"force": True,
|
force=True,
|
||||||
}
|
)
|
||||||
assert coresys.docker.images.remove.call_args_list[1].kwargs == {
|
assert coresys.docker.images.delete.call_args_list[1] == call(
|
||||||
"image": "ghcr.io/home-assistant/odroid-n2-homeassistant:2024.4.0",
|
"ghcr.io/home-assistant/odroid-n2-homeassistant:2024.4.0",
|
||||||
"force": True,
|
force=True,
|
||||||
}
|
|
||||||
coresys.docker.pull_image.assert_called_once_with(
|
|
||||||
ANY,
|
|
||||||
"ghcr.io/home-assistant/qemux86-64-homeassistant",
|
|
||||||
"2024.4.0",
|
|
||||||
platform="linux/amd64",
|
|
||||||
)
|
)
|
||||||
assert (
|
assert (
|
||||||
coresys.homeassistant.image == "ghcr.io/home-assistant/qemux86-64-homeassistant"
|
coresys.homeassistant.image == "ghcr.io/home-assistant/qemux86-64-homeassistant"
|
||||||
@@ -428,8 +504,8 @@ async def test_core_load_allows_image_override(coresys: CoreSys, container: Magi
     await coresys.homeassistant.core.load()

     container.remove.assert_not_called()
-    coresys.docker.images.remove.assert_not_called()
-    coresys.docker.images.get.assert_not_called()
+    coresys.docker.images.delete.assert_not_called()
+    coresys.docker.images.inspect.assert_not_called()
     assert (
         coresys.homeassistant.image == "ghcr.io/home-assistant/odroid-n2-homeassistant"
     )
@@ -440,27 +516,37 @@ async def test_core_loads_wrong_image_for_architecture(
):
|
):
|
||||||
"""Test core is loaded with wrong image for architecture."""
|
"""Test core is loaded with wrong image for architecture."""
|
||||||
coresys.homeassistant.version = AwesomeVersion("2024.4.0")
|
coresys.homeassistant.version = AwesomeVersion("2024.4.0")
|
||||||
container.attrs["Config"] = {"Labels": {"io.hass.version": "2024.4.0"}}
|
coresys.docker.images.inspect.return_value = img_data = (
|
||||||
coresys.docker.images.get("ghcr.io/home-assistant/qemux86-64-homeassistant").attrs[
|
coresys.docker.images.inspect.return_value
|
||||||
"Architecture"
|
| {
|
||||||
] = "arm64"
|
"Architecture": "arm64",
|
||||||
|
"Config": {"Labels": {"io.hass.version": "2024.4.0"}},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
container.attrs |= img_data
|
||||||
|
|
||||||
await coresys.homeassistant.core.load()
|
with patch.object(
|
||||||
|
DockerAPI,
|
||||||
|
"pull_image",
|
||||||
|
return_value=img_data | {"Architecture": "amd64"},
|
||||||
|
) as pull_image:
|
||||||
|
await coresys.homeassistant.core.load()
|
||||||
|
pull_image.assert_called_once_with(
|
||||||
|
ANY,
|
||||||
|
"ghcr.io/home-assistant/qemux86-64-homeassistant",
|
||||||
|
"2024.4.0",
|
||||||
|
platform="linux/amd64",
|
||||||
|
auth=None,
|
||||||
|
)
|
||||||
|
|
||||||
container.remove.assert_called_once_with(force=True, v=True)
|
container.remove.assert_called_once_with(force=True, v=True)
|
||||||
assert coresys.docker.images.remove.call_args_list[0].kwargs == {
|
assert coresys.docker.images.delete.call_args_list[0] == call(
|
||||||
"image": "ghcr.io/home-assistant/qemux86-64-homeassistant:latest",
|
"ghcr.io/home-assistant/qemux86-64-homeassistant:latest",
|
||||||
"force": True,
|
force=True,
|
||||||
}
|
)
|
||||||
assert coresys.docker.images.remove.call_args_list[1].kwargs == {
|
assert coresys.docker.images.delete.call_args_list[1] == call(
|
||||||
"image": "ghcr.io/home-assistant/qemux86-64-homeassistant:2024.4.0",
|
"ghcr.io/home-assistant/qemux86-64-homeassistant:2024.4.0",
|
||||||
"force": True,
|
force=True,
|
||||||
}
|
|
||||||
coresys.docker.pull_image.assert_called_once_with(
|
|
||||||
ANY,
|
|
||||||
"ghcr.io/home-assistant/qemux86-64-homeassistant",
|
|
||||||
"2024.4.0",
|
|
||||||
platform="linux/amd64",
|
|
||||||
)
|
)
|
||||||
assert (
|
assert (
|
||||||
coresys.homeassistant.image == "ghcr.io/home-assistant/qemux86-64-homeassistant"
|
coresys.homeassistant.image == "ghcr.io/home-assistant/qemux86-64-homeassistant"
|
||||||
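Note: the assertions above track the move from the synchronous docker-py image API to aiodocker. The old `images.get(...)`/`images.remove(image=..., force=True)` calls become awaitable `images.inspect(...)`/`images.delete(..., force=True)` calls that return and accept plain dicts. A minimal sketch of the new call shapes, assuming a connected aiodocker client (image references copied from the test above):

import aiodocker

async def cleanup_and_inspect(docker: aiodocker.Docker) -> dict:
    # docker-py style was images.remove(image="ref:tag", force=True);
    # aiodocker takes the reference positionally and must be awaited.
    for tag in ("latest", "2024.4.0"):
        await docker.images.delete(
            f"ghcr.io/home-assistant/odroid-n2-homeassistant:{tag}", force=True
        )
    # docker-py's images.get(ref) returned an Image object with .attrs;
    # aiodocker's inspect returns the inspect payload as a dict.
    return await docker.images.inspect(
        "ghcr.io/home-assistant/qemux86-64-homeassistant"
    )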
@@ -90,6 +90,49 @@ async def test_logs_coloured(journald_gateway: MagicMock, coresys: CoreSys):
     )


+async def test_logs_no_colors(journald_gateway: MagicMock, coresys: CoreSys):
+    """Test ANSI color codes being stripped when no_colors=True."""
+    journald_gateway.content.feed_data(
+        load_fixture("logs_export_supervisor.txt").encode("utf-8")
+    )
+    journald_gateway.content.feed_eof()
+
+    async with coresys.host.logs.journald_logs() as resp:
+        cursor, line = await anext(journal_logs_reader(resp, no_colors=True))
+        assert (
+            cursor
+            == "s=83fee99ca0c3466db5fc120d52ca7dd8;i=2049389;b=f5a5c442fa6548cf97474d2d57c920b3;m=4263828e8c;t=612dda478b01b;x=9ae12394c9326930"
+        )
+        # Colors should be stripped
+        assert (
+            line == "24-03-04 23:56:56 INFO (MainThread) [__main__] Closing Supervisor"
+        )
+
+
+async def test_logs_verbose_no_colors(journald_gateway: MagicMock, coresys: CoreSys):
+    """Test ANSI color codes being stripped from verbose formatted logs when no_colors=True."""
+    journald_gateway.content.feed_data(
+        load_fixture("logs_export_supervisor.txt").encode("utf-8")
+    )
+    journald_gateway.content.feed_eof()
+
+    async with coresys.host.logs.journald_logs() as resp:
+        cursor, line = await anext(
+            journal_logs_reader(
+                resp, log_formatter=LogFormatter.VERBOSE, no_colors=True
+            )
+        )
+        assert (
+            cursor
+            == "s=83fee99ca0c3466db5fc120d52ca7dd8;i=2049389;b=f5a5c442fa6548cf97474d2d57c920b3;m=4263828e8c;t=612dda478b01b;x=9ae12394c9326930"
+        )
+        # Colors should be stripped in verbose format too
+        assert (
+            line
+            == "2024-03-04 22:56:56.709 ha-hloub hassio_supervisor[466]: 24-03-04 23:56:56 INFO (MainThread) [__main__] Closing Supervisor"
+        )
+
+
 async def test_boot_ids(
     journald_gateway: MagicMock,
     coresys: CoreSys,
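Note: the new `no_colors` tests feed a captured journal export fixture through `journal_logs_reader` and expect the ANSI SGR sequences gone from the emitted line. A minimal sketch of the stripping itself, assuming a regex-based approach (the helper name is illustrative, not the Supervisor API):

import re

# Matches ANSI SGR color sequences such as "\x1b[32m" and "\x1b[0m".
ANSI_COLOR_RE = re.compile(r"\x1b\[[0-9;]*m")

def strip_ansi_colors(line: str) -> str:
    """Return the log line with ANSI color codes removed."""
    return ANSI_COLOR_RE.sub("", line)

assert strip_ansi_colors("\x1b[32mHello, world!\x1b[0m") == "Hello, world!"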
@@ -1179,7 +1179,6 @@ async def test_job_scheduled_delay(coresys: CoreSys):


 async def test_job_scheduled_at(coresys: CoreSys):
     """Test job that schedules a job to start at a specified time."""
-    dt = datetime.now()

     class TestClass:
         """Test class."""
@@ -1189,10 +1188,12 @@ async def test_job_scheduled_at(coresys: CoreSys):
             self.coresys = coresys

         @Job(name="test_job_scheduled_at_job_scheduler")
-        async def job_scheduler(self) -> tuple[SupervisorJob, asyncio.TimerHandle]:
+        async def job_scheduler(
+            self, scheduled_time: datetime
+        ) -> tuple[SupervisorJob, asyncio.TimerHandle]:
             """Schedule a job to run at specified time."""
             return self.coresys.jobs.schedule_job(
-                self.job_task, JobSchedulerOptions(start_at=dt + timedelta(seconds=0.1))
+                self.job_task, JobSchedulerOptions(start_at=scheduled_time)
             )

         @Job(name="test_job_scheduled_at_job_task")
@@ -1201,29 +1202,28 @@ async def test_job_scheduled_at(coresys: CoreSys):
             self.coresys.jobs.current.stage = "work"

     test = TestClass(coresys)
-    job_started = asyncio.Event()
-    job_ended = asyncio.Event()
+    # Schedule job to run 0.1 seconds from now
+    scheduled_time = datetime.now() + timedelta(seconds=0.1)
+    job, _ = await test.job_scheduler(scheduled_time)
+    started = False
+    ended = False

     async def start_listener(evt_job: SupervisorJob):
-        if evt_job.uuid == job.uuid:
-            job_started.set()
+        nonlocal started
+        started = started or evt_job.uuid == job.uuid

     async def end_listener(evt_job: SupervisorJob):
-        if evt_job.uuid == job.uuid:
-            job_ended.set()
+        nonlocal ended
+        ended = ended or evt_job.uuid == job.uuid

-    async with time_machine.travel(dt):
-        job, _ = await test.job_scheduler()
-
-        coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_START, start_listener)
-        coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, end_listener)
-
-    # Advance time to exactly when job should start and wait for completion
-    async with time_machine.travel(dt + timedelta(seconds=0.1)):
-        await asyncio.wait_for(
-            asyncio.gather(job_started.wait(), job_ended.wait()), timeout=1.0
-        )
+    coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_START, start_listener)
+    coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, end_listener)
+
+    await asyncio.sleep(0.2)
+
+    assert started
+    assert ended

     assert job.done
     assert job.name == "test_job_scheduled_at_job_task"
     assert job.stage == "work"
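Note: the rewritten test drops `time_machine` entirely. It schedules the job 0.1 s ahead on the real clock, records start/end via booleans flipped by the bus listeners, and sleeps past the deadline. A sketch of the scheduling pattern this relies on, assuming the scheduler ultimately converts a wall-clock target into a `loop.call_later` delay (the helper below is illustrative, not the `schedule_job` implementation):

import asyncio
from datetime import datetime

def schedule_at(when: datetime, coro_factory) -> asyncio.TimerHandle:
    """Run a coroutine at a wall-clock time via a relative call_later delay."""
    loop = asyncio.get_running_loop()
    delay = max((when - datetime.now()).total_seconds(), 0.0)
    return loop.call_later(delay, lambda: loop.create_task(coro_factory()))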
@@ -115,7 +115,17 @@ async def test_not_started(coresys):
     assert filter_data(coresys, SAMPLE_EVENT, {}) == SAMPLE_EVENT

     await coresys.core.set_state(CoreState.SETUP)
-    assert filter_data(coresys, SAMPLE_EVENT, {}) == SAMPLE_EVENT
+    filtered = filter_data(coresys, SAMPLE_EVENT, {})
+    # During SETUP, we should have basic system info available
+    assert "contexts" in filtered
+    assert "versions" in filtered["contexts"]
+    assert "docker" in filtered["contexts"]["versions"]
+    assert "supervisor" in filtered["contexts"]["versions"]
+    assert "host" in filtered["contexts"]
+    assert "machine" in filtered["contexts"]["host"]
+    assert filtered["contexts"]["versions"]["docker"] == coresys.docker.info.version
+    assert filtered["contexts"]["versions"]["supervisor"] == coresys.supervisor.version
+    assert filtered["contexts"]["host"]["machine"] == coresys.machine


 async def test_defaults(coresys):
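Note: the strengthened assertions pin down the event shape instead of comparing the whole event to the unfiltered sample. For reference, the contexts the test inspects look roughly like this (a hedged example; the values are illustrative, not fixture data):

filtered_example = {
    "contexts": {
        "versions": {
            "docker": "28.0.1",         # coresys.docker.info.version
            "supervisor": "2025.11.2",  # coresys.supervisor.version
        },
        "host": {
            "machine": "qemux86-64",    # coresys.machine
        },
    },
}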
@@ -119,10 +119,10 @@ async def test_load(
         "mnt-data-supervisor-mounts-backup_test.mount",
         "fail",
         [
-            ["Options", Variant("s", "noserverino,guest")],
-            ["Type", Variant("s", "cifs")],
-            ["Description", Variant("s", "Supervisor cifs mount: backup_test")],
-            ["What", Variant("s", "//backup.local/backups")],
+            ("Options", Variant("s", "noserverino,guest")),
+            ("Type", Variant("s", "cifs")),
+            ("Description", Variant("s", "Supervisor cifs mount: backup_test")),
+            ("What", Variant("s", "//backup.local/backups")),
         ],
         [],
     ),
@@ -130,10 +130,10 @@ async def test_load(
         "mnt-data-supervisor-mounts-media_test.mount",
         "fail",
         [
-            ["Options", Variant("s", "soft,timeo=200")],
-            ["Type", Variant("s", "nfs")],
-            ["Description", Variant("s", "Supervisor nfs mount: media_test")],
-            ["What", Variant("s", "media.local:/media")],
+            ("Options", Variant("s", "soft,timeo=200")),
+            ("Type", Variant("s", "nfs")),
+            ("Description", Variant("s", "Supervisor nfs mount: media_test")),
+            ("What", Variant("s", "media.local:/media")),
         ],
         [],
     ),
@@ -141,12 +141,12 @@ async def test_load(
         "mnt-data-supervisor-media-media_test.mount",
         "fail",
         [
-            ["Options", Variant("s", "bind")],
-            [
+            ("Options", Variant("s", "bind")),
+            (
                 "Description",
                 Variant("s", "Supervisor bind mount: bind_media_test"),
-            ],
-            ["What", Variant("s", "/mnt/data/supervisor/mounts/media_test")],
+            ),
+            ("What", Variant("s", "/mnt/data/supervisor/mounts/media_test")),
         ],
         [],
     ),
@@ -198,10 +198,10 @@ async def test_load_share_mount(
         "mnt-data-supervisor-mounts-share_test.mount",
         "fail",
         [
-            ["Options", Variant("s", "soft,timeo=200")],
-            ["Type", Variant("s", "nfs")],
-            ["Description", Variant("s", "Supervisor nfs mount: share_test")],
-            ["What", Variant("s", "share.local:/share")],
+            ("Options", Variant("s", "soft,timeo=200")),
+            ("Type", Variant("s", "nfs")),
+            ("Description", Variant("s", "Supervisor nfs mount: share_test")),
+            ("What", Variant("s", "share.local:/share")),
         ],
         [],
     ),
@@ -209,9 +209,9 @@ async def test_load_share_mount(
         "mnt-data-supervisor-share-share_test.mount",
         "fail",
         [
-            ["Options", Variant("s", "bind")],
-            ["Description", Variant("s", "Supervisor bind mount: bind_share_test")],
-            ["What", Variant("s", "/mnt/data/supervisor/mounts/share_test")],
+            ("Options", Variant("s", "bind")),
+            ("Description", Variant("s", "Supervisor bind mount: bind_share_test")),
+            ("What", Variant("s", "/mnt/data/supervisor/mounts/share_test")),
         ],
         [],
     ),
@@ -318,12 +318,12 @@ async def test_mount_failed_during_load(
         "mnt-data-supervisor-media-media_test.mount",
         "fail",
         [
-            ["Options", Variant("s", "ro,bind")],
-            [
+            ("Options", Variant("s", "ro,bind")),
+            (
                 "Description",
                 Variant("s", "Supervisor bind mount: emergency_media_test"),
-            ],
-            ["What", Variant("s", "/mnt/data/supervisor/emergency/media_test")],
+            ),
+            ("What", Variant("s", "/mnt/data/supervisor/emergency/media_test")),
         ],
         [],
     )
@@ -634,10 +634,10 @@ async def test_reload_mounts_attempts_initial_mount(
         "mnt-data-supervisor-mounts-media_test.mount",
         "fail",
         [
-            ["Options", Variant("s", "soft,timeo=200")],
-            ["Type", Variant("s", "nfs")],
-            ["Description", Variant("s", "Supervisor nfs mount: media_test")],
-            ["What", Variant("s", "media.local:/media")],
+            ("Options", Variant("s", "soft,timeo=200")),
+            ("Type", Variant("s", "nfs")),
+            ("Description", Variant("s", "Supervisor nfs mount: media_test")),
+            ("What", Variant("s", "media.local:/media")),
         ],
         [],
     ),
@@ -645,9 +645,9 @@ async def test_reload_mounts_attempts_initial_mount(
         "mnt-data-supervisor-media-media_test.mount",
         "fail",
         [
-            ["Options", Variant("s", "bind")],
-            ["Description", Variant("s", "Supervisor bind mount: bind_media_test")],
-            ["What", Variant("s", "/mnt/data/supervisor/mounts/media_test")],
+            ("Options", Variant("s", "bind")),
+            ("Description", Variant("s", "Supervisor bind mount: bind_media_test")),
+            ("What", Variant("s", "/mnt/data/supervisor/mounts/media_test")),
         ],
         [],
     ),
@@ -105,7 +105,7 @@ async def test_cifs_mount(
         "mnt-data-supervisor-mounts-test.mount",
         "fail",
         [
-            [
+            (
                 "Options",
                 Variant(
                     "s",
@@ -117,10 +117,10 @@ async def test_cifs_mount(
                         ]
                     ),
                 ),
-            ],
-            ["Type", Variant("s", "cifs")],
-            ["Description", Variant("s", "Supervisor cifs mount: test")],
-            ["What", Variant("s", "//test.local/camera")],
+            ),
+            ("Type", Variant("s", "cifs")),
+            ("Description", Variant("s", "Supervisor cifs mount: test")),
+            ("What", Variant("s", "//test.local/camera")),
         ],
         [],
     )
@@ -177,10 +177,10 @@ async def test_cifs_mount_read_only(
         "mnt-data-supervisor-mounts-test.mount",
         "fail",
         [
-            ["Options", Variant("s", "ro,noserverino,guest")],
-            ["Type", Variant("s", "cifs")],
-            ["Description", Variant("s", "Supervisor cifs mount: test")],
-            ["What", Variant("s", "//test.local/camera")],
+            ("Options", Variant("s", "ro,noserverino,guest")),
+            ("Type", Variant("s", "cifs")),
+            ("Description", Variant("s", "Supervisor cifs mount: test")),
+            ("What", Variant("s", "//test.local/camera")),
         ],
         [],
     )
@@ -237,10 +237,10 @@ async def test_nfs_mount(
         "mnt-data-supervisor-mounts-test.mount",
         "fail",
         [
-            ["Options", Variant("s", "port=1234,soft,timeo=200")],
-            ["Type", Variant("s", "nfs")],
-            ["Description", Variant("s", "Supervisor nfs mount: test")],
-            ["What", Variant("s", "test.local:/media/camera")],
+            ("Options", Variant("s", "port=1234,soft,timeo=200")),
+            ("Type", Variant("s", "nfs")),
+            ("Description", Variant("s", "Supervisor nfs mount: test")),
+            ("What", Variant("s", "test.local:/media/camera")),
         ],
         [],
     )
@@ -283,10 +283,10 @@ async def test_nfs_mount_read_only(
         "mnt-data-supervisor-mounts-test.mount",
         "fail",
         [
-            ["Options", Variant("s", "ro,port=1234,soft,timeo=200")],
-            ["Type", Variant("s", "nfs")],
-            ["Description", Variant("s", "Supervisor nfs mount: test")],
-            ["What", Variant("s", "test.local:/media/camera")],
+            ("Options", Variant("s", "ro,port=1234,soft,timeo=200")),
+            ("Type", Variant("s", "nfs")),
+            ("Description", Variant("s", "Supervisor nfs mount: test")),
+            ("What", Variant("s", "test.local:/media/camera")),
         ],
         [],
     )
@@ -331,10 +331,10 @@ async def test_load(
         "mnt-data-supervisor-mounts-test.mount",
         "fail",
         [
-            ["Options", Variant("s", "noserverino,guest")],
-            ["Type", Variant("s", "cifs")],
-            ["Description", Variant("s", "Supervisor cifs mount: test")],
-            ["What", Variant("s", "//test.local/share")],
+            ("Options", Variant("s", "noserverino,guest")),
+            ("Type", Variant("s", "cifs")),
+            ("Description", Variant("s", "Supervisor cifs mount: test")),
+            ("What", Variant("s", "//test.local/share")),
         ],
         [],
     )
@@ -736,10 +736,10 @@ async def test_mount_fails_if_down(
         "mnt-data-supervisor-mounts-test.mount",
         "fail",
         [
-            ["Options", Variant("s", "port=1234,soft,timeo=200")],
-            ["Type", Variant("s", "nfs")],
-            ["Description", Variant("s", "Supervisor nfs mount: test")],
-            ["What", Variant("s", "test.local:/media/camera")],
+            ("Options", Variant("s", "port=1234,soft,timeo=200")),
+            ("Type", Variant("s", "nfs")),
+            ("Description", Variant("s", "Supervisor nfs mount: test")),
+            ("What", Variant("s", "test.local:/media/camera")),
         ],
         [],
     )
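Note: every mount hunk above swaps list-typed property pairs for tuples, apparently tracking a D-Bus library update in which a struct (such as a systemd transient-unit property) is represented as a Python tuple rather than a list. A minimal sketch, with the property values copied from the first hunk (the dbus_fast import is an assumption about the library in use):

from dbus_fast import Variant

# (name, value) property structs for a transient systemd mount unit;
# written as tuples to match the struct representation the tests now expect.
properties = [
    ("Options", Variant("s", "noserverino,guest")),
    ("Type", Variant("s", "cifs")),
    ("Description", Variant("s", "Supervisor cifs mount: backup_test")),
    ("What", Variant("s", "//backup.local/backups")),
]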
@@ -2,7 +2,7 @@

 import asyncio
 from pathlib import Path
-from unittest.mock import ANY, MagicMock, Mock, PropertyMock, patch
+from unittest.mock import ANY, MagicMock, Mock, PropertyMock, call, patch

 from awesomeversion import AwesomeVersion
 import pytest
@@ -11,6 +11,7 @@ from supervisor.const import BusEvent, CpuArch
 from supervisor.coresys import CoreSys
 from supervisor.docker.const import ContainerState
 from supervisor.docker.interface import DockerInterface
+from supervisor.docker.manager import DockerAPI
 from supervisor.docker.monitor import DockerContainerStateEvent
 from supervisor.exceptions import (
     AudioError,
@@ -359,21 +360,26 @@ async def test_load_with_incorrect_image(
     plugin.version = AwesomeVersion("2024.4.0")

     container.status = "running"
-    container.attrs["Config"] = {"Labels": {"io.hass.version": "2024.4.0"}}
+    coresys.docker.images.inspect.return_value = img_data = (
+        coresys.docker.images.inspect.return_value
+        | {"Config": {"Labels": {"io.hass.version": "2024.4.0"}}}
+    )
+    container.attrs |= img_data

-    await plugin.load()
+    with patch.object(DockerAPI, "pull_image", return_value=img_data) as pull_image:
+        await plugin.load()
+    pull_image.assert_called_once_with(
+        ANY, correct_image, "2024.4.0", platform="linux/amd64", auth=None
+    )

     container.remove.assert_called_once_with(force=True, v=True)
-    assert coresys.docker.images.remove.call_args_list[0].kwargs == {
-        "image": f"{old_image}:latest",
-        "force": True,
-    }
-    assert coresys.docker.images.remove.call_args_list[1].kwargs == {
-        "image": f"{old_image}:2024.4.0",
-        "force": True,
-    }
-    coresys.docker.pull_image.assert_called_once_with(
-        ANY, correct_image, "2024.4.0", platform="linux/amd64"
-    )
+    assert coresys.docker.images.delete.call_args_list[0] == call(
+        f"{old_image}:latest",
+        force=True,
+    )
+    assert coresys.docker.images.delete.call_args_list[1] == call(
+        f"{old_image}:2024.4.0",
+        force=True,
+    )
     assert plugin.image == correct_image

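Note: both this plugin test and the Home Assistant core tests above now patch `DockerAPI.pull_image` directly and expect an `auth=None` keyword plus the image inspect payload as the return value. A self-contained sketch of that assertion pattern, using AsyncMock as a stand-in for the patched method (the image name is illustrative):

import asyncio
from unittest.mock import ANY, AsyncMock

async def demo() -> None:
    # Stand-in for DockerAPI.pull_image, returning inspect-style data.
    pull_image = AsyncMock(return_value={"Architecture": "amd64"})

    img = await pull_image(
        object(), "ghcr.io/home-assistant/example-image", "2024.4.0",
        platform="linux/amd64", auth=None,
    )
    assert img["Architecture"] == "amd64"
    pull_image.assert_called_once_with(
        ANY, "ghcr.io/home-assistant/example-image", "2024.4.0",
        platform="linux/amd64", auth=None,
    )

asyncio.run(demo())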
@@ -5,10 +5,7 @@ from unittest.mock import MagicMock, patch

 from supervisor.const import CoreState
 from supervisor.coresys import CoreSys
-from supervisor.resolution.evaluations.operating_system import (
-    SUPPORTED_OS,
-    EvaluateOperatingSystem,
-)
+from supervisor.resolution.evaluations.operating_system import EvaluateOperatingSystem


 async def test_evaluation(coresys: CoreSys):
@@ -25,13 +22,7 @@ async def test_evaluation(coresys: CoreSys):
     assert operating_system.reason in coresys.resolution.unsupported

     coresys.os._available = True
-    await operating_system()
-    assert operating_system.reason not in coresys.resolution.unsupported
-    coresys.os._available = False
-
-    coresys.host._info = MagicMock(
-        operating_system=SUPPORTED_OS[0], timezone=None, timezone_tzinfo=None
-    )
+    assert coresys.os.available
     await operating_system()
     assert operating_system.reason not in coresys.resolution.unsupported

tests/resolution/evaluation/test_system_architecture.py (new file, 43 lines)
@@ -0,0 +1,43 @@
+"""Test evaluation supported system architectures."""
+
+from unittest.mock import PropertyMock, patch
+
+import pytest
+
+from supervisor.const import CoreState
+from supervisor.coresys import CoreSys
+from supervisor.resolution.evaluations.system_architecture import (
+    EvaluateSystemArchitecture,
+)
+
+
+@pytest.mark.parametrize("arch", ["i386", "armhf", "armv7"])
+async def test_evaluation_unsupported_architectures(
+    coresys: CoreSys,
+    arch: str,
+):
+    """Test evaluation of unsupported system architectures."""
+    system_architecture = EvaluateSystemArchitecture(coresys)
+    await coresys.core.set_state(CoreState.INITIALIZE)
+
+    with patch.object(
+        type(coresys.supervisor), "arch", PropertyMock(return_value=arch)
+    ):
+        await system_architecture()
+    assert system_architecture.reason in coresys.resolution.unsupported
+
+
+@pytest.mark.parametrize("arch", ["amd64", "aarch64"])
+async def test_evaluation_supported_architectures(
+    coresys: CoreSys,
+    arch: str,
+):
+    """Test evaluation of supported system architectures."""
+    system_architecture = EvaluateSystemArchitecture(coresys)
+    await coresys.core.set_state(CoreState.INITIALIZE)
+
+    with patch.object(
+        type(coresys.supervisor), "arch", PropertyMock(return_value=arch)
+    ):
+        await system_architecture()
+    assert system_architecture.reason not in coresys.resolution.unsupported
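Note: the parametrization encodes which CPU architectures remain supported. A hedged sketch of the predicate the evaluation exercises (the constant below is an assumption for illustration; the real logic lives in EvaluateSystemArchitecture):

# Assumed split, mirroring the parametrized cases in the test above.
LEGACY_32BIT_ARCHS = {"i386", "armhf", "armv7"}

def architecture_supported(arch: str) -> bool:
    """Return True when the Supervisor architecture is still supported."""
    return arch not in LEGACY_32BIT_ARCHS

assert not architecture_supported("armv7")
assert architecture_supported("aarch64")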
@@ -1,8 +1,9 @@
 """Test fixup addon execute repair."""

-from unittest.mock import MagicMock, patch
+from http import HTTPStatus
+from unittest.mock import patch

-from docker.errors import NotFound
+import aiodocker
 import pytest

 from supervisor.addons.addon import Addon
@@ -17,7 +18,9 @@ from supervisor.resolution.fixups.addon_execute_repair import FixupAddonExecuteR

 async def test_fixup(docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon):
     """Test fixup rebuilds addon's container."""
-    docker.images.get.side_effect = NotFound("missing")
+    docker.images.inspect.side_effect = aiodocker.DockerError(
+        HTTPStatus.NOT_FOUND, {"message": "missing"}
+    )
     install_addon_ssh.data["image"] = "test_image"

     addon_execute_repair = FixupAddonExecuteRepair(coresys)
@@ -41,7 +44,9 @@ async def test_fixup_max_auto_attempts(
     docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon
 ):
     """Test fixup stops being auto-applied after 5 failures."""
-    docker.images.get.side_effect = NotFound("missing")
+    docker.images.inspect.side_effect = aiodocker.DockerError(
+        HTTPStatus.NOT_FOUND, {"message": "missing"}
+    )
     install_addon_ssh.data["image"] = "test_image"

     addon_execute_repair = FixupAddonExecuteRepair(coresys)
@@ -82,8 +87,6 @@ async def test_fixup_image_exists(
     docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon
 ):
     """Test fixup dismisses if image exists."""
-    docker.images.get.return_value = MagicMock()
-
     addon_execute_repair = FixupAddonExecuteRepair(coresys)
     assert addon_execute_repair.auto is True

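Note: docker-py raised a dedicated `NotFound` exception, while aiodocker signals the same condition with a generic `DockerError` carrying the HTTP status, which is why the fixtures now build the error from `HTTPStatus.NOT_FOUND`. A small sketch of constructing and matching it:

from http import HTTPStatus

import aiodocker

err = aiodocker.DockerError(HTTPStatus.NOT_FOUND, {"message": "missing"})
assert err.status == HTTPStatus.NOT_FOUND
assert err.message == "missing"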
@@ -86,6 +86,22 @@ def test_format_verbose_newlines():
     )


+def test_format_verbose_colors():
+    """Test verbose formatter with ANSI colors in message."""
+    fields = {
+        "__REALTIME_TIMESTAMP": "1379403171000000",
+        "_HOSTNAME": "homeassistant",
+        "SYSLOG_IDENTIFIER": "python",
+        "_PID": "666",
+        "MESSAGE": "\x1b[32mHello, world!\x1b[0m",
+    }
+
+    assert (
+        journal_verbose_formatter(fields)
+        == "2013-09-17 07:32:51.000 homeassistant python[666]: \x1b[32mHello, world!\x1b[0m"
+    )
+
+
 async def test_parsing_simple():
     """Test plain formatter."""
     journal_logs, stream = _journal_logs_mock()
@@ -297,3 +313,54 @@ async def test_parsing_non_utf8_in_binary_message():
     )
     _, line = await anext(journal_logs_reader(journal_logs))
     assert line == "Hello, \ufffd world!"
+
+
+def test_format_plain_no_colors():
+    """Test plain formatter strips ANSI color codes when no_colors=True."""
+    fields = {"MESSAGE": "\x1b[32mHello, world!\x1b[0m"}
+    assert journal_plain_formatter(fields, no_colors=True) == "Hello, world!"
+
+
+def test_format_verbose_no_colors():
+    """Test verbose formatter strips ANSI color codes when no_colors=True."""
+    fields = {
+        "__REALTIME_TIMESTAMP": "1379403171000000",
+        "_HOSTNAME": "homeassistant",
+        "SYSLOG_IDENTIFIER": "python",
+        "_PID": "666",
+        "MESSAGE": "\x1b[32mHello, world!\x1b[0m",
+    }
+    assert (
+        journal_verbose_formatter(fields, no_colors=True)
+        == "2013-09-17 07:32:51.000 homeassistant python[666]: Hello, world!"
+    )
+
+
+async def test_parsing_colored_logs_verbose_no_colors():
+    """Test verbose formatter strips colors from colored logs."""
+    journal_logs, stream = _journal_logs_mock()
+    stream.feed_data(
+        b"__REALTIME_TIMESTAMP=1379403171000000\n"
+        b"_HOSTNAME=homeassistant\n"
+        b"SYSLOG_IDENTIFIER=python\n"
+        b"_PID=666\n"
+        b"MESSAGE\n\x0e\x00\x00\x00\x00\x00\x00\x00\x1b[31mERROR\x1b[0m\n"
+        b"AFTER=after\n\n"
+    )
+    _, line = await anext(
+        journal_logs_reader(
+            journal_logs, log_formatter=LogFormatter.VERBOSE, no_colors=True
+        )
+    )
+    assert line == "2013-09-17 07:32:51.000 homeassistant python[666]: ERROR"
+
+
+async def test_parsing_multiple_color_codes():
+    """Test stripping multiple ANSI color codes in single message."""
+    journal_logs, stream = _journal_logs_mock()
+    stream.feed_data(
+        b"MESSAGE\n\x29\x00\x00\x00\x00\x00\x00\x00\x1b[31mRed\x1b[0m \x1b[32mGreen\x1b[0m \x1b[34mBlue\x1b[0m\n"
+        b"AFTER=after\n\n"
+    )
+    _, line = await anext(journal_logs_reader(journal_logs, no_colors=True))
+    assert line == "Red Green Blue"
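Note: the byte fixtures above use journald's export format, where a binary-safe field is written as the field name, a newline, a little-endian 64-bit payload length, the payload, and a trailing newline; the `\x0e\x00...` prefix is simply len(b"\x1b[31mERROR\x1b[0m") == 14. A sketch of producing such a frame:

import struct

def encode_binary_field(name: bytes, payload: bytes) -> bytes:
    """Encode a journald export-format binary field entry."""
    return name + b"\n" + struct.pack("<Q", len(payload)) + payload + b"\n"

frame = encode_binary_field(b"MESSAGE", b"\x1b[31mERROR\x1b[0m")
assert frame.startswith(b"MESSAGE\n\x0e\x00\x00\x00\x00\x00\x00\x00")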