Mirror of https://github.com/home-assistant/supervisor.git (synced 2025-08-27 18:09:20 +00:00)

**Compare commits:** `2023.07.1...faster_bac` (259 commits)
The mirrored commit table retained only the SHA1 column (its author and date cells were empty). The 259 commit SHAs, in the order listed:

af3256e41e, a163121ad4, eb85be2770, 2da27937a5, 2a29b801a4, 57e65714b0, 0ae40cb51c,
ddd195dfc6, 54b9f23ec5, 242dd3e626, 1b8acb5b60, a7ab96ab12, 06ab11cf87, 1410a1b06e,
5baf19f7a3, 6c66a7ba17, 37b6e09475, e08c8ca26d, 2c09e7929f, 3e760f0d85, 3cc6bd19ad,
b7ddfba71d, 32f21d208f, ed7edd9fe0, fd3c995c7c, c0d1a2d53b, 76bc3015a7, ad2896243b,
d0dcded42d, a0dfa01287, 4ec5c90180, a0c813bfc1, 5f7b3a7087, 6426f02a2c, 7fef92c480,
c64744dedf, 72a2088931, db54556b0f, a2653d8462, ef778238f6, 4cc0ddc35d, a0429179a0,
5cfb45c668, a53b7041f5, f534fae293, f7cbd968d2, 844d76290c, 8c8122eee0, d63f0d5e0b,
96f4ba5d25, 72e64676da, 883e54f989, c2d4be3304, de737ddb91, 11ec6dd9ac, df7541e397,
95ac53d780, e8c4b32a65, eca535c978, 9088810b49, 172a7053ed, 3d5bd2adef, cb03d039f4,
bb31b1bc6e, 727532858e, c0868d9dac, ce26e1dac6, c74f87ca12, 043111b91c, 5c579e557c,
f8f51740c1, 176b63df52, e1979357a5, 030527a4f2, cca74da1f3, 928aff342f, 60a97235df,
c77779cf9d, 9351796ba8, bef0f023d4, 3116f183f5, 16b71a22d1, 5f4581042c, 6976a4cf2e,
68d86b3b7b, d7d34d36c8, 68da328cc5, 78870186d7, d634273b48, 2d970eee02, 1f0ea3c6f7,
d736913f7f, 3e95a9d282, 7cd7259992, 87385cf28e, 3a00c94325, 38d5d2307f, a0c12e7228,
b6625ad909, 6f01341055, 6762a4153a, 31200df89f, 18e422ca77, 1b362716e3, 1e49129197,
a8f818fca5, 0f600da096, b04efe4eac, 7361d39231, 059c0df16c, 6f6b849335, a390500309,
7c576da32c, 6d021c1659, 37c1c89d44, 010043f116, b1010c3c61, 7f0204bfc3, a508cc5efd,
65c90696d5, b9f47898d6, 26f554e46a, b57889c84f, 77fd1b4017, ab6745bc99, a5ea3cae72,
8bcd1b4efd, a24657e565, b7721420fa, 6c564fe4fd, 012bfd7e6c, a70f81aa01, 1376a38de5,
1827ecda65, 994c981228, 5bbfbf44ae, ace58ba735, f9840306a0, 322b3bbb4e, 501318f468,
0234f38b23, 8743e0072f, a79e06afa7, 682b8e0535, d70aa5f9a9, 1c815dcad1, afa467a32b,
274218d48e, 7e73df26ab, ef8fc80c95, 05c39144e3, f5cd35af47, c69ecdafd0, fa90c247ec,
0cd7bd47bb, 36d48d19fc, 9322b68d47, e11ff64b15, 3776dabfcf, d4e5831f0f, 7b3b478e88,
f5afe13e91, 49ce468d83, b26551c812, 394ba580d2, 2f7a54f5fd, 360e085926, 042921925d,
dcf024387b, e1232bc9e7, d96598b5dd, 2605f85668, 2c8e6ca0cd, 0225f574be, 34090bf2eb,
5ae585ce13, 2bb10a32d7, 435743dd2c, 98589fba6d, 32da679e02, 44daffc65b, 0aafda1477,
60604e33b9, 98268b377a, de54979471, ee6e339587, c16cf89318, c66cb7423e, f5bd95a519,
500f9ec1c1, a4713d4a1e, 04452dfb1a, 69d09851d9, 1b649fe5cd, 38572a5a86, f5f51169e6,
07c2178ae1, f30d21361f, 6adb4fbcf7, d73962bd7d, f4b43739da, 4838b280ad, f93b753c03,
de06361cb0, 15ce48c8aa, 38758d05a8, a79fa14ee7, 1eb95b4d33, d04e47f5b3, dad5118f21,
acc0e5c989, 204fcdf479, 93ba8a3574, f2f9e3b514, 61288559b3, bd2c99a455, 1937348b24,
b7b2fae325, 11115923b2, 295133d2e9, 3018b851c8, 222c3fd485, 9650fd2ba1, c88fd9a7d9,
1611beccd1, 71077fb0f7, 9647fba98f, 86f004e45a, a98334ede8, e19c2d6805, 847736dab8,
45f930ab21, 6ea54f1ddb, 81ce0a60f6, bf5d839c22, fc385cfac0, 12d55b8411, e60af93e2b,
1691f0eac7, be4a6a1564, 24c5613a50, 5266927bf7, 4bd2000174, b8178414a4, f9bc2f5993,
f1a72ee418, b19dcef5b7, 1f92ab42ca, 1f940a04fd, f771eaab5f, d1379a8154, e488f02557,
f11cc86254, 175667bfe8, 0a0f14ddea, 9e08677ade, abbf8b9b65, 96d5fc244e, 3b38047fd4
**`.devcontainer/devcontainer.json`**

```diff
@@ -7,34 +7,32 @@
   "appPort": ["9123:8123", "7357:4357"],
   "postCreateCommand": "bash devcontainer_bootstrap",
   "runArgs": ["-e", "GIT_EDITOR=code --wait", "--privileged"],
-  "extensions": [
-    "ms-python.python",
-    "ms-python.vscode-pylance",
-    "visualstudioexptteam.vscodeintellicode",
-    "esbenp.prettier-vscode"
-  ],
-  "mounts": ["type=volume,target=/var/lib/docker"],
-  "settings": {
-    "terminal.integrated.profiles.linux": {
-      "zsh": {
-        "path": "/usr/bin/zsh"
+  "customizations": {
+    "vscode": {
+      "extensions": [
+        "ms-python.python",
+        "ms-python.pylint",
+        "ms-python.vscode-pylance",
+        "visualstudioexptteam.vscodeintellicode",
+        "esbenp.prettier-vscode"
+      ],
+      "settings": {
+        "terminal.integrated.profiles.linux": {
+          "zsh": {
+            "path": "/usr/bin/zsh"
+          }
+        },
+        "terminal.integrated.defaultProfile.linux": "zsh",
+        "editor.formatOnPaste": false,
+        "editor.formatOnSave": true,
+        "editor.formatOnType": true,
+        "files.trimTrailingWhitespace": true,
+        "python.pythonPath": "/usr/local/bin/python3",
+        "python.formatting.provider": "black",
+        "python.formatting.blackArgs": ["--target-version", "py312"],
+        "python.formatting.blackPath": "/usr/local/bin/black"
       }
-    },
-    "terminal.integrated.defaultProfile.linux": "zsh",
-    "editor.formatOnPaste": false,
-    "editor.formatOnSave": true,
-    "editor.formatOnType": true,
-    "files.trimTrailingWhitespace": true,
-    "python.pythonPath": "/usr/local/bin/python3",
-    "python.linting.pylintEnabled": true,
-    "python.linting.enabled": true,
-    "python.formatting.provider": "black",
-    "python.formatting.blackArgs": ["--target-version", "py310"],
-    "python.formatting.blackPath": "/usr/local/bin/black",
-    "python.linting.banditPath": "/usr/local/bin/bandit",
-    "python.linting.flake8Path": "/usr/local/bin/flake8",
-    "python.linting.mypyPath": "/usr/local/bin/mypy",
-    "python.linting.pylintPath": "/usr/local/bin/pylint",
-    "python.linting.pydocstylePath": "/usr/local/bin/pydocstyle"
-  }
+    }
+  },
+  "mounts": ["type=volume,target=/var/lib/docker"]
 }
```
**`.github/workflows/builder.yml`** (vendored, 36 changed lines)
```diff
@@ -33,7 +33,7 @@ on:
       - setup.py

 env:
-  DEFAULT_PYTHON: "3.11"
+  DEFAULT_PYTHON: "3.12"
   BUILD_NAME: supervisor
   BUILD_TYPE: supervisor

@@ -53,7 +53,7 @@ jobs:
       requirements: ${{ steps.requirements.outputs.changed }}
     steps:
       - name: Checkout the repository
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1
        with:
          fetch-depth: 0

@@ -70,13 +70,13 @@ jobs:
      - name: Get changed files
        id: changed_files
        if: steps.version.outputs.publish == 'false'
-        uses: jitterbit/get-changed-files@v1
+        uses: masesgroup/retrieve-changed-files@v3.0.0

      - name: Check if requirements files changed
        id: requirements
        run: |
-          if [[ "${{ steps.changed_files.outputs.all }}" =~ (requirements.txt|build.json) ]]; then
-            echo "::set-output name=changed::true"
+          if [[ "${{ steps.changed_files.outputs.all }}" =~ (requirements.txt|build.yaml) ]]; then
+            echo "changed=true" >> "$GITHUB_OUTPUT"
          fi

  build:
@@ -92,7 +92,7 @@ jobs:
        arch: ${{ fromJson(needs.init.outputs.architectures) }}
    steps:
      - name: Checkout the repository
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1
        with:
          fetch-depth: 0

@@ -106,13 +106,13 @@ jobs:

      - name: Build wheels
        if: needs.init.outputs.requirements == 'true'
-        uses: home-assistant/wheels@2023.04.0
+        uses: home-assistant/wheels@2024.01.0
        with:
-          abi: cp311
+          abi: cp312
          tag: musllinux_1_2
          arch: ${{ matrix.arch }}
          wheels-key: ${{ secrets.WHEELS_KEY }}
-          apk: "libffi-dev;openssl-dev"
+          apk: "libffi-dev;openssl-dev;yaml-dev"
          skip-binary: aiohttp
          env-file: true
          requirements: "requirements.txt"

@@ -125,20 +125,20 @@ jobs:

      - name: Set up Python ${{ env.DEFAULT_PYTHON }}
        if: needs.init.outputs.publish == 'true'
-        uses: actions/setup-python@v4.6.1
+        uses: actions/setup-python@v5.0.0
        with:
          python-version: ${{ env.DEFAULT_PYTHON }}

      - name: Install Cosign
        if: needs.init.outputs.publish == 'true'
-        uses: sigstore/cosign-installer@v3.1.1
+        uses: sigstore/cosign-installer@v3.3.0
        with:
          cosign-release: "v2.0.2"

      - name: Install dirhash and calc hash
        if: needs.init.outputs.publish == 'true'
        run: |
-          pip3 install dirhash
+          pip3 install setuptools dirhash
          dir_hash="$(dirhash "${{ github.workspace }}/supervisor" -a sha256 --match "*.py")"
          echo "${dir_hash}" > rootfs/supervisor.sha256

@@ -149,7 +149,7 @@ jobs:

      - name: Login to GitHub Container Registry
        if: needs.init.outputs.publish == 'true'
-        uses: docker/login-action@v2.2.0
+        uses: docker/login-action@v3.0.0
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}

@@ -160,7 +160,7 @@ jobs:
        run: echo "BUILD_ARGS=--test" >> $GITHUB_ENV

      - name: Build supervisor
-        uses: home-assistant/builder@2023.06.1
+        uses: home-assistant/builder@2024.01.0
        with:
          args: |
            $BUILD_ARGS \

@@ -178,7 +178,7 @@ jobs:
    steps:
      - name: Checkout the repository
        if: needs.init.outputs.publish == 'true'
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1

      - name: Initialize git
        if: needs.init.outputs.publish == 'true'

@@ -203,11 +203,11 @@ jobs:
    timeout-minutes: 60
    steps:
      - name: Checkout the repository
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1

      - name: Build the Supervisor
        if: needs.init.outputs.publish != 'true'
-        uses: home-assistant/builder@2023.06.1
+        uses: home-assistant/builder@2024.01.0
        with:
          args: |
            --test \

@@ -324,7 +324,7 @@ jobs:
          if [ "$(echo $test | jq -r '.result')" != "ok" ]; then
            exit 1
          fi
-          echo "::set-output name=slug::$(echo $test | jq -r '.data.slug')"
+          echo "slug=$(echo $test | jq -r '.data.slug')" >> "$GITHUB_OUTPUT"

      - name: Uninstall SSH add-on
        run: |
```
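Several hunks above migrate from the deprecated `::set-output` workflow command to appending `name=value` lines to the file named by `$GITHUB_OUTPUT`. The same protocol works from any language, not just shell; a minimal Python sketch (the helper name is illustrative, not from the workflow):

```python
import os

def set_output(name: str, value: str) -> None:
    """Write a step output the way GitHub Actions expects since the
    ::set-output command was deprecated: append "name=value" to the
    file whose path the runner exposes via GITHUB_OUTPUT."""
    with open(os.environ["GITHUB_OUTPUT"], "a", encoding="utf-8") as fh:
        fh.write(f"{name}={value}\n")

# Equivalent to: echo "changed=true" >> "$GITHUB_OUTPUT"
set_output("changed", "true")
```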
**`.github/workflows/ci.yaml`** (vendored, 95 changed lines)
```diff
@@ -8,8 +8,8 @@ on:
   pull_request: ~

 env:
-  DEFAULT_PYTHON: "3.11"
-  PRE_COMMIT_HOME: ~/.cache/pre-commit
+  DEFAULT_PYTHON: "3.12"
+  PRE_COMMIT_CACHE: ~/.cache/pre-commit

 concurrency:
   group: "${{ github.workflow }}-${{ github.ref }}"

@@ -25,15 +25,15 @@ jobs:
    name: Prepare Python dependencies
    steps:
      - name: Check out code from GitHub
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1
      - name: Set up Python
        id: python
-        uses: actions/setup-python@v4.6.1
+        uses: actions/setup-python@v5.0.0
        with:
          python-version: ${{ env.DEFAULT_PYTHON }}
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v3.3.1
+        uses: actions/cache@v3.3.3
        with:
          path: venv
          key: |

@@ -47,9 +47,10 @@ jobs:
          pip install -r requirements.txt -r requirements_tests.txt
      - name: Restore pre-commit environment from cache
        id: cache-precommit
-        uses: actions/cache@v3.3.1
+        uses: actions/cache@v3.3.3
        with:
-          path: ${{ env.PRE_COMMIT_HOME }}
+          path: ${{ env.PRE_COMMIT_CACHE }}
+          lookup-only: true
          key: |
            ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
          restore-keys: |

@@ -66,15 +67,15 @@ jobs:
    needs: prepare
    steps:
      - name: Check out code from GitHub
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@v4.6.1
+        uses: actions/setup-python@v5.0.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v3.3.1
+        uses: actions/cache@v3.3.3
        with:
          path: venv
          key: |

@@ -87,7 +88,7 @@ jobs:
      - name: Run black
        run: |
          . venv/bin/activate
-          black --target-version py38 --check supervisor tests setup.py
+          black --target-version py312 --check supervisor tests setup.py

  lint-dockerfile:
    name: Check Dockerfile
@@ -95,7 +96,7 @@ jobs:
    needs: prepare
    steps:
      - name: Check out code from GitHub
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1
      - name: Register hadolint problem matcher
        run: |
          echo "::add-matcher::.github/workflows/matchers/hadolint.json"

@@ -110,15 +111,15 @@ jobs:
    needs: prepare
    steps:
      - name: Check out code from GitHub
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@v4.6.1
+        uses: actions/setup-python@v5.0.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v3.3.1
+        uses: actions/cache@v3.3.3
        with:
          path: venv
          key: |

@@ -130,9 +131,9 @@ jobs:
          exit 1
      - name: Restore pre-commit environment from cache
        id: cache-precommit
-        uses: actions/cache@v3.3.1
+        uses: actions/cache@v3.3.3
        with:
-          path: ${{ env.PRE_COMMIT_HOME }}
+          path: ${{ env.PRE_COMMIT_CACHE }}
          key: |
            ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
      - name: Fail job if cache restore failed

@@ -154,15 +155,15 @@ jobs:
    needs: prepare
    steps:
      - name: Check out code from GitHub
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@v4.6.1
+        uses: actions/setup-python@v5.0.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v3.3.1
+        uses: actions/cache@v3.3.3
        with:
          path: venv
          key: |

@@ -186,15 +187,15 @@ jobs:
    needs: prepare
    steps:
      - name: Check out code from GitHub
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@v4.6.1
+        uses: actions/setup-python@v5.0.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v3.3.1
+        uses: actions/cache@v3.3.3
        with:
          path: venv
          key: |

@@ -206,9 +207,9 @@ jobs:
          exit 1
      - name: Restore pre-commit environment from cache
        id: cache-precommit
-        uses: actions/cache@v3.3.1
+        uses: actions/cache@v3.3.3
        with:
-          path: ${{ env.PRE_COMMIT_HOME }}
+          path: ${{ env.PRE_COMMIT_CACHE }}
          key: |
            ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
      - name: Fail job if cache restore failed

@@ -227,15 +228,15 @@ jobs:
    needs: prepare
    steps:
      - name: Check out code from GitHub
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@v4.6.1
+        uses: actions/setup-python@v5.0.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v3.3.1
+        uses: actions/cache@v3.3.3
        with:
          path: venv
          key: |

@@ -247,9 +248,9 @@ jobs:
          exit 1
      - name: Restore pre-commit environment from cache
        id: cache-precommit
-        uses: actions/cache@v3.3.1
+        uses: actions/cache@v3.3.3
        with:
-          path: ${{ env.PRE_COMMIT_HOME }}
+          path: ${{ env.PRE_COMMIT_CACHE }}
          key: |
            ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
      - name: Fail job if cache restore failed

@@ -271,15 +272,15 @@ jobs:
    needs: prepare
    steps:
      - name: Check out code from GitHub
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@v4.6.1
+        uses: actions/setup-python@v5.0.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v3.3.1
+        uses: actions/cache@v3.3.3
        with:
          path: venv
          key: |

@@ -303,15 +304,15 @@ jobs:
    needs: prepare
    steps:
      - name: Check out code from GitHub
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@v4.6.1
+        uses: actions/setup-python@v5.0.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v3.3.1
+        uses: actions/cache@v3.3.3
        with:
          path: venv
          key: |

@@ -323,9 +324,9 @@ jobs:
          exit 1
      - name: Restore pre-commit environment from cache
        id: cache-precommit
-        uses: actions/cache@v3.3.1
+        uses: actions/cache@v3.3.3
        with:
-          path: ${{ env.PRE_COMMIT_HOME }}
+          path: ${{ env.PRE_COMMIT_CACHE }}
          key: |
            ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
      - name: Fail job if cache restore failed

@@ -344,19 +345,19 @@ jobs:
    name: Run tests Python ${{ needs.prepare.outputs.python-version }}
    steps:
      - name: Check out code from GitHub
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@v4.6.1
+        uses: actions/setup-python@v5.0.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Install Cosign
-        uses: sigstore/cosign-installer@v3.1.1
+        uses: sigstore/cosign-installer@v3.3.0
        with:
          cosign-release: "v2.0.2"
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v3.3.1
+        uses: actions/cache@v3.3.3
        with:
          path: venv
          key: |

@@ -391,7 +392,7 @@ jobs:
          -o console_output_style=count \
          tests
      - name: Upload coverage artifact
-        uses: actions/upload-artifact@v3.1.2
+        uses: actions/upload-artifact@v4.0.0
        with:
          name: coverage-${{ matrix.python-version }}
          path: .coverage

@@ -402,15 +403,15 @@ jobs:
    needs: ["pytest", "prepare"]
    steps:
      - name: Check out code from GitHub
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
-        uses: actions/setup-python@v4.6.1
+        uses: actions/setup-python@v5.0.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v3.3.1
+        uses: actions/cache@v3.3.3
        with:
          path: venv
          key: |

@@ -421,7 +422,7 @@ jobs:
          echo "Failed to restore Python virtual environment from cache"
          exit 1
      - name: Download all coverage artifacts
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4.1.1
      - name: Combine coverage results
        run: |
          . venv/bin/activate
```
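Every job above restores the same `venv` directory from `actions/cache`, keyed on a hash of the pinned requirements files, and fails fast when the restore misses. The exact `hashFiles()` digest is computed by the Actions runner; a rough Python sketch of the idea (function name and key format are illustrative only):

```python
import hashlib
from pathlib import Path

def requirements_cache_key(*files: str) -> str:
    """Derive a cache key that changes whenever the pinned deps change,
    mirroring the role of hashFiles('requirements*.txt') in the workflow."""
    digest = hashlib.sha256()
    for name in sorted(files):  # stable order so the key is deterministic
        digest.update(Path(name).read_bytes())
    return f"venv-{digest.hexdigest()[:16]}"

print(requirements_cache_key("requirements.txt", "requirements_tests.txt"))
```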
**`.github/workflows/lock.yml`** (vendored, 2 changed lines)
```diff
@@ -9,7 +9,7 @@ jobs:
   lock:
     runs-on: ubuntu-latest
     steps:
-      - uses: dessant/lock-threads@v4.0.1
+      - uses: dessant/lock-threads@v5.0.1
        with:
          github-token: ${{ github.token }}
          issue-inactive-days: "30"
```
**`.github/workflows/release-drafter.yml`** (vendored, 6 changed lines)
```diff
@@ -11,7 +11,7 @@ jobs:
     name: Release Drafter
     steps:
       - name: Checkout the repository
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1
        with:
          fetch-depth: 0

@@ -33,10 +33,10 @@ jobs:

          echo Current version: $latest
          echo New target version: $datepre.$newpost
-          echo "::set-output name=version::$datepre.$newpost"
+          echo "version=$datepre.$newpost" >> "$GITHUB_OUTPUT"

      - name: Run Release Drafter
-        uses: release-drafter/release-drafter@v5.24.0
+        uses: release-drafter/release-drafter@v5.25.0
        with:
          tag: ${{ steps.version.outputs.version }}
          name: ${{ steps.version.outputs.version }}
```
**`.github/workflows/sentry.yaml`** (vendored, 4 changed lines)
```diff
@@ -10,9 +10,9 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Check out code from GitHub
-        uses: actions/checkout@v3.5.3
+        uses: actions/checkout@v4.1.1
      - name: Sentry Release
-        uses: getsentry/action-release@v1.4.1
+        uses: getsentry/action-release@v1.6.0
        env:
          SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
          SENTRY_ORG: ${{ secrets.SENTRY_ORG }}
```
**`.github/workflows/stale.yml`** (vendored, 2 changed lines)
```diff
@@ -9,7 +9,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v8.0.0
+      - uses: actions/stale@v9.0.0
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          days-before-stale: 30
```
**`.hadolint.yaml`** (filename inferred from the hadolint ignore codes; the mirror dropped the header)

```diff
@@ -3,4 +3,5 @@ ignored:
   - DL3006
   - DL3013
   - DL3018
+  - DL3042
   - SC2155
```
**`.pre-commit-config.yaml`** (filename inferred from content; the mirror dropped the header)

```diff
@@ -1,16 +1,16 @@
 repos:
   - repo: https://github.com/psf/black
-    rev: 23.1.0
+    rev: 23.12.1
     hooks:
       - id: black
         args:
           - --safe
           - --quiet
           - --target-version
-          - py310
+          - py312
        files: ^((supervisor|tests)/.+)?[^/]+\.py$
  - repo: https://github.com/PyCQA/flake8
-    rev: 6.0.0
+    rev: 7.0.0
    hooks:
      - id: flake8
        additional_dependencies:
@@ -18,17 +18,17 @@ repos:
          - pydocstyle==6.3.0
        files: ^(supervisor|script|tests)/.+\.py$
  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.3.0
+    rev: v4.5.0
    hooks:
      - id: check-executables-have-shebangs
        stages: [manual]
      - id: check-json
  - repo: https://github.com/PyCQA/isort
-    rev: 5.12.0
+    rev: 5.13.2
    hooks:
      - id: isort
  - repo: https://github.com/asottile/pyupgrade
-    rev: v3.4.0
+    rev: v3.15.0
    hooks:
      - id: pyupgrade
-        args: [--py310-plus]
+        args: [--py312-plus]
```
**`Dockerfile`** (filename inferred from content; the mirror dropped the header)

```diff
@@ -15,6 +15,7 @@ WORKDIR /usr/src
 RUN \
     set -x \
     && apk add --no-cache \
+        findutils \
         eudev \
         eudev-libs \
         git \
@@ -22,6 +23,7 @@ RUN \
         libpulse \
         musl \
         openssl \
+        yaml \
         \
     && curl -Lso /usr/bin/cosign "https://github.com/home-assistant/cosign/releases/download/${COSIGN_VERSION}/cosign_${BUILD_ARCH}" \
     && chmod a+x /usr/bin/cosign
@@ -30,15 +32,14 @@ RUN \
 COPY requirements.txt .
 RUN \
     export MAKEFLAGS="-j$(nproc)" \
-    && pip3 install --no-cache-dir --no-index --only-binary=:all: --find-links \
-        "https://wheels.home-assistant.io/musllinux/" \
+    && pip3 install --only-binary=:all: \
         -r ./requirements.txt \
     && rm -f requirements.txt

 # Install Home Assistant Supervisor
 COPY . supervisor
 RUN \
-    pip3 install --no-cache-dir -e ./supervisor \
+    pip3 install -e ./supervisor \
     && python3 -m compileall ./supervisor/supervisor
```
**`build.yaml`** (10 changed lines)
```diff
@@ -1,10 +1,10 @@
 image: ghcr.io/home-assistant/{arch}-hassio-supervisor
 build_from:
-  aarch64: ghcr.io/home-assistant/aarch64-base-python:3.11-alpine3.16
-  armhf: ghcr.io/home-assistant/armhf-base-python:3.11-alpine3.16
-  armv7: ghcr.io/home-assistant/armv7-base-python:3.11-alpine3.16
-  amd64: ghcr.io/home-assistant/amd64-base-python:3.11-alpine3.16
-  i386: ghcr.io/home-assistant/i386-base-python:3.11-alpine3.16
+  aarch64: ghcr.io/home-assistant/aarch64-base-python:3.12-alpine3.18
+  armhf: ghcr.io/home-assistant/armhf-base-python:3.12-alpine3.18
+  armv7: ghcr.io/home-assistant/armv7-base-python:3.12-alpine3.18
+  amd64: ghcr.io/home-assistant/amd64-base-python:3.12-alpine3.18
+  i386: ghcr.io/home-assistant/i386-base-python:3.12-alpine3.18
 codenotary:
   signer: notary@home-assistant.io
   base_image: notary@home-assistant.io
```
**`pylintrc`** (deleted, 45 lines)
```diff
@@ -1,45 +0,0 @@
-[MASTER]
-reports=no
-jobs=2
-
-good-names=id,i,j,k,ex,Run,_,fp,T,os
-
-extension-pkg-whitelist=
-    ciso8601
-
-# Reasons disabled:
-# format - handled by black
-# locally-disabled - it spams too much
-# duplicate-code - unavoidable
-# cyclic-import - doesn't test if both import on load
-# abstract-class-not-used - is flaky, should not show up but does
-# unused-argument - generic callbacks and setup methods create a lot of warnings
-# too-many-* - are not enforced for the sake of readability
-# too-few-* - same as too-many-*
-# abstract-method - with intro of async there are always methods missing
-disable=
-    format,
-    abstract-method,
-    cyclic-import,
-    duplicate-code,
-    locally-disabled,
-    no-else-return,
-    not-context-manager,
-    too-few-public-methods,
-    too-many-arguments,
-    too-many-branches,
-    too-many-instance-attributes,
-    too-many-lines,
-    too-many-locals,
-    too-many-public-methods,
-    too-many-return-statements,
-    too-many-statements,
-    unused-argument,
-    consider-using-with
-
-[EXCEPTIONS]
-overgeneral-exceptions=builtins.Exception
-
-
-[TYPECHECK]
-ignored-modules = distutils
```
**`pyproject.toml`** (new file, 112 lines)
```diff
@@ -0,0 +1,112 @@
+[build-system]
+requires = ["setuptools~=68.0.0", "wheel~=0.40.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "Supervisor"
+dynamic = ["version", "dependencies"]
+license = { text = "Apache-2.0" }
+description = "Open-source private cloud os for Home-Assistant based on HassOS"
+readme = "README.md"
+authors = [
+    { name = "The Home Assistant Authors", email = "hello@home-assistant.io" },
+]
+keywords = ["docker", "home-assistant", "api"]
+requires-python = ">=3.12.0"
+
+[project.urls]
+"Homepage" = "https://www.home-assistant.io/"
+"Source Code" = "https://github.com/home-assistant/supervisor"
+"Bug Reports" = "https://github.com/home-assistant/supervisor/issues"
+"Docs: Dev" = "https://developers.home-assistant.io/"
+"Discord" = "https://www.home-assistant.io/join-chat/"
+"Forum" = "https://community.home-assistant.io/"
+
+[tool.setuptools]
+platforms = ["any"]
+zip-safe = false
+include-package-data = true
+
+[tool.setuptools.packages.find]
+include = ["supervisor*"]
+
+[tool.pylint.MAIN]
+py-version = "3.11"
+# Use a conservative default here; 2 should speed up most setups and not hurt
+# any too bad. Override on command line as appropriate.
+jobs = 2
+persistent = false
+extension-pkg-allow-list = ["ciso8601"]
+
+[tool.pylint.BASIC]
+class-const-naming-style = "any"
+good-names = ["id", "i", "j", "k", "ex", "Run", "_", "fp", "T", "os"]
+
+[tool.pylint."MESSAGES CONTROL"]
+# Reasons disabled:
+# format - handled by black
+# abstract-method - with intro of async there are always methods missing
+# cyclic-import - doesn't test if both import on load
+# duplicate-code - unavoidable
+# locally-disabled - it spams too much
+# too-many-* - are not enforced for the sake of readability
+# too-few-* - same as too-many-*
+# unused-argument - generic callbacks and setup methods create a lot of warnings
+disable = [
+    "format",
+    "abstract-method",
+    "cyclic-import",
+    "duplicate-code",
+    "locally-disabled",
+    "no-else-return",
+    "not-context-manager",
+    "too-few-public-methods",
+    "too-many-arguments",
+    "too-many-branches",
+    "too-many-instance-attributes",
+    "too-many-lines",
+    "too-many-locals",
+    "too-many-public-methods",
+    "too-many-return-statements",
+    "too-many-statements",
+    "unused-argument",
+    "consider-using-with",
+]
+
+[tool.pylint.REPORTS]
+score = false
+
+[tool.pylint.TYPECHECK]
+ignored-modules = ["distutils"]
+
+[tool.pylint.FORMAT]
+expected-line-ending-format = "LF"
+
+[tool.pylint.EXCEPTIONS]
+overgeneral-exceptions = ["builtins.BaseException", "builtins.Exception"]
+
+[tool.pytest.ini_options]
+testpaths = ["tests"]
+norecursedirs = [".git"]
+log_format = "%(asctime)s.%(msecs)03d %(levelname)-8s %(threadName)s %(name)s:%(filename)s:%(lineno)s %(message)s"
+log_date_format = "%Y-%m-%d %H:%M:%S"
+asyncio_mode = "auto"
+filterwarnings = [
+    "error",
+    "ignore:pkg_resources is deprecated as an API:DeprecationWarning:dirhash",
+    "ignore::pytest.PytestUnraisableExceptionWarning",
+]
+
+[tool.isort]
+multi_line_output = 3
+include_trailing_comma = true
+force_grid_wrap = 0
+line_length = 88
+indent = "    "
+force_sort_within_sections = true
+sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"]
+default_section = "THIRDPARTY"
+forced_separate = "tests"
+combine_as_imports = true
+use_parentheses = true
+known_first_party = ["supervisor", "tests"]
```
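The `[tool.pytest.ini_options]` table above carries `asyncio_mode = "auto"` forward, which is what makes the standalone pytest ini file removed below redundant. In auto mode, pytest-asyncio collects and runs every `async def` test directly, with no per-test marker. A minimal sketch of what that enables (file and test names are illustrative; requires pytest and pytest-asyncio installed):

```python
# test_example.py
# With asyncio_mode = "auto", pytest-asyncio wraps this coroutine in an
# event loop automatically; no @pytest.mark.asyncio decorator is needed.
import asyncio

async def test_sleep_returns_none():
    assert await asyncio.sleep(0) is None
```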
**pytest configuration** (removed; presumably `pytest.ini`, now folded into `pyproject.toml`)

```diff
@@ -1,2 +0,0 @@
-[pytest]
-asyncio_mode = auto
```
**`requirements.txt`** (filename inferred from content; the mirror dropped the header)

```diff
@@ -1,26 +1,30 @@
-aiodns==3.0.0
-aiohttp==3.8.4
-async_timeout==4.0.2
+aiodns==3.1.1
+aiohttp==3.9.1
+aiohttp-fast-url-dispatcher==0.3.0
+async_timeout==4.0.3
 atomicwrites-homeassistant==1.4.1
-attrs==23.1.0
-awesomeversion==23.5.0
-brotli==1.0.9
-ciso8601==2.3.0
-colorlog==6.7.0
+attrs==23.2.0
+awesomeversion==23.11.0
+brotli==1.1.0
+ciso8601==2.3.1
+colorlog==6.8.0
 cpe==1.2.1
-cryptography==41.0.1
-debugpy==1.6.7
-deepmerge==1.1.0
+cryptography==41.0.7
+debugpy==1.8.0
+deepmerge==1.1.1
 dirhash==0.2.1
-docker==6.1.3
-faust-cchardet==2.1.18
-gitpython==3.1.31
-jinja2==3.1.2
+docker==7.0.0
+faust-cchardet==2.1.19
+gitpython==3.1.41
+jinja2==3.1.3
 orjson==3.9.10
 pulsectl==23.5.2
 pyudev==0.24.1
-ruamel.yaml==0.17.21
-securetar==2023.3.0
-sentry-sdk==1.27.0
-voluptuous==0.13.1
-dbus-fast==1.86.0
-typing_extensions==4.7.1
+PyYAML==6.0.1
+securetar==2023.12.0
+sentry-sdk==1.39.2
+setuptools==69.0.3
+voluptuous==0.14.1
+dbus-fast==2.21.0
+typing_extensions==4.9.0
+zlib-fast==0.1.0
```
**`requirements_tests.txt`** (filename inferred from content; the mirror dropped the header)

```diff
@@ -1,16 +1,16 @@
-black==23.3.0
-coverage==7.2.7
+black==23.12.1
+coverage==7.4.0
 flake8-docstrings==1.7.0
-flake8==6.0.0
-pre-commit==3.3.3
+flake8==7.0.0
+pre-commit==3.6.0
 pydocstyle==6.3.0
-pylint==2.17.4
-pytest-aiohttp==1.0.4
-pytest-asyncio==0.18.3
+pylint==3.0.3
+pytest-aiohttp==1.0.5
+pytest-asyncio==0.23.3
 pytest-cov==4.1.0
-pytest-timeout==2.1.0
-pytest==7.4.0
-pyupgrade==3.7.0
-time-machine==2.10.0
-typing_extensions==4.7.1
-urllib3==2.0.3
+pytest-timeout==2.2.0
+pytest==7.4.4
+pyupgrade==3.15.0
+time-machine==2.13.0
+typing_extensions==4.9.0
+urllib3==2.1.0
```
**Unnamed shell watchdog script** (the mirror dropped the filename)

```diff
@@ -15,7 +15,7 @@ do
     if [[ "${supervisor_state}" = "running" ]]; then

         # Check API
-        if bashio::supervisor.ping; then
+        if bashio::supervisor.ping > /dev/null; then
             failed_count=0
         else
             bashio::log.warning "Maybe found an issue on API healthy"
```
**`setup.cfg`** (14 changed lines)
```diff
@@ -1,17 +1,3 @@
-[isort]
-multi_line_output = 3
-include_trailing_comma=True
-force_grid_wrap=0
-line_length=88
-indent = "    "
-force_sort_within_sections = true
-sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
-default_section = THIRDPARTY
-forced_separate = tests
-combine_as_imports = true
-use_parentheses = true
-known_first_party = supervisor,tests
-
 [flake8]
 exclude = .venv,.git,.tox,docs,venv,bin,lib,deps,build
 doctests = True
```
**`setup.py`** (75 changed lines)
```diff
@@ -1,60 +1,27 @@
 """Home Assistant Supervisor setup."""
+from pathlib import Path
+import re
+
 from setuptools import setup

-from supervisor.const import SUPERVISOR_VERSION
+RE_SUPERVISOR_VERSION = re.compile(r"^SUPERVISOR_VERSION =\s*(.+)$")
+
+SUPERVISOR_DIR = Path(__file__).parent
+REQUIREMENTS_FILE = SUPERVISOR_DIR / "requirements.txt"
+CONST_FILE = SUPERVISOR_DIR / "supervisor/const.py"
+
+REQUIREMENTS = REQUIREMENTS_FILE.read_text(encoding="utf-8")
+CONSTANTS = CONST_FILE.read_text(encoding="utf-8")
+
+
+def _get_supervisor_version():
+    for line in CONSTANTS.split("/n"):
+        if match := RE_SUPERVISOR_VERSION.match(line):
+            return match.group(1)
+    return "99.9.9dev"


 setup(
-    name="Supervisor",
-    version=SUPERVISOR_VERSION,
-    license="BSD License",
-    author="The Home Assistant Authors",
-    author_email="hello@home-assistant.io",
-    url="https://home-assistant.io/",
-    description=("Open-source private cloud os for Home-Assistant" " based on HassOS"),
-    long_description=(
-        "A maintainless private cloud operator system that"
-        "setup a Home-Assistant instance. Based on HassOS"
-    ),
-    classifiers=[
-        "Intended Audience :: End Users/Desktop",
-        "Intended Audience :: Developers",
-        "License :: OSI Approved :: Apache Software License",
-        "Operating System :: OS Independent",
-        "Topic :: Home Automation",
-        "Topic :: Software Development :: Libraries :: Python Modules",
-        "Topic :: Scientific/Engineering :: Atmospheric Science",
-        "Development Status :: 5 - Production/Stable",
-        "Intended Audience :: Developers",
-        "Programming Language :: Python :: 3.8",
-    ],
-    keywords=["docker", "home-assistant", "api"],
-    zip_safe=False,
-    platforms="any",
-    packages=[
-        "supervisor.addons",
-        "supervisor.api",
-        "supervisor.backups",
-        "supervisor.dbus.network",
-        "supervisor.dbus.network.setting",
-        "supervisor.dbus",
-        "supervisor.discovery.services",
-        "supervisor.discovery",
-        "supervisor.docker",
-        "supervisor.homeassistant",
-        "supervisor.host",
-        "supervisor.jobs",
-        "supervisor.misc",
-        "supervisor.plugins",
-        "supervisor.resolution.checks",
-        "supervisor.resolution.evaluations",
-        "supervisor.resolution.fixups",
-        "supervisor.resolution",
-        "supervisor.security",
-        "supervisor.services.modules",
-        "supervisor.services",
-        "supervisor.store",
-        "supervisor.utils",
-        "supervisor",
-    ],
-    include_package_data=True,
+    version=_get_supervisor_version(),
+    dependencies=REQUIREMENTS.split("/n"),
 )
```
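Note that, as mirrored, both `_get_supervisor_version()` and the `dependencies=` argument split on the two-character literal `"/n"` rather than the newline escape `"\n"`. `str.split("/n")` leaves the whole file as a single element, so the per-line version scan can never match mid-file and falls through to `"99.9.9dev"`. This looks like a typo in the source rather than an extraction artifact; if a newline split was intended, a corrected sketch of the version scan would be:

```python
import re

RE_SUPERVISOR_VERSION = re.compile(r"^SUPERVISOR_VERSION =\s*(.+)$")

def get_supervisor_version(constants: str) -> str:
    """Scan const.py content line by line for the pinned version."""
    for line in constants.split("\n"):  # "\n", not the literal "/n"
        if match := RE_SUPERVISOR_VERSION.match(line):
            return match.group(1)
    return "99.9.9dev"
```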
**`supervisor/__main__.py`** (filename inferred from content; the mirror dropped the header)

```diff
@@ -5,7 +5,13 @@ import logging
 from pathlib import Path
 import sys

-from supervisor import bootstrap
+import zlib_fast
+
+# Enable fast zlib before importing supervisor
+zlib_fast.enable()
+
+from supervisor import bootstrap  # noqa: E402
+from supervisor.utils.logging import activate_log_queue_handler  # noqa: E402

 _LOGGER: logging.Logger = logging.getLogger(__name__)

@@ -38,6 +44,8 @@ if __name__ == "__main__":
     executor = ThreadPoolExecutor(thread_name_prefix="SyncWorker")
     loop.set_default_executor(executor)

+    activate_log_queue_handler()
+
     _LOGGER.info("Initializing Supervisor setup")
     coresys = loop.run_until_complete(bootstrap.initialize_coresys())
     loop.set_debug(coresys.config.debug)
```
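`zlib_fast.enable()` has to run before any module that binds zlib functions at import time gets loaded, which is why the `supervisor` imports move below it and carry `# noqa: E402` (import-not-at-top suppressions). A minimal sketch of the same ordering rule, assuming the `zlib-fast` package pinned in `requirements.txt` above is installed:

```python
import zlib_fast

# Patch the stdlib zlib with a faster backend before anything else
# resolves zlib symbols at import time; modules imported afterwards
# (including transitive imports) pick up the fast implementation.
zlib_fast.enable()

import zlib  # noqa: E402 - imported after enable() on purpose

print(len(zlib.compress(b"example payload")))
```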
**`supervisor/addons/__init__.py`** (filename inferred from content; the mirror dropped the header, garbled the hunk header to `@@ -1,457 +1 @@`, and lost the per-line change markers, so the hunk body is reproduced as a plain listing)

```python
"""Init file for Supervisor add-ons."""
import asyncio
from collections.abc import Awaitable
from contextlib import suppress
import logging
import tarfile
from typing import Union

from ..const import AddonBoot, AddonStartup, AddonState
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import (
    AddonConfigurationError,
    AddonsError,
    AddonsJobError,
    AddonsNotSupportedError,
    CoreDNSError,
    DockerAPIError,
    DockerError,
    DockerNotFound,
    HomeAssistantAPIError,
    HostAppArmorError,
)
from ..jobs.decorator import Job, JobCondition
from ..resolution.const import ContextType, IssueType, SuggestionType
from ..store.addon import AddonStore
from ..utils import check_exception_chain
from ..utils.sentry import capture_exception
from .addon import Addon
from .const import ADDON_UPDATE_CONDITIONS
from .data import AddonsData

_LOGGER: logging.Logger = logging.getLogger(__name__)

AnyAddon = Union[Addon, AddonStore]


class AddonManager(CoreSysAttributes):
    """Manage add-ons inside Supervisor."""

    def __init__(self, coresys: CoreSys):
        """Initialize Docker base wrapper."""
        self.coresys: CoreSys = coresys
        self.data: AddonsData = AddonsData(coresys)
        self.local: dict[str, Addon] = {}
        self.store: dict[str, AddonStore] = {}

    @property
    def all(self) -> list[AnyAddon]:
        """Return a list of all add-ons."""
        addons: dict[str, AnyAddon] = {**self.store, **self.local}
        return list(addons.values())

    @property
    def installed(self) -> list[Addon]:
        """Return a list of all installed add-ons."""
        return list(self.local.values())

    def get(self, addon_slug: str, local_only: bool = False) -> AnyAddon | None:
        """Return an add-on from slug.

        Prio:
          1 - Local
          2 - Store
        """
        if addon_slug in self.local:
            return self.local[addon_slug]
        if not local_only:
            return self.store.get(addon_slug)
        return None

    def from_token(self, token: str) -> Addon | None:
        """Return an add-on from Supervisor token."""
        for addon in self.installed:
            if token == addon.supervisor_token:
                return addon
        return None

    async def load(self) -> None:
        """Start up add-on management."""
        tasks = []
        for slug in self.data.system:
            addon = self.local[slug] = Addon(self.coresys, slug)
            tasks.append(self.sys_create_task(addon.load()))

        # Run initial tasks
        _LOGGER.info("Found %d installed add-ons", len(tasks))
        if tasks:
            await asyncio.wait(tasks)

        # Sync DNS
        await self.sync_dns()

    async def boot(self, stage: AddonStartup) -> None:
        """Boot add-ons with mode auto."""
        tasks: list[Addon] = []
        for addon in self.installed:
            if addon.boot != AddonBoot.AUTO or addon.startup != stage:
                continue
            tasks.append(addon)

        # Evaluate add-ons which need to be started
        _LOGGER.info("Phase '%s' starting %d add-ons", stage, len(tasks))
        if not tasks:
            return

        # Start Add-ons sequential
        # avoid issue on slow IO
        # Config.wait_boot is deprecated. Until addons update with healthchecks,
        # add a sleep task for it to keep the same minimum amount of wait time
        wait_boot: list[Awaitable[None]] = [asyncio.sleep(self.sys_config.wait_boot)]
        for addon in tasks:
            try:
                if start_task := await addon.start():
                    wait_boot.append(start_task)
            except AddonsError as err:
                # Check if there is an system/user issue
                if check_exception_chain(
                    err, (DockerAPIError, DockerNotFound, AddonConfigurationError)
                ):
                    addon.boot = AddonBoot.MANUAL
                    addon.save_persist()
            except Exception as err:  # pylint: disable=broad-except
                capture_exception(err)
            else:
                continue

            _LOGGER.warning("Can't start Add-on %s", addon.slug)

        # Ignore exceptions from waiting for addon startup, addon errors handled elsewhere
        await asyncio.gather(*wait_boot, return_exceptions=True)

    async def shutdown(self, stage: AddonStartup) -> None:
        """Shutdown addons."""
        tasks: list[Addon] = []
        for addon in self.installed:
            if addon.state != AddonState.STARTED or addon.startup != stage:
                continue
            tasks.append(addon)

        # Evaluate add-ons which need to be stopped
        _LOGGER.info("Phase '%s' stopping %d add-ons", stage, len(tasks))
        if not tasks:
            return

        # Stop Add-ons sequential
        # avoid issue on slow IO
        for addon in tasks:
            try:
                await addon.stop()
            except Exception as err:  # pylint: disable=broad-except
                _LOGGER.warning("Can't stop Add-on %s: %s", addon.slug, err)
                capture_exception(err)

    @Job(
        conditions=ADDON_UPDATE_CONDITIONS,
        on_condition=AddonsJobError,
    )
    async def install(self, slug: str) -> None:
        """Install an add-on."""
        if slug in self.local:
            raise AddonsError(f"Add-on {slug} is already installed", _LOGGER.warning)
        store = self.store.get(slug)

        if not store:
            raise AddonsError(f"Add-on {slug} does not exist", _LOGGER.error)

        store.validate_availability()

        self.data.install(store)
        addon = Addon(self.coresys, slug)
        await addon.load()

        if not addon.path_data.is_dir():
            _LOGGER.info(
                "Creating Home Assistant add-on data folder %s", addon.path_data
            )
            addon.path_data.mkdir()

        # Setup/Fix AppArmor profile
        await addon.install_apparmor()

        try:
            await addon.instance.install(store.version, store.image, arch=addon.arch)
        except DockerError as err:
            self.data.uninstall(addon)
            raise AddonsError() from err

        self.local[slug] = addon

        # Reload ingress tokens
        if addon.with_ingress:
            await self.sys_ingress.reload()

        _LOGGER.info("Add-on '%s' successfully installed", slug)

    async def uninstall(self, slug: str) -> None:
        """Remove an add-on."""
        if slug not in self.local:
            _LOGGER.warning("Add-on %s is not installed", slug)
            return
        addon = self.local[slug]

        try:
            await addon.instance.remove()
        except DockerError as err:
            raise AddonsError() from err

        addon.state = AddonState.UNKNOWN

        await addon.unload()

        # Cleanup audio settings
        if addon.path_pulse.exists():
            with suppress(OSError):
                addon.path_pulse.unlink()

        # Cleanup AppArmor profile
        with suppress(HostAppArmorError):
            await addon.uninstall_apparmor()

        # Cleanup Ingress panel from sidebar
        if addon.ingress_panel:
            addon.ingress_panel = False
            with suppress(HomeAssistantAPIError):
                await self.sys_ingress.update_hass_panel(addon)

        # Cleanup Ingress dynamic port assignment
        if addon.with_ingress:
            self.sys_create_task(self.sys_ingress.reload())
            self.sys_ingress.del_dynamic_port(slug)

        # Cleanup discovery data
        for message in self.sys_discovery.list_messages:
            if message.addon != addon.slug:
                continue
            self.sys_discovery.remove(message)

        # Cleanup services data
        for service in self.sys_services.list_services:
            if addon.slug not in service.active:
                continue
            service.del_service_data(addon)

        self.data.uninstall(addon)
        self.local.pop(slug)

        _LOGGER.info("Add-on '%s' successfully removed", slug)

    @Job(
        conditions=ADDON_UPDATE_CONDITIONS,
        on_condition=AddonsJobError,
    )
    async def update(
        self, slug: str, backup: bool | None = False
    ) -> Awaitable[None] | None:
        """Update add-on.

        Returns a coroutine that completes when addon has state 'started' (see addon.start)
        if addon is started after update. Else nothing is returned.
        """
        if slug not in self.local:
            raise AddonsError(f"Add-on {slug} is not installed", _LOGGER.error)
        addon = self.local[slug]

        if addon.is_detached:
            raise AddonsError(
                f"Add-on {slug} is not available inside store", _LOGGER.error
            )
        store = self.store[slug]

        if addon.version == store.version:
            raise AddonsError(f"No update available for add-on {slug}", _LOGGER.warning)

        # Check if available, Maybe something have changed
        store.validate_availability()

        if backup:
            await self.sys_backups.do_backup_partial(
                name=f"addon_{addon.slug}_{addon.version}",
                homeassistant=False,
                addons=[addon.slug],
            )

        # Update instance
        last_state: AddonState = addon.state
        old_image = addon.image
        try:
            await addon.instance.update(store.version, store.image)
        except DockerError as err:
            raise AddonsError() from err

        _LOGGER.info("Add-on '%s' successfully updated", slug)
        self.data.update(store)

        # Cleanup
        with suppress(DockerError):
            await addon.instance.cleanup(old_image=old_image)

        # Setup/Fix AppArmor profile
        await addon.install_apparmor()

        # restore state
        return (
            await addon.start()
            if last_state in [AddonState.STARTED, AddonState.STARTUP]
            else None
        )

    @Job(
        conditions=[
            JobCondition.FREE_SPACE,
            JobCondition.INTERNET_HOST,
            JobCondition.HEALTHY,
        ],
        on_condition=AddonsJobError,
    )
    async def rebuild(self, slug: str) -> Awaitable[None] | None:
        """Perform a rebuild of local build add-on.

        Returns a coroutine that completes when addon has state 'started' (see addon.start)
        if addon is started after rebuild. Else nothing is returned.
        """
        if slug not in self.local:
            raise AddonsError(f"Add-on {slug} is not installed", _LOGGER.error)
        addon = self.local[slug]

        if addon.is_detached:
            raise AddonsError(
                f"Add-on {slug} is not available inside store", _LOGGER.error
            )
        store = self.store[slug]

        # Check if a rebuild is possible now
        if addon.version != store.version:
            raise AddonsError(
                "Version changed, use Update instead Rebuild", _LOGGER.error
            )
        if not addon.need_build:
            raise AddonsNotSupportedError(
                "Can't rebuild a image based add-on", _LOGGER.error
            )

        # remove docker container but not addon config
        last_state: AddonState = addon.state
        try:
            await addon.instance.remove()
            await addon.instance.install(addon.version)
        except DockerError as err:
            raise AddonsError() from err

        self.data.update(store)
        _LOGGER.info("Add-on '%s' successfully rebuilt", slug)

        # restore state
        return (
            await addon.start()
            if last_state in [AddonState.STARTED, AddonState.STARTUP]
            else None
        )

    @Job(
        conditions=[
            JobCondition.FREE_SPACE,
            JobCondition.INTERNET_HOST,
            JobCondition.HEALTHY,
        ],
        on_condition=AddonsJobError,
    )
    async def restore(
        self, slug: str, tar_file: tarfile.TarFile
    ) -> Awaitable[None] | None:
        """Restore state of an add-on.

        Returns a coroutine that completes when addon has state 'started' (see addon.start)
        if addon is started after restore. Else nothing is returned.
        """
        if slug not in self.local:
            _LOGGER.debug("Add-on %s is not local available for restore", slug)
            addon = Addon(self.coresys, slug)
        else:
            _LOGGER.debug("Add-on %s is local available for restore", slug)
            addon = self.local[slug]

        wait_for_start = await addon.restore(tar_file)

        # Check if new
        if slug not in self.local:
            _LOGGER.info("Detect new Add-on after restore %s", slug)
            self.local[slug] = addon

        # Update ingress
        if addon.with_ingress:
            await self.sys_ingress.reload()
            with suppress(HomeAssistantAPIError):
                await self.sys_ingress.update_hass_panel(addon)

        return wait_for_start

    @Job(conditions=[JobCondition.FREE_SPACE, JobCondition.INTERNET_HOST])
    async def repair(self) -> None:
        """Repair local add-ons."""
        needs_repair: list[Addon] = []

        # Evaluate Add-ons to repair
        for addon in self.installed:
            if await addon.instance.exists():
                continue
            needs_repair.append(addon)

        _LOGGER.info("Found %d add-ons to repair", len(needs_repair))
        if not needs_repair:
            return

        for addon in needs_repair:
            _LOGGER.info("Repairing for add-on: %s", addon.slug)
            with suppress(DockerError, KeyError):
                # Need pull a image again
                if not addon.need_build:
                    await addon.instance.install(addon.version, addon.image)
                    continue

                # Need local lookup
                if addon.need_build and not addon.is_detached:
                    store = self.store[addon.slug]
                    # If this add-on is available for rebuild
                    if addon.version == store.version:
                        await addon.instance.install(addon.version, addon.image)
                        continue

            _LOGGER.error("Can't repair %s", addon.slug)
            with suppress(AddonsError):
                await self.uninstall(addon.slug)

    async def sync_dns(self) -> None:
        """Sync add-ons DNS names."""
        # Update hosts
        for addon in self.installed:
            try:
                if not await addon.instance.is_running():
                    continue
            except DockerError as err:
                _LOGGER.warning("Add-on %s is corrupt: %s", addon.slug, err)
                self.sys_resolution.create_issue(
                    IssueType.CORRUPT_DOCKER,
                    ContextType.ADDON,
                    reference=addon.slug,
                    suggestions=[SuggestionType.EXECUTE_REPAIR],
                )
                capture_exception(err)
            else:
                self.sys_plugins.dns.add_host(
                    ipv4=addon.ip_address, names=[addon.hostname], write=False
                )

        # Write hosts files
        with suppress(CoreDNSError):
            self.sys_plugins.dns.write_hosts()
```
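Note the return convention in the listing above: `update()`, `rebuild()`, and `restore()` return either `None` or an awaitable that resolves once the add-on reports state 'started', so callers decide whether to block on startup. A hypothetical caller sketch (not part of the diff; `manager` stands for an `AddonManager` instance):

```python
async def update_and_wait(manager, slug: str) -> None:
    """Update an add-on and, if it was restarted, wait for it to come up."""
    # update() returns None when the add-on is not restarted afterwards,
    # otherwise an awaitable that completes once the add-on is started.
    if start_task := await manager.update(slug, backup=True):
        await start_task
```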
@@ -3,6 +3,7 @@ import asyncio
 from collections.abc import Awaitable
 from contextlib import suppress
 from copy import deepcopy
+import errno
 from ipaddress import IPv4Address
 import logging
 from pathlib import Path, PurePath
@@ -64,12 +65,15 @@ from ..exceptions import (
     AddonsNotSupportedError,
     ConfigurationFileError,
     DockerError,
+    HomeAssistantAPIError,
     HostAppArmorError,
 )
 from ..hardware.data import Device
 from ..homeassistant.const import WSEvent, WSType
+from ..jobs.const import JobExecutionLimit
 from ..jobs.decorator import Job
 from ..resolution.const import UnhealthyReason
+from ..store.addon import AddonStore
 from ..utils import check_port
 from ..utils.apparmor import adjust_profile
 from ..utils.json import read_json_file, write_json_file
@@ -80,6 +84,7 @@ from .const import (
     WATCHDOG_THROTTLE_MAX_CALLS,
     WATCHDOG_THROTTLE_PERIOD,
     AddonBackupMode,
+    MappingType,
 )
 from .model import AddonModel, Data
 from .options import AddonOptions
@@ -129,54 +134,7 @@ class Addon(AddonModel):
         )
         self._listeners: list[EventListener] = []
         self._startup_event = asyncio.Event()
-
-        @Job(
-            name=f"addon_{slug}_restart_after_problem",
-            limit=JobExecutionLimit.THROTTLE_RATE_LIMIT,
-            throttle_period=WATCHDOG_THROTTLE_PERIOD,
-            throttle_max_calls=WATCHDOG_THROTTLE_MAX_CALLS,
-            on_condition=AddonsJobError,
-        )
-        async def restart_after_problem(addon: Addon, state: ContainerState):
-            """Restart unhealthy or failed addon."""
-            attempts = 0
-            while await addon.instance.current_state() == state:
-                if not addon.in_progress:
-                    _LOGGER.warning(
-                        "Watchdog found addon %s is %s, restarting...",
-                        addon.name,
-                        state.value,
-                    )
-                    try:
-                        if state == ContainerState.FAILED:
-                            # Ensure failed container is removed before attempting reanimation
-                            if attempts == 0:
-                                with suppress(DockerError):
-                                    await addon.instance.stop(remove_container=True)
-
-                            await (await addon.start())
-                        else:
-                            await (await addon.restart())
-                    except AddonsError as err:
-                        attempts = attempts + 1
-                        _LOGGER.error(
-                            "Watchdog restart of addon %s failed!", addon.name
-                        )
-                        capture_exception(err)
-                    else:
-                        break
-
-                if attempts >= WATCHDOG_MAX_ATTEMPTS:
-                    _LOGGER.critical(
-                        "Watchdog cannot restart addon %s, failed all %s attempts",
-                        addon.name,
-                        attempts,
-                    )
-                    break
-
-                await asyncio.sleep(WATCHDOG_RETRY_SECONDS)
-
-        self._restart_after_problem = restart_after_problem
+        self._startup_task: asyncio.Task | None = None

     def __repr__(self) -> str:
         """Return internal representation."""
@@ -228,6 +186,7 @@ class Addon(AddonModel):
             )
         )

+        await self._check_ingress_port()
         with suppress(DockerError):
             await self.instance.attach(version=self.version)
@@ -246,6 +205,11 @@ class Addon(AddonModel):
         """Return add-on data from store."""
         return self.sys_store.data.addons.get(self.slug, self.data)

+    @property
+    def addon_store(self) -> AddonStore | None:
+        """Return store representation of addon."""
+        return self.sys_addons.store.get(self.slug)
+
     @property
     def persist(self) -> Data:
         """Return add-on data/config."""
@@ -434,7 +398,7 @@ class Addon(AddonModel):

         port = self.data[ATTR_INGRESS_PORT]
         if port == 0:
-            return self.sys_ingress.get_dynamic_port(self.slug)
+            raise RuntimeError(f"No port set for add-on {self.slug}")
         return port

     @property
@@ -500,6 +464,21 @@ class Addon(AddonModel):
         """Return add-on data path external for Docker."""
         return PurePath(self.sys_config.path_extern_addons_data, self.slug)

+    @property
+    def addon_config_used(self) -> bool:
+        """Add-on is using its public config folder."""
+        return MappingType.ADDON_CONFIG in self.map_volumes
+
+    @property
+    def path_config(self) -> Path:
+        """Return add-on config path inside Supervisor."""
+        return Path(self.sys_config.path_addon_configs, self.slug)
+
+    @property
+    def path_extern_config(self) -> PurePath:
+        """Return add-on config path external for Docker."""
+        return PurePath(self.sys_config.path_extern_addon_configs, self.slug)
+
     @property
     def path_options(self) -> Path:
         """Return path to add-on options."""
@@ -563,7 +542,7 @@ class Addon(AddonModel):

         # TCP monitoring
         if s_prefix == "tcp":
-            return await self.sys_run_in_executor(check_port, self.ip_address, port)
+            return await check_port(self.ip_address, port)

         # lookup the correct protocol from config
         if t_proto:
@@ -579,7 +558,7 @@ class Addon(AddonModel):
             ) as req:
                 if req.status < 300:
                     return True
-        except (asyncio.TimeoutError, aiohttp.ClientError):
+        except (TimeoutError, aiohttp.ClientError):
             pass

         return False
@@ -606,16 +585,201 @@ class Addon(AddonModel):

         raise AddonConfigurationError()

+    @Job(
+        name="addon_unload",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=AddonsJobError,
+    )
     async def unload(self) -> None:
         """Unload add-on and remove data."""
+        if self._startup_task:
+            # If we were waiting on startup, cancel that and let the task finish before proceeding
+            self._startup_task.cancel(f"Removing add-on {self.name} from system")
+            with suppress(asyncio.CancelledError):
+                await self._startup_task
+
         for listener in self._listeners:
             self.sys_bus.remove_listener(listener)

-        if not self.path_data.is_dir():
-            return
-
-        _LOGGER.info("Removing add-on data folder %s", self.path_data)
-        await remove_data(self.path_data)
+        if self.path_data.is_dir():
+            _LOGGER.info("Removing add-on data folder %s", self.path_data)
+            await remove_data(self.path_data)
+
+    async def _check_ingress_port(self):
+        """Assign a ingress port if dynamic port selection is used."""
+        if not self.with_ingress:
+            return
+
+        if self.data[ATTR_INGRESS_PORT] == 0:
+            self.data[ATTR_INGRESS_PORT] = await self.sys_ingress.get_dynamic_port(
+                self.slug
+            )
+
+    @Job(
+        name="addon_install",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=AddonsJobError,
+    )
+    async def install(self) -> None:
+        """Install and setup this addon."""
+        self.sys_addons.data.install(self.addon_store)
+        await self.load()
+
+        if not self.path_data.is_dir():
+            _LOGGER.info(
+                "Creating Home Assistant add-on data folder %s", self.path_data
+            )
+            self.path_data.mkdir()
+
+        # Setup/Fix AppArmor profile
+        await self.install_apparmor()
+
+        # Install image
+        try:
+            await self.instance.install(
+                self.latest_version, self.addon_store.image, arch=self.arch
+            )
+        except DockerError as err:
+            self.sys_addons.data.uninstall(self)
+            raise AddonsError() from err
+
+        # Add to addon manager
+        self.sys_addons.local[self.slug] = self
+
+        # Reload ingress tokens
+        if self.with_ingress:
+            await self.sys_ingress.reload()
+
+    @Job(
+        name="addon_uninstall",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=AddonsJobError,
+    )
+    async def uninstall(self) -> None:
+        """Uninstall and cleanup this addon."""
+        try:
+            await self.instance.remove()
+        except DockerError as err:
+            raise AddonsError() from err
+
+        self.state = AddonState.UNKNOWN
+
+        await self.unload()
+
+        # Cleanup audio settings
+        if self.path_pulse.exists():
+            with suppress(OSError):
+                self.path_pulse.unlink()
+
+        # Cleanup AppArmor profile
+        with suppress(HostAppArmorError):
+            await self.uninstall_apparmor()
+
+        # Cleanup Ingress panel from sidebar
+        if self.ingress_panel:
+            self.ingress_panel = False
+            with suppress(HomeAssistantAPIError):
+                await self.sys_ingress.update_hass_panel(self)
+
+        # Cleanup Ingress dynamic port assignment
+        if self.with_ingress:
+            self.sys_create_task(self.sys_ingress.reload())
+            self.sys_ingress.del_dynamic_port(self.slug)
+
+        # Cleanup discovery data
+        for message in self.sys_discovery.list_messages:
+            if message.addon != self.slug:
+                continue
+            self.sys_discovery.remove(message)
+
+        # Cleanup services data
+        for service in self.sys_services.list_services:
+            if self.slug not in service.active:
+                continue
+            service.del_service_data(self)
+
+        # Remove from addon manager
+        self.sys_addons.data.uninstall(self)
+        self.sys_addons.local.pop(self.slug)
+
+    @Job(
+        name="addon_update",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=AddonsJobError,
+    )
+    async def update(self) -> asyncio.Task | None:
+        """Update this addon to latest version.
+
+        Returns a Task that completes when addon has state 'started' (see start)
+        if it was running. Else nothing is returned.
+        """
+        old_image = self.image
+        # Cache data to prevent races with other updates to global
+        store = self.addon_store.clone()
+
+        try:
+            await self.instance.update(store.version, store.image, arch=self.arch)
+        except DockerError as err:
+            raise AddonsError() from err
+
+        # Stop the addon if running
+        if (last_state := self.state) in {AddonState.STARTED, AddonState.STARTUP}:
+            await self.stop()
+
+        try:
+            _LOGGER.info("Add-on '%s' successfully updated", self.slug)
+            self.sys_addons.data.update(store)
+            await self._check_ingress_port()
+
+            # Cleanup
+            with suppress(DockerError):
+                await self.instance.cleanup(
+                    old_image=old_image, image=store.image, version=store.version
+                )
+
+            # Setup/Fix AppArmor profile
+            await self.install_apparmor()
+
+        finally:
+            # restore state. Return Task for caller if no exception
+            out = (
+                await self.start()
+                if last_state in {AddonState.STARTED, AddonState.STARTUP}
+                else None
+            )
+        return out
+
+    @Job(
+        name="addon_rebuild",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=AddonsJobError,
+    )
+    async def rebuild(self) -> asyncio.Task | None:
+        """Rebuild this addons container and image.
+
+        Returns a Task that completes when addon has state 'started' (see start)
+        if it was running. Else nothing is returned.
+        """
+        last_state: AddonState = self.state
+        try:
+            # remove docker container but not addon config
+            try:
+                await self.instance.remove()
+                await self.instance.install(self.version)
+            except DockerError as err:
+                raise AddonsError() from err
+
+            self.sys_addons.data.update(self.addon_store)
+            _LOGGER.info("Add-on '%s' successfully rebuilt", self.slug)
+
+        finally:
+            # restore state
+            out = (
+                await self.start()
+                if last_state in [AddonState.STARTED, AddonState.STARTUP]
+                else None
+            )
        return out

     def write_pulse(self) -> None:
         """Write asound config to file and return True on success."""
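`get_dynamic_port()` and `del_dynamic_port()` above are Supervisor ingress internals this branch starts calling from the add-on lifecycle. As a rough, hypothetical stand-in — not the real implementation — the contract those calls imply (one stable port per add-on slug, released again on uninstall) could look like this; the class name and port range are illustrative assumptions:

import secrets


class DynamicPortRegistry:
    """Hypothetical sketch: one stable ingress port per add-on slug."""

    def __init__(self, start: int = 62000, end: int = 62999) -> None:
        self._ports = range(start, end + 1)
        self._by_slug: dict[str, int] = {}

    def get_dynamic_port(self, slug: str) -> int:
        """Return the existing assignment or pick an unused port."""
        if slug in self._by_slug:
            return self._by_slug[slug]
        used = set(self._by_slug.values())
        port = secrets.choice([p for p in self._ports if p not in used])
        self._by_slug[slug] = port
        return port

    def del_dynamic_port(self, slug: str) -> None:
        """Release the assignment when the add-on is uninstalled."""
        self._by_slug.pop(slug, None)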
@@ -631,6 +795,8 @@ class Addon(AddonModel):
         try:
             self.path_pulse.write_text(pulse_config, encoding="utf-8")
         except OSError as err:
+            if err.errno == errno.EBADMSG:
+                self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE
             _LOGGER.error(
                 "Add-on %s can't write pulse/client.config: %s", self.slug, err
             )
@@ -699,24 +865,34 @@ class Addon(AddonModel):
     async def _wait_for_startup(self) -> None:
         """Wait for startup event to be set with timeout."""
         try:
-            await asyncio.wait_for(self._startup_event.wait(), STARTUP_TIMEOUT)
-        except asyncio.TimeoutError:
+            self._startup_task = self.sys_create_task(self._startup_event.wait())
+            await asyncio.wait_for(self._startup_task, STARTUP_TIMEOUT)
+        except TimeoutError:
             _LOGGER.warning(
-                "Timeout while waiting for addon %s to start, took more then %s seconds",
+                "Timeout while waiting for addon %s to start, took more than %s seconds",
                 self.name,
                 STARTUP_TIMEOUT,
             )
+        except asyncio.CancelledError as err:
+            _LOGGER.info("Wait for addon startup task cancelled due to: %s", err)
+        finally:
+            self._startup_task = None

-    async def start(self) -> Awaitable[None]:
+    @Job(
+        name="addon_start",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=AddonsJobError,
+    )
+    async def start(self) -> asyncio.Task:
         """Set options and start add-on.

-        Returns a coroutine that completes when addon has state 'started'.
+        Returns a Task that completes when addon has state 'started'.
         For addons with a healthcheck, that is when they become healthy or unhealthy.
         Addons without a healthcheck have state 'started' immediately.
         """
         if await self.instance.is_running():
             _LOGGER.warning("%s is already running!", self.slug)
-            return self._wait_for_startup()
+            return self.sys_create_task(self._wait_for_startup())

         # Access Token
         self.persist[ATTR_ACCESS_TOKEN] = secrets.token_hex(56)
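The switch from `except asyncio.TimeoutError` to the builtin `TimeoutError` works because the two have been the same class since Python 3.11; keeping the waiter in a named task is what lets `unload()` cancel a pending startup wait. A self-contained sketch of the same pattern, outside the Supervisor codebase (names are illustrative):

import asyncio

# Requires Python 3.11+, where asyncio.TimeoutError is the builtin:
assert asyncio.TimeoutError is TimeoutError


async def wait_with_timeout(event: asyncio.Event, timeout: float) -> bool:
    """Wait for an event via a named task so a caller could cancel it early."""
    task = asyncio.get_running_loop().create_task(event.wait())
    try:
        await asyncio.wait_for(task, timeout)
    except TimeoutError:
        return False  # wait_for() cancelled the waiter task for us
    except asyncio.CancelledError:
        return False  # someone cancelled the wait, as unload() does above
    return True


print(asyncio.run(wait_with_timeout(asyncio.Event(), 0.1)))  # False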
@@ -729,6 +905,18 @@ class Addon(AddonModel):
         if self.with_audio:
             self.write_pulse()

+        def _check_addon_config_dir():
+            if self.path_config.is_dir():
+                return
+
+            _LOGGER.info(
+                "Creating Home Assistant add-on config folder %s", self.path_config
+            )
+            self.path_config.mkdir()
+
+        if self.addon_config_used:
+            await self.sys_run_in_executor(_check_addon_config_dir)
+
         # Start Add-on
         self._startup_event.clear()
         try:
@@ -737,8 +925,13 @@ class Addon(AddonModel):
             self.state = AddonState.ERROR
             raise AddonsError() from err

-        return self._wait_for_startup()
+        return self.sys_create_task(self._wait_for_startup())

+    @Job(
+        name="addon_stop",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=AddonsJobError,
+    )
     async def stop(self) -> None:
         """Stop add-on."""
         self._manual_stop = True
@@ -748,10 +941,15 @@ class Addon(AddonModel):
             self.state = AddonState.ERROR
             raise AddonsError() from err

-    async def restart(self) -> Awaitable[None]:
+    @Job(
+        name="addon_restart",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=AddonsJobError,
+    )
+    async def restart(self) -> asyncio.Task:
         """Restart add-on.

-        Returns a coroutine that completes when addon has state 'started' (see start).
+        Returns a Task that completes when addon has state 'started' (see start).
         """
         with suppress(AddonsError):
             await self.stop()
@@ -778,11 +976,13 @@ class Addon(AddonModel):
         except DockerError as err:
             raise AddonsError() from err

+    @Job(
+        name="addon_write_stdin",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=AddonsJobError,
+    )
     async def write_stdin(self, data) -> None:
-        """Write data to add-on stdin.
-
-        Return a coroutine.
-        """
+        """Write data to add-on stdin."""
         if not self.with_stdin:
             raise AddonsNotSupportedError(
                 f"Add-on {self.slug} does not support writing to stdin!", _LOGGER.error
@@ -810,14 +1010,59 @@ class Addon(AddonModel):
             _LOGGER.error,
         ) from err

-    async def backup(self, tar_file: tarfile.TarFile) -> Awaitable[None] | None:
+    @Job(
+        name="addon_begin_backup",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=AddonsJobError,
+    )
+    async def begin_backup(self) -> bool:
+        """Execute pre commands or stop addon if necessary.
+
+        Returns value of `is_running`. Caller should not call `end_backup` if return is false.
+        """
+        if not await self.is_running():
+            return False
+
+        if self.backup_mode == AddonBackupMode.COLD:
+            _LOGGER.info("Shutdown add-on %s for cold backup", self.slug)
+            await self.stop()
+
+        elif self.backup_pre is not None:
+            await self._backup_command(self.backup_pre)
+
+        return True
+
+    @Job(
+        name="addon_end_backup",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=AddonsJobError,
+    )
+    async def end_backup(self) -> asyncio.Task | None:
+        """Execute post commands or restart addon if necessary.
+
+        Returns a Task that completes when addon has state 'started' (see start)
+        for cold backup. Else nothing is returned.
+        """
+        if self.backup_mode is AddonBackupMode.COLD:
+            _LOGGER.info("Starting add-on %s again", self.slug)
+            return await self.start()
+
+        if self.backup_post is not None:
+            await self._backup_command(self.backup_post)
+        return None
+
+    @Job(
+        name="addon_backup",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=AddonsJobError,
+    )
+    async def backup(self, tar_file: tarfile.TarFile) -> asyncio.Task | None:
         """Backup state of an add-on.

-        Returns a coroutine that completes when addon has state 'started' (see start)
+        Returns a Task that completes when addon has state 'started' (see start)
         for cold backup. Else nothing is returned.
         """
         wait_for_start: Awaitable[None] | None = None
-        is_running = await self.is_running()

         with TemporaryDirectory(dir=self.sys_config.path_tmp) as temp:
             temp_path = Path(temp)
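Read together, `begin_backup()` and `end_backup()` replace the inline pre/post handling that the next two hunks delete from `backup()`. The caller-side contract, condensed into a sketch with an illustrative `addon` object and `write_tarfile` callable (not Supervisor code):

async def backup_with_hooks(addon, write_tarfile) -> None:
    # begin_backup() stops a cold-backup add-on or runs backup_pre,
    # and reports whether the add-on was running beforehand.
    is_running = await addon.begin_backup()
    try:
        await write_tarfile()
    finally:
        # Only call end_backup() when begin_backup() reported running:
        # it restarts cold-backup add-ons or runs backup_post.
        if is_running:
            await addon.end_backup()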
@@ -869,16 +1114,16 @@ class Addon(AddonModel):
                     arcname="data",
                 )

-            if (
-                is_running
-                and self.backup_mode == AddonBackupMode.HOT
-                and self.backup_pre is not None
-            ):
-                await self._backup_command(self.backup_pre)
-            elif is_running and self.backup_mode == AddonBackupMode.COLD:
-                _LOGGER.info("Shutdown add-on %s for cold backup", self.slug)
-                await self.instance.stop()
+            # Backup config
+            if self.addon_config_used:
+                atomic_contents_add(
+                    backup,
+                    self.path_config,
+                    excludes=self.backup_exclude,
+                    arcname="config",
+                )

+            is_running = await self.begin_backup()
             try:
                 _LOGGER.info("Building backup for add-on %s", self.slug)
                 await self.sys_run_in_executor(_write_tarfile)
@@ -887,23 +1132,21 @@ class Addon(AddonModel):
                 f"Can't write tarfile {tar_file}: {err}", _LOGGER.error
             ) from err
         finally:
-            if (
-                is_running
-                and self.backup_mode == AddonBackupMode.HOT
-                and self.backup_post is not None
-            ):
-                await self._backup_command(self.backup_post)
-            elif is_running and self.backup_mode is AddonBackupMode.COLD:
-                _LOGGER.info("Starting add-on %s again", self.slug)
-                wait_for_start = await self.start()
+            if is_running:
+                wait_for_start = await self.end_backup()

         _LOGGER.info("Finish backup for addon %s", self.slug)
         return wait_for_start

-    async def restore(self, tar_file: tarfile.TarFile) -> Awaitable[None] | None:
+    @Job(
+        name="addon_restore",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=AddonsJobError,
+    )
+    async def restore(self, tar_file: tarfile.TarFile) -> asyncio.Task | None:
         """Restore state of an add-on.

-        Returns a coroutine that completes when addon has state 'started' (see start)
+        Returns a Task that completes when addon has state 'started' (see start)
         if addon is started after restore. Else nothing is returned.
         """
         wait_for_start: Awaitable[None] | None = None
@@ -912,7 +1155,11 @@ class Addon(AddonModel):
         def _extract_tarfile():
             """Extract tar backup."""
             with tar_file as backup:
-                backup.extractall(path=Path(temp), members=secure_path(backup))
+                backup.extractall(
+                    path=Path(temp),
+                    members=secure_path(backup),
+                    filter="fully_trusted",
+                )

         try:
             await self.sys_run_in_executor(_extract_tarfile)
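The added `filter="fully_trusted"` pins `tarfile` behavior across Python versions: 3.12 emits a DeprecationWarning when `extractall()` is called without an explicit filter, and a stricter `"data"` default is planned; `"fully_trusted"` keeps the historical behavior for archives already vetted via `secure_path()`. Stand-alone, assuming a local `backup.tar` (an illustrative file name):

import tarfile

# Assumes a backup.tar produced by Supervisor's own writer exists locally.
with tarfile.open("backup.tar") as backup:
    # Explicit filter: silence the 3.12 warning and keep permissive
    # extraction for members that were filtered by the caller already.
    backup.extractall(path="/tmp/restore", filter="fully_trusted")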
@@ -950,64 +1197,81 @@ class Addon(AddonModel):
                 self.slug, data[ATTR_USER], data[ATTR_SYSTEM], restore_image
             )

-            # Check version / restore image
-            version = data[ATTR_VERSION]
-            if not await self.instance.exists():
-                _LOGGER.info("Restore/Install of image for addon %s", self.slug)
-
-                image_file = Path(temp, "image.tar")
-                if image_file.is_file():
-                    with suppress(DockerError):
-                        await self.instance.import_image(image_file)
-                else:
-                    with suppress(DockerError):
-                        await self.instance.install(version, restore_image)
-                        await self.instance.cleanup()
-            elif self.instance.version != version or self.legacy:
-                _LOGGER.info("Restore/Update of image for addon %s", self.slug)
-                with suppress(DockerError):
-                    await self.instance.update(version, restore_image)
-            else:
-                with suppress(DockerError):
-                    await self.instance.stop()
-
-            # Restore data
-            def _restore_data():
-                """Restore data."""
-                temp_data = Path(temp, "data")
-                if temp_data.is_dir():
-                    shutil.copytree(temp_data, self.path_data, symlinks=True)
-                else:
-                    self.path_data.mkdir()
-
-            _LOGGER.info("Restoring data for addon %s", self.slug)
-            if self.path_data.is_dir():
-                await remove_data(self.path_data)
-            try:
-                await self.sys_run_in_executor(_restore_data)
-            except shutil.Error as err:
-                raise AddonsError(
-                    f"Can't restore origin data: {err}", _LOGGER.error
-                ) from err
-
-            # Restore AppArmor
-            profile_file = Path(temp, "apparmor.txt")
-            if profile_file.exists():
-                try:
-                    await self.sys_host.apparmor.load_profile(self.slug, profile_file)
-                except HostAppArmorError as err:
-                    _LOGGER.error(
-                        "Can't restore AppArmor profile for add-on %s", self.slug
-                    )
-                    raise AddonsError() from err
-
-            # Is add-on loaded
-            if not self.loaded:
-                await self.load()
-
-        # Run add-on
-        if data[ATTR_STATE] == AddonState.STARTED:
-            wait_for_start = await self.start()
+            # Stop it first if its running
+            if await self.instance.is_running():
+                await self.stop()
+
+            try:
+                # Check version / restore image
+                version = data[ATTR_VERSION]
+                if not await self.instance.exists():
+                    _LOGGER.info("Restore/Install of image for addon %s", self.slug)
+
+                    image_file = Path(temp, "image.tar")
+                    if image_file.is_file():
+                        with suppress(DockerError):
+                            await self.instance.import_image(image_file)
+                    else:
+                        with suppress(DockerError):
+                            await self.instance.install(
+                                version, restore_image, self.arch
+                            )
+                            await self.instance.cleanup()
+                elif self.instance.version != version or self.legacy:
+                    _LOGGER.info("Restore/Update of image for addon %s", self.slug)
+                    with suppress(DockerError):
+                        await self.instance.update(version, restore_image, self.arch)
+                self._check_ingress_port()
+
+                # Restore data and config
+                def _restore_data():
+                    """Restore data and config."""
+                    temp_data = Path(temp, "data")
+                    if temp_data.is_dir():
+                        shutil.copytree(temp_data, self.path_data, symlinks=True)
+                    else:
+                        self.path_data.mkdir()
+
+                    temp_config = Path(temp, "config")
+                    if temp_config.is_dir():
+                        shutil.copytree(temp_config, self.path_config, symlinks=True)
+                    elif self.addon_config_used:
+                        self.path_config.mkdir()
+
+                _LOGGER.info("Restoring data and config for addon %s", self.slug)
+                if self.path_data.is_dir():
+                    await remove_data(self.path_data)
+                if self.path_config.is_dir():
+                    await remove_data(self.path_config)
+
+                try:
+                    await self.sys_run_in_executor(_restore_data)
+                except shutil.Error as err:
+                    raise AddonsError(
+                        f"Can't restore origin data: {err}", _LOGGER.error
+                    ) from err
+
+                # Restore AppArmor
+                profile_file = Path(temp, "apparmor.txt")
+                if profile_file.exists():
+                    try:
+                        await self.sys_host.apparmor.load_profile(
+                            self.slug, profile_file
+                        )
+                    except HostAppArmorError as err:
+                        _LOGGER.error(
+                            "Can't restore AppArmor profile for add-on %s", self.slug
+                        )
+                        raise AddonsError() from err
+
+                # Is add-on loaded
+                if not self.loaded:
+                    await self.load()
+
+            finally:
+                # Run add-on
+                if data[ATTR_STATE] == AddonState.STARTED:
+                    wait_for_start = await self.start()

         _LOGGER.info("Finished restore for add-on %s", self.slug)
         return wait_for_start
@@ -1019,6 +1283,50 @@ class Addon(AddonModel):
         """
         return self.instance.check_trust()

+    @Job(
+        name="addon_restart_after_problem",
+        limit=JobExecutionLimit.GROUP_THROTTLE_RATE_LIMIT,
+        throttle_period=WATCHDOG_THROTTLE_PERIOD,
+        throttle_max_calls=WATCHDOG_THROTTLE_MAX_CALLS,
+        on_condition=AddonsJobError,
+    )
+    async def _restart_after_problem(self, state: ContainerState):
+        """Restart unhealthy or failed addon."""
+        attempts = 0
+        while await self.instance.current_state() == state:
+            if not self.in_progress:
+                _LOGGER.warning(
+                    "Watchdog found addon %s is %s, restarting...",
+                    self.name,
+                    state,
+                )
+                try:
+                    if state == ContainerState.FAILED:
+                        # Ensure failed container is removed before attempting reanimation
+                        if attempts == 0:
+                            with suppress(DockerError):
+                                await self.instance.stop(remove_container=True)
+
+                        await (await self.start())
+                    else:
+                        await (await self.restart())
+                except AddonsError as err:
+                    attempts = attempts + 1
+                    _LOGGER.error("Watchdog restart of addon %s failed!", self.name)
+                    capture_exception(err)
+                else:
+                    break
+
+            if attempts >= WATCHDOG_MAX_ATTEMPTS:
+                _LOGGER.critical(
+                    "Watchdog cannot restart addon %s, failed all %s attempts",
+                    self.name,
+                    attempts,
+                )
+                break
+
+            await asyncio.sleep(WATCHDOG_RETRY_SECONDS)
+
     async def container_state_changed(self, event: DockerContainerStateEvent) -> None:
         """Set addon state from container state."""
         if event.name != self.instance.name:
@@ -1053,4 +1361,4 @@ class Addon(AddonModel):
             ContainerState.STOPPED,
             ContainerState.UNHEALTHY,
         ]:
-            await self._restart_after_problem(self, event.state)
+            await self._restart_after_problem(event.state)
supervisor/addons/configuration.py (new file, 11 lines)
@@ -0,0 +1,11 @@
"""Confgiuration Objects for Addon Config."""

from dataclasses import dataclass


@dataclass(slots=True)
class FolderMapping:
    """Represent folder mapping configuration."""

    path: str | None
    read_only: bool
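For context on the new file above: `@dataclass(slots=True)` generates `__slots__`, which keeps per-instance memory small and rejects accidental attribute typos — a reasonable fit for an object created once per mapped volume. A quick demonstration:

from dataclasses import dataclass


@dataclass(slots=True)
class FolderMapping:
    path: str | None
    read_only: bool


mapping = FolderMapping(path="/data/custom", read_only=True)
try:
    mapping.extra = 1  # __slots__ blocks attributes beyond path/read_only
except AttributeError as err:
    print(err)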
@@ -1,19 +1,36 @@
 """Add-on static data."""
 from datetime import timedelta
-from enum import Enum
+from enum import StrEnum

 from ..jobs.const import JobCondition


-class AddonBackupMode(str, Enum):
+class AddonBackupMode(StrEnum):
     """Backup mode of an Add-on."""

     HOT = "hot"
     COLD = "cold"


+class MappingType(StrEnum):
+    """Mapping type of an Add-on Folder."""
+
+    DATA = "data"
+    CONFIG = "config"
+    SSL = "ssl"
+    ADDONS = "addons"
+    BACKUP = "backup"
+    SHARE = "share"
+    MEDIA = "media"
+    HOMEASSISTANT_CONFIG = "homeassistant_config"
+    ALL_ADDON_CONFIGS = "all_addon_configs"
+    ADDON_CONFIG = "addon_config"
+
+
 ATTR_BACKUP = "backup"
 ATTR_CODENOTARY = "codenotary"
+ATTR_READ_ONLY = "read_only"
+ATTR_PATH = "path"
 WATCHDOG_RETRY_SECONDS = 10
 WATCHDOG_MAX_ATTEMPTS = 5
 WATCHDOG_THROTTLE_PERIOD = timedelta(minutes=30)
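The `str, Enum` → `StrEnum` move matters on Python 3.11+, where f-strings and `format()` of a plain str-mixin enum started returning the member name instead of its value — breaking code that drops enum members into log messages and payloads. `StrEnum` restores value-like formatting (requires Python 3.11):

from enum import Enum, StrEnum


class OldMode(str, Enum):
    HOT = "hot"


class NewMode(StrEnum):
    HOT = "hot"


# Both still compare equal to the raw string...
assert OldMode.HOT == "hot" and NewMode.HOT == "hot"

# ...but only StrEnum formats as its value on Python 3.11+.
print(f"{OldMode.HOT}")  # OldMode.HOT
print(f"{NewMode.HOT}")  # hot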
supervisor/addons/manager.py (new file, 374 lines)
@@ -0,0 +1,374 @@
"""Supervisor add-on manager."""
import asyncio
from collections.abc import Awaitable
from contextlib import suppress
import logging
import tarfile
from typing import Union

from ..const import AddonBoot, AddonStartup, AddonState
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import (
    AddonConfigurationError,
    AddonsError,
    AddonsJobError,
    AddonsNotSupportedError,
    CoreDNSError,
    DockerAPIError,
    DockerError,
    DockerNotFound,
    HassioError,
    HomeAssistantAPIError,
)
from ..jobs.decorator import Job, JobCondition
from ..resolution.const import ContextType, IssueType, SuggestionType
from ..store.addon import AddonStore
from ..utils import check_exception_chain
from ..utils.sentry import capture_exception
from .addon import Addon
from .const import ADDON_UPDATE_CONDITIONS
from .data import AddonsData

_LOGGER: logging.Logger = logging.getLogger(__name__)

AnyAddon = Union[Addon, AddonStore]


class AddonManager(CoreSysAttributes):
    """Manage add-ons inside Supervisor."""

    def __init__(self, coresys: CoreSys):
        """Initialize Docker base wrapper."""
        self.coresys: CoreSys = coresys
        self.data: AddonsData = AddonsData(coresys)
        self.local: dict[str, Addon] = {}
        self.store: dict[str, AddonStore] = {}

    @property
    def all(self) -> list[AnyAddon]:
        """Return a list of all add-ons."""
        addons: dict[str, AnyAddon] = {**self.store, **self.local}
        return list(addons.values())

    @property
    def installed(self) -> list[Addon]:
        """Return a list of all installed add-ons."""
        return list(self.local.values())

    def get(self, addon_slug: str, local_only: bool = False) -> AnyAddon | None:
        """Return an add-on from slug.

        Prio:
          1 - Local
          2 - Store
        """
        if addon_slug in self.local:
            return self.local[addon_slug]
        if not local_only:
            return self.store.get(addon_slug)
        return None

    def from_token(self, token: str) -> Addon | None:
        """Return an add-on from Supervisor token."""
        for addon in self.installed:
            if token == addon.supervisor_token:
                return addon
        return None

    async def load(self) -> None:
        """Start up add-on management."""
        tasks = []
        for slug in self.data.system:
            addon = self.local[slug] = Addon(self.coresys, slug)
            tasks.append(self.sys_create_task(addon.load()))

        # Run initial tasks
        _LOGGER.info("Found %d installed add-ons", len(tasks))
        if tasks:
            await asyncio.wait(tasks)

        # Sync DNS
        await self.sync_dns()

    async def boot(self, stage: AddonStartup) -> None:
        """Boot add-ons with mode auto."""
        tasks: list[Addon] = []
        for addon in self.installed:
            if addon.boot != AddonBoot.AUTO or addon.startup != stage:
                continue
            tasks.append(addon)

        # Evaluate add-ons which need to be started
        _LOGGER.info("Phase '%s' starting %d add-ons", stage, len(tasks))
        if not tasks:
            return

        # Start Add-ons sequential
        # avoid issue on slow IO
        # Config.wait_boot is deprecated. Until addons update with healthchecks,
        # add a sleep task for it to keep the same minimum amount of wait time
        wait_boot: list[Awaitable[None]] = [asyncio.sleep(self.sys_config.wait_boot)]
        for addon in tasks:
            try:
                if start_task := await addon.start():
                    wait_boot.append(start_task)
            except AddonsError as err:
                # Check if there is an system/user issue
                if check_exception_chain(
                    err, (DockerAPIError, DockerNotFound, AddonConfigurationError)
                ):
                    addon.boot = AddonBoot.MANUAL
                    addon.save_persist()
            except HassioError:
                pass  # These are already handled
            else:
                continue

            _LOGGER.warning("Can't start Add-on %s", addon.slug)

        # Ignore exceptions from waiting for addon startup, addon errors handled elsewhere
        await asyncio.gather(*wait_boot, return_exceptions=True)

    async def shutdown(self, stage: AddonStartup) -> None:
        """Shutdown addons."""
        tasks: list[Addon] = []
        for addon in self.installed:
            if addon.state != AddonState.STARTED or addon.startup != stage:
                continue
            tasks.append(addon)

        # Evaluate add-ons which need to be stopped
        _LOGGER.info("Phase '%s' stopping %d add-ons", stage, len(tasks))
        if not tasks:
            return

        # Stop Add-ons sequential
        # avoid issue on slow IO
        for addon in tasks:
            try:
                await addon.stop()
            except Exception as err:  # pylint: disable=broad-except
                _LOGGER.warning("Can't stop Add-on %s: %s", addon.slug, err)
                capture_exception(err)

    @Job(
        name="addon_manager_install",
        conditions=ADDON_UPDATE_CONDITIONS,
        on_condition=AddonsJobError,
    )
    async def install(self, slug: str) -> None:
        """Install an add-on."""
        self.sys_jobs.current.reference = slug

        if slug in self.local:
            raise AddonsError(f"Add-on {slug} is already installed", _LOGGER.warning)
        store = self.store.get(slug)

        if not store:
            raise AddonsError(f"Add-on {slug} does not exist", _LOGGER.error)

        store.validate_availability()

        await Addon(self.coresys, slug).install()

        _LOGGER.info("Add-on '%s' successfully installed", slug)

    async def uninstall(self, slug: str) -> None:
        """Remove an add-on."""
        if slug not in self.local:
            _LOGGER.warning("Add-on %s is not installed", slug)
            return

        await self.local[slug].uninstall()

        _LOGGER.info("Add-on '%s' successfully removed", slug)

    @Job(
        name="addon_manager_update",
        conditions=ADDON_UPDATE_CONDITIONS,
        on_condition=AddonsJobError,
    )
    async def update(
        self, slug: str, backup: bool | None = False
    ) -> asyncio.Task | None:
        """Update add-on.

        Returns a Task that completes when addon has state 'started' (see addon.start)
        if addon is started after update. Else nothing is returned.
        """
        self.sys_jobs.current.reference = slug

        if slug not in self.local:
            raise AddonsError(f"Add-on {slug} is not installed", _LOGGER.error)
        addon = self.local[slug]

        if addon.is_detached:
            raise AddonsError(
                f"Add-on {slug} is not available inside store", _LOGGER.error
            )
        store = self.store[slug]

        if addon.version == store.version:
            raise AddonsError(f"No update available for add-on {slug}", _LOGGER.warning)

        # Check if available, Maybe something have changed
        store.validate_availability()

        if backup:
            await self.sys_backups.do_backup_partial(
                name=f"addon_{addon.slug}_{addon.version}",
                homeassistant=False,
                addons=[addon.slug],
            )

        return await addon.update()

    @Job(
        name="addon_manager_rebuild",
        conditions=[
            JobCondition.FREE_SPACE,
            JobCondition.INTERNET_HOST,
            JobCondition.HEALTHY,
        ],
        on_condition=AddonsJobError,
    )
    async def rebuild(self, slug: str) -> asyncio.Task | None:
        """Perform a rebuild of local build add-on.

        Returns a Task that completes when addon has state 'started' (see addon.start)
        if addon is started after rebuild. Else nothing is returned.
        """
        self.sys_jobs.current.reference = slug

        if slug not in self.local:
            raise AddonsError(f"Add-on {slug} is not installed", _LOGGER.error)
        addon = self.local[slug]

        if addon.is_detached:
            raise AddonsError(
                f"Add-on {slug} is not available inside store", _LOGGER.error
            )
        store = self.store[slug]

        # Check if a rebuild is possible now
        if addon.version != store.version:
            raise AddonsError(
                "Version changed, use Update instead Rebuild", _LOGGER.error
            )
        if not addon.need_build:
            raise AddonsNotSupportedError(
                "Can't rebuild a image based add-on", _LOGGER.error
            )

        return await addon.rebuild()

    @Job(
        name="addon_manager_restore",
        conditions=[
            JobCondition.FREE_SPACE,
            JobCondition.INTERNET_HOST,
            JobCondition.HEALTHY,
        ],
        on_condition=AddonsJobError,
    )
    async def restore(
        self, slug: str, tar_file: tarfile.TarFile
    ) -> asyncio.Task | None:
        """Restore state of an add-on.

        Returns a Task that completes when addon has state 'started' (see addon.start)
        if addon is started after restore. Else nothing is returned.
        """
        self.sys_jobs.current.reference = slug

        if slug not in self.local:
            _LOGGER.debug("Add-on %s is not local available for restore", slug)
            addon = Addon(self.coresys, slug)
            had_ingress = False
        else:
            _LOGGER.debug("Add-on %s is local available for restore", slug)
            addon = self.local[slug]
            had_ingress = addon.ingress_panel

        wait_for_start = await addon.restore(tar_file)

        # Check if new
        if slug not in self.local:
            _LOGGER.info("Detect new Add-on after restore %s", slug)
            self.local[slug] = addon

        # Update ingress
        if had_ingress != addon.ingress_panel:
            await self.sys_ingress.reload()
            with suppress(HomeAssistantAPIError):
                await self.sys_ingress.update_hass_panel(addon)

        return wait_for_start

    @Job(
        name="addon_manager_repair",
        conditions=[JobCondition.FREE_SPACE, JobCondition.INTERNET_HOST],
    )
    async def repair(self) -> None:
        """Repair local add-ons."""
        needs_repair: list[Addon] = []

        # Evaluate Add-ons to repair
        for addon in self.installed:
            if await addon.instance.exists():
                continue
            needs_repair.append(addon)

        _LOGGER.info("Found %d add-ons to repair", len(needs_repair))
        if not needs_repair:
            return

        for addon in needs_repair:
            _LOGGER.info("Repairing for add-on: %s", addon.slug)
            with suppress(DockerError, KeyError):
                # Need pull a image again
                if not addon.need_build:
                    await addon.instance.install(addon.version, addon.image)
                    continue

                # Need local lookup
                if addon.need_build and not addon.is_detached:
                    store = self.store[addon.slug]
                    # If this add-on is available for rebuild
                    if addon.version == store.version:
                        await addon.instance.install(addon.version, addon.image)
                        continue

            _LOGGER.error("Can't repair %s", addon.slug)
            with suppress(AddonsError):
                await self.uninstall(addon.slug)

    async def sync_dns(self) -> None:
        """Sync add-ons DNS names."""
        # Update hosts
        add_host_coros: list[Awaitable[None]] = []
        for addon in self.installed:
            try:
                if not await addon.instance.is_running():
                    continue
            except DockerError as err:
                _LOGGER.warning("Add-on %s is corrupt: %s", addon.slug, err)
                self.sys_resolution.create_issue(
                    IssueType.CORRUPT_DOCKER,
                    ContextType.ADDON,
                    reference=addon.slug,
                    suggestions=[SuggestionType.EXECUTE_REPAIR],
                )
                capture_exception(err)
            else:
                add_host_coros.append(
                    self.sys_plugins.dns.add_host(
                        ipv4=addon.ip_address, names=[addon.hostname], write=False
                    )
                )

        await asyncio.gather(*add_host_coros)

        # Write hosts files
        with suppress(CoreDNSError):
            await self.sys_plugins.dns.write_hosts()
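The manager's new `sync_dns()` (end of the file above) collects `add_host()` coroutines and awaits them in a single `gather()` rather than one at a time, so N running add-ons cost one concurrent batch instead of N sequential round trips. A toy model of the difference, with an illustrative stand-in for the DNS call:

import asyncio


async def add_host(name: str) -> None:
    """Stand-in for one DNS plug-in round trip."""
    await asyncio.sleep(0.1)


async def sequential(names: list[str]) -> None:
    for name in names:
        await add_host(name)


async def batched(names: list[str]) -> None:
    await asyncio.gather(*(add_host(name) for name in names))


names = [f"addon-{i}" for i in range(10)]
asyncio.run(batched(names))  # ~0.1s total, vs ~1.0s for sequential(names)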
@@ -1,6 +1,7 @@
 """Init file for Supervisor add-ons."""
 from abc import ABC, abstractmethod
-from collections.abc import Awaitable, Callable
+from collections import defaultdict
+from collections.abc import Callable
 from contextlib import suppress
 import logging
 from pathlib import Path
@@ -64,6 +65,7 @@ from ..const import (
     ATTR_TIMEOUT,
     ATTR_TMPFS,
     ATTR_TRANSLATIONS,
+    ATTR_TYPE,
     ATTR_UART,
     ATTR_UDEV,
     ATTR_URL,
@@ -79,24 +81,37 @@ from ..const import (
     AddonStage,
     AddonStartup,
 )
-from ..coresys import CoreSys, CoreSysAttributes
+from ..coresys import CoreSys
 from ..docker.const import Capabilities
 from ..exceptions import AddonsNotSupportedError
-from .const import ATTR_BACKUP, ATTR_CODENOTARY, AddonBackupMode
+from ..jobs.const import JOB_GROUP_ADDON
+from ..jobs.job_group import JobGroup
+from ..utils import version_is_new_enough
+from .configuration import FolderMapping
+from .const import (
+    ATTR_BACKUP,
+    ATTR_CODENOTARY,
+    ATTR_PATH,
+    ATTR_READ_ONLY,
+    AddonBackupMode,
+    MappingType,
+)
 from .options import AddonOptions, UiOptions
-from .validate import RE_SERVICE, RE_VOLUME
+from .validate import RE_SERVICE

 _LOGGER: logging.Logger = logging.getLogger(__name__)

 Data = dict[str, Any]


-class AddonModel(CoreSysAttributes, ABC):
+class AddonModel(JobGroup, ABC):
     """Add-on Data layout."""

     def __init__(self, coresys: CoreSys, slug: str):
         """Initialize data holder."""
-        self.coresys: CoreSys = coresys
+        super().__init__(
+            coresys, JOB_GROUP_ADDON.format_map(defaultdict(str, slug=slug)), slug
+        )
         self.slug: str = slug

     @property
@@ -532,14 +547,13 @@ class AddonModel(CoreSysAttributes, ABC):
         return ATTR_IMAGE not in self.data

     @property
-    def map_volumes(self) -> dict[str, bool]:
-        """Return a dict of {volume: read-only} from add-on."""
+    def map_volumes(self) -> dict[MappingType, FolderMapping]:
+        """Return a dict of {MappingType: FolderMapping} from add-on."""
         volumes = {}
         for volume in self.data[ATTR_MAP]:
-            result = RE_VOLUME.match(volume)
-            if not result:
-                continue
-            volumes[result.group(1)] = result.group(2) != "rw"
+            volumes[MappingType(volume[ATTR_TYPE])] = FolderMapping(
+                volume.get(ATTR_PATH), volume[ATTR_READ_ONLY]
+            )

         return volumes
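`map_volumes` now returns `dict[MappingType, FolderMapping]` instead of `{str: bool}`, so consumers get an optional explicit target path alongside the read-only flag. A self-contained sketch of consuming the new shape (local stand-ins for the Supervisor types; the `/config` target is an illustrative value; requires Python 3.11 for `StrEnum`):

from dataclasses import dataclass
from enum import StrEnum


class MappingType(StrEnum):  # local stand-in for supervisor.addons.const
    SHARE = "share"
    ADDON_CONFIG = "addon_config"


@dataclass(slots=True)
class FolderMapping:  # local stand-in for supervisor.addons.configuration
    path: str | None
    read_only: bool


# What map_volumes now hands back, instead of the old {"share": False}:
volumes = {
    MappingType.SHARE: FolderMapping(path=None, read_only=False),
    MappingType.ADDON_CONFIG: FolderMapping(path="/config", read_only=False),
}

for mapping_type, mapping in volumes.items():
    target = mapping.path or f"/{mapping_type}"  # fall back to legacy location
    mode = "ro" if mapping.read_only else "rw"
    print(f"{mapping_type}: mount at {target} ({mode})")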
@@ -640,7 +654,9 @@ class AddonModel(CoreSysAttributes, ABC):
         # Home Assistant
         version: AwesomeVersion | None = config.get(ATTR_HOMEASSISTANT)
         with suppress(AwesomeVersionException, TypeError):
-            if self.sys_homeassistant.version < version:
+            if version and not version_is_new_enough(
+                self.sys_homeassistant.version, version
+            ):
                 raise AddonsNotSupportedError(
                     f"Add-on {self.slug} not supported on this system, requires Home Assistant version {version} or greater",
                     logger,
@@ -664,19 +680,3 @@ class AddonModel(CoreSysAttributes, ABC):

         # local build
         return f"{config[ATTR_REPOSITORY]}/{self.sys_arch.default}-addon-{config[ATTR_SLUG]}"
-
-    def install(self) -> Awaitable[None]:
-        """Install this add-on."""
-        return self.sys_addons.install(self.slug)
-
-    def uninstall(self) -> Awaitable[None]:
-        """Uninstall this add-on."""
-        return self.sys_addons.uninstall(self.slug)
-
-    def update(self, backup: bool | None = False) -> Awaitable[Awaitable[None] | None]:
-        """Update this add-on."""
-        return self.sys_addons.update(self.slug, backup=backup)
-
-    def rebuild(self) -> Awaitable[Awaitable[None] | None]:
-        """Rebuild this add-on."""
-        return self.sys_addons.rebuild(self.slug)
@@ -81,6 +81,7 @@ from ..const import (
     ATTR_TIMEOUT,
     ATTR_TMPFS,
     ATTR_TRANSLATIONS,
+    ATTR_TYPE,
     ATTR_UART,
     ATTR_UDEV,
     ATTR_URL,
@@ -109,12 +110,22 @@ from ..validate import (
     uuid_match,
     version_tag,
 )
-from .const import ATTR_BACKUP, ATTR_CODENOTARY, RE_SLUG, AddonBackupMode
+from .const import (
+    ATTR_BACKUP,
+    ATTR_CODENOTARY,
+    ATTR_PATH,
+    ATTR_READ_ONLY,
+    RE_SLUG,
+    AddonBackupMode,
+    MappingType,
+)
 from .options import RE_SCHEMA_ELEMENT

 _LOGGER: logging.Logger = logging.getLogger(__name__)

-RE_VOLUME = re.compile(r"^(config|ssl|addons|backup|share|media)(?::(rw|ro))?$")
+RE_VOLUME = re.compile(
+    r"^(data|config|ssl|addons|backup|share|media|homeassistant_config|all_addon_configs|addon_config)(?::(rw|ro))?$"
+)
 RE_SERVICE = re.compile(r"^(?P<service>mqtt|mysql):(?P<rights>provide|want|need)$")


@@ -143,6 +154,9 @@ RE_MACHINE = re.compile(
     r"|raspberrypi3"
     r"|raspberrypi4-64"
     r"|raspberrypi4"
+    r"|raspberrypi5-64"
     r"|yellow"
+    r"|green"
     r"|tinker"
     r")$"
 )
@@ -175,6 +189,20 @@ def _warn_addon_config(config: dict[str, Any]):
             name,
         )

+    invalid_services: list[str] = []
+    for service in config.get(ATTR_DISCOVERY, []):
+        try:
+            valid_discovery_service(service)
+        except vol.Invalid:
+            invalid_services.append(service)
+
+    if invalid_services:
+        _LOGGER.warning(
+            "Add-on lists the following unknown services for discovery: %s. Please report this to the maintainer of %s",
+            ", ".join(invalid_services),
+            name,
+        )
+
     return config


@@ -196,9 +224,9 @@ def _migrate_addon_config(protocol=False):
                 name,
             )
             if value == "before":
-                config[ATTR_STARTUP] = AddonStartup.SERVICES.value
+                config[ATTR_STARTUP] = AddonStartup.SERVICES
             elif value == "after":
-                config[ATTR_STARTUP] = AddonStartup.APPLICATION.value
+                config[ATTR_STARTUP] = AddonStartup.APPLICATION

         # UART 2021-01-20
         if "auto_uart" in config:
@@ -244,6 +272,48 @@ def _migrate_addon_config(protocol=False):
                 name,
             )

+        # 2023-11 "map" entries can also be dict to allow path configuration
+        volumes = []
+        for entry in config.get(ATTR_MAP, []):
+            if isinstance(entry, dict):
+                volumes.append(entry)
+            if isinstance(entry, str):
+                result = RE_VOLUME.match(entry)
+                if not result:
+                    continue
+                volumes.append(
+                    {
+                        ATTR_TYPE: result.group(1),
+                        ATTR_READ_ONLY: result.group(2) != "rw",
+                    }
+                )
+
+        if volumes:
+            config[ATTR_MAP] = volumes
+
+        # 2023-10 "config" became "homeassistant" so /config can be used for addon's public config
+        if any(volume[ATTR_TYPE] == MappingType.CONFIG for volume in volumes):
+            if any(
+                volume
+                and volume[ATTR_TYPE]
+                in {MappingType.ADDON_CONFIG, MappingType.HOMEASSISTANT_CONFIG}
+                for volume in volumes
+            ):
+                _LOGGER.warning(
+                    "Add-on config using incompatible map options, '%s' and '%s' are ignored if '%s' is included. Please report this to the maintainer of %s",
+                    MappingType.ADDON_CONFIG,
+                    MappingType.HOMEASSISTANT_CONFIG,
+                    MappingType.CONFIG,
+                    name,
+                )
+            else:
+                _LOGGER.debug(
+                    "Add-on config using deprecated map option '%s' instead of '%s'. Please report this to the maintainer of %s",
+                    MappingType.CONFIG,
+                    MappingType.HOMEASSISTANT_CONFIG,
+                    name,
+                )
+
         return config

     return _migrate
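The migration shim above rewrites legacy string `map` entries into the new dict form before schema validation; note that unsuffixed entries stay read-only because `result.group(2)` is `None`, which is `!= "rw"`. The same transform in isolation:

import re

RE_VOLUME = re.compile(
    r"^(data|config|ssl|addons|backup|share|media|homeassistant_config|all_addon_configs|addon_config)(?::(rw|ro))?$"
)


def migrate_entry(entry):
    """Rewrite a legacy string map entry into the new dict form."""
    if isinstance(entry, dict):
        return entry  # already migrated
    result = RE_VOLUME.match(entry)
    if not result:
        return None
    return {"type": result.group(1), "read_only": result.group(2) != "rw"}


print(migrate_entry("share:rw"))  # {'type': 'share', 'read_only': False}
print(migrate_entry("config"))    # {'type': 'config', 'read_only': True}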
@@ -292,7 +362,15 @@ _SCHEMA_ADDON_CONFIG = vol.Schema(
         vol.Optional(ATTR_DEVICES): [str],
         vol.Optional(ATTR_UDEV, default=False): vol.Boolean(),
         vol.Optional(ATTR_TMPFS, default=False): vol.Boolean(),
-        vol.Optional(ATTR_MAP, default=list): [vol.Match(RE_VOLUME)],
+        vol.Optional(ATTR_MAP, default=list): [
+            vol.Schema(
+                {
+                    vol.Required(ATTR_TYPE): vol.Coerce(MappingType),
+                    vol.Optional(ATTR_READ_ONLY, default=True): bool,
+                    vol.Optional(ATTR_PATH): str,
+                }
+            )
+        ],
         vol.Optional(ATTR_ENVIRONMENT): {vol.Match(r"\w*"): str},
         vol.Optional(ATTR_PRIVILEGED): [vol.Coerce(Capabilities)],
         vol.Optional(ATTR_APPARMOR, default=True): vol.Boolean(),
@@ -313,7 +391,7 @@ _SCHEMA_ADDON_CONFIG = vol.Schema(
         vol.Optional(ATTR_DOCKER_API, default=False): vol.Boolean(),
         vol.Optional(ATTR_AUTH_API, default=False): vol.Boolean(),
         vol.Optional(ATTR_SERVICES): [vol.Match(RE_SERVICE)],
-        vol.Optional(ATTR_DISCOVERY): [valid_discovery_service],
+        vol.Optional(ATTR_DISCOVERY): [str],
         vol.Optional(ATTR_BACKUP_EXCLUDE): [str],
         vol.Optional(ATTR_BACKUP_PRE): str,
         vol.Optional(ATTR_BACKUP_POST): str,
@@ -5,6 +5,7 @@ from pathlib import Path
 from typing import Any

 from aiohttp import web
+from aiohttp_fast_url_dispatcher import FastUrlDispatcher, attach_fast_url_dispatcher

 from ..const import AddonState
 from ..coresys import CoreSys, CoreSysAttributes
@@ -64,9 +65,10 @@ class RestAPI(CoreSysAttributes):
                 "max_field_size": MAX_LINE_SIZE,
             },
         )
+        attach_fast_url_dispatcher(self.webapp, FastUrlDispatcher())

         # service stuff
-        self._runner: web.AppRunner = web.AppRunner(self.webapp)
+        self._runner: web.AppRunner = web.AppRunner(self.webapp, shutdown_timeout=5)
         self._site: web.TCPSite | None = None

     async def load(self) -> None:
@@ -186,6 +188,8 @@ class RestAPI(CoreSysAttributes):
         # Boards endpoints
         self.webapp.add_routes(
             [
+                web.get("/os/boards/green", api_os.boards_green_info),
+                web.post("/os/boards/green", api_os.boards_green_options),
                 web.get("/os/boards/yellow", api_os.boards_yellow_info),
                 web.post("/os/boards/yellow", api_os.boards_yellow_options),
                 web.get("/os/boards/{board}", api_os.boards_other_info),
@@ -485,6 +489,8 @@ class RestAPI(CoreSysAttributes):
                 web.get("/backups/info", api_backups.info),
                 web.post("/backups/options", api_backups.options),
                 web.post("/backups/reload", api_backups.reload),
+                web.post("/backups/freeze", api_backups.freeze),
+                web.post("/backups/thaw", api_backups.thaw),
                 web.post("/backups/new/full", api_backups.backup_full),
                 web.post("/backups/new/partial", api_backups.backup_partial),
                 web.post("/backups/new/upload", api_backups.upload),
@@ -667,9 +673,7 @@ class RestAPI(CoreSysAttributes):
     async def start(self) -> None:
         """Run RESTful API webserver."""
         await self._runner.setup()
-        self._site = web.TCPSite(
-            self._runner, host="0.0.0.0", port=80, shutdown_timeout=5
-        )
+        self._site = web.TCPSite(self._runner, host="0.0.0.0", port=80)

         try:
             await self._site.start()
@@ -8,8 +8,8 @@ from aiohttp import web
 import voluptuous as vol
 from voluptuous.humanize import humanize_error

-from ..addons import AnyAddon
 from ..addons.addon import Addon
+from ..addons.manager import AnyAddon
 from ..addons.utils import rating_security
 from ..const import (
     ATTR_ADDONS,
@@ -388,7 +388,7 @@ class APIAddons(CoreSysAttributes):
     def uninstall(self, request: web.Request) -> Awaitable[None]:
         """Uninstall add-on."""
         addon = self._extract_addon(request)
-        return asyncio.shield(addon.uninstall())
+        return asyncio.shield(self.sys_addons.uninstall(addon.slug))

     @api_process
     async def start(self, request: web.Request) -> None:
@@ -414,7 +414,7 @@ class APIAddons(CoreSysAttributes):
     async def rebuild(self, request: web.Request) -> None:
         """Rebuild local build add-on."""
         addon = self._extract_addon(request)
-        if start_task := await asyncio.shield(addon.rebuild()):
+        if start_task := await asyncio.shield(self.sys_addons.rebuild(addon.slug)):
             await start_task

     @api_process_raw(CONTENT_TYPE_BINARY)
@@ -1,11 +1,11 @@
 """Init file for Supervisor Audio RESTful API."""
 import asyncio
 from collections.abc import Awaitable
+from dataclasses import asdict
 import logging
 from typing import Any

 from aiohttp import web
-import attr
 import voluptuous as vol

 from ..const import (
@@ -76,15 +76,11 @@ class APIAudio(CoreSysAttributes):
             ATTR_UPDATE_AVAILABLE: self.sys_plugins.audio.need_update,
             ATTR_HOST: str(self.sys_docker.network.audio),
             ATTR_AUDIO: {
-                ATTR_CARD: [attr.asdict(card) for card in self.sys_host.sound.cards],
-                ATTR_INPUT: [
-                    attr.asdict(stream) for stream in self.sys_host.sound.inputs
-                ],
-                ATTR_OUTPUT: [
-                    attr.asdict(stream) for stream in self.sys_host.sound.outputs
-                ],
+                ATTR_CARD: [asdict(card) for card in self.sys_host.sound.cards],
+                ATTR_INPUT: [asdict(stream) for stream in self.sys_host.sound.inputs],
+                ATTR_OUTPUT: [asdict(stream) for stream in self.sys_host.sound.outputs],
                 ATTR_APPLICATION: [
-                    attr.asdict(stream) for stream in self.sys_host.sound.applications
+                    asdict(stream) for stream in self.sys_host.sound.applications
                 ],
             },
         }
@@ -11,6 +11,7 @@ from ..addons.addon import Addon
 from ..const import ATTR_PASSWORD, ATTR_USERNAME, REQUEST_FROM
 from ..coresys import CoreSysAttributes
 from ..exceptions import APIForbidden
+from ..utils.json import json_loads
 from .const import CONTENT_TYPE_JSON, CONTENT_TYPE_URL
 from .utils import api_process, api_validate
@@ -67,7 +68,7 @@ class APIAuth(CoreSysAttributes):

         # Json
         if request.headers.get(CONTENT_TYPE) == CONTENT_TYPE_JSON:
-            data = await request.json()
+            data = await request.json(loads=json_loads)
             return await self._process_dict(request, addon, data)

         # URL encoded
@@ -1,5 +1,6 @@
 """Backups RESTful API."""
 import asyncio
+import errno
 import logging
 from pathlib import Path
 import re
@@ -20,6 +21,7 @@ from ..const import (
     ATTR_DAYS_UNTIL_STALE,
     ATTR_FOLDERS,
     ATTR_HOMEASSISTANT,
+    ATTR_HOMEASSISTANT_EXCLUDE_DATABASE,
     ATTR_LOCATON,
     ATTR_NAME,
     ATTR_PASSWORD,
@@ -28,12 +30,14 @@ from ..const import (
     ATTR_SIZE,
     ATTR_SLUG,
     ATTR_SUPERVISOR_VERSION,
+    ATTR_TIMEOUT,
     ATTR_TYPE,
     ATTR_VERSION,
 )
 from ..coresys import CoreSysAttributes
 from ..exceptions import APIError
 from ..mounts.const import MountUsage
+from ..resolution.const import UnhealthyReason
 from .const import CONTENT_TYPE_TAR
 from .utils import api_process, api_validate
@@ -63,6 +67,7 @@ SCHEMA_BACKUP_FULL = vol.Schema(
         vol.Optional(ATTR_PASSWORD): vol.Maybe(str),
         vol.Optional(ATTR_COMPRESSED): vol.Maybe(vol.Boolean()),
         vol.Optional(ATTR_LOCATON): vol.Maybe(str),
+        vol.Optional(ATTR_HOMEASSISTANT_EXCLUDE_DATABASE): vol.Boolean(),
     }
 )
@@ -80,6 +85,12 @@ SCHEMA_OPTIONS = vol.Schema(
     }
 )

+SCHEMA_FREEZE = vol.Schema(
+    {
+        vol.Optional(ATTR_TIMEOUT): vol.All(int, vol.Range(min=1)),
+    }
+)
+

 class APIBackups(CoreSysAttributes):
     """Handle RESTful API for backups functions."""
@@ -142,7 +153,7 @@ class APIBackups(CoreSysAttributes):
         self.sys_backups.save_data()

     @api_process
-    async def reload(self, request):
+    async def reload(self, _):
         """Reload backup list."""
         await asyncio.shield(self.sys_backups.reload())
         return True
@@ -177,6 +188,7 @@ class APIBackups(CoreSysAttributes):
             ATTR_ADDONS: data_addons,
             ATTR_REPOSITORIES: backup.repositories,
             ATTR_FOLDERS: backup.folders,
+            ATTR_HOMEASSISTANT_EXCLUDE_DATABASE: backup.homeassistant_exclude_database,
         }

     def _location_to_mount(self, body: dict[str, Any]) -> dict[str, Any]:
@@ -233,6 +245,17 @@ class APIBackups(CoreSysAttributes):

         return await asyncio.shield(self.sys_backups.do_restore_partial(backup, **body))

+    @api_process
+    async def freeze(self, request):
+        """Initiate manual freeze for external backup."""
+        body = await api_validate(SCHEMA_FREEZE, request)
+        await asyncio.shield(self.sys_backups.freeze_all(**body))
+
+    @api_process
+    async def thaw(self, request):
+        """Begin thaw after manual freeze."""
+        await self.sys_backups.thaw_all()
+
     @api_process
     async def remove(self, request):
         """Remove a backup."""
@@ -267,6 +290,8 @@ class APIBackups(CoreSysAttributes):
                     backup.write(chunk)

         except OSError as err:
+            if err.errno == errno.EBADMSG:
+                self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE
             _LOGGER.error("Can't write new backup file: %s", err)
             return False
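The new `freeze`/`thaw` handlers let an external snapshot tool quiesce Home Assistant around its own backup window. A sketch of a client driving them, assuming the handlers are routed at `/backups/freeze` and `/backups/thaw` (the route table is not part of this diff) and the usual Supervisor bearer-token conventions:

```python
# Sketch of an external-backup client; host and token are assumptions,
# taken from the usual add-on environment, not from this diff.
import asyncio

import aiohttp

SUPERVISOR = "http://supervisor"
TOKEN = "redacted"  # SUPERVISOR_TOKEN from the add-on environment


async def external_backup() -> None:
    headers = {"Authorization": f"Bearer {TOKEN}"}
    async with aiohttp.ClientSession(headers=headers) as session:
        # Freeze: quiesce Core/add-ons; the timeout is a safety net that
        # triggers an automatic thaw if the client never calls thaw.
        await session.post(f"{SUPERVISOR}/backups/freeze", json={"timeout": 300})
        try:
            ...  # take the filesystem/disk snapshot externally here
        finally:
            await session.post(f"{SUPERVISOR}/backups/thaw")


asyncio.run(external_backup())
```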
@@ -23,7 +23,6 @@ ATTR_CONNECTION_BUS = "connection_bus"
 ATTR_DATA_DISK = "data_disk"
 ATTR_DEVICE = "device"
 ATTR_DEV_PATH = "dev_path"
-ATTR_DISK_LED = "disk_led"
 ATTR_DISKS = "disks"
 ATTR_DRIVES = "drives"
 ATTR_DT_SYNCHRONIZED = "dt_synchronized"
@@ -31,8 +30,8 @@ ATTR_DT_UTC = "dt_utc"
 ATTR_EJECTABLE = "ejectable"
 ATTR_FALLBACK = "fallback"
 ATTR_FILESYSTEMS = "filesystems"
-ATTR_HEARTBEAT_LED = "heartbeat_led"
 ATTR_IDENTIFIERS = "identifiers"
+ATTR_JOBS = "jobs"
 ATTR_LLMNR = "llmnr"
 ATTR_LLMNR_HOSTNAME = "llmnr_hostname"
 ATTR_MDNS = "mdns"
@@ -40,7 +39,6 @@ ATTR_MODEL = "model"
 ATTR_MOUNTS = "mounts"
 ATTR_MOUNT_POINTS = "mount_points"
 ATTR_PANEL_PATH = "panel_path"
-ATTR_POWER_LED = "power_led"
 ATTR_REMOVABLE = "removable"
 ATTR_REVISION = "revision"
 ATTR_SEAT = "seat"
@@ -48,6 +46,7 @@ ATTR_SIGNED = "signed"
 ATTR_STARTUP_TIME = "startup_time"
 ATTR_SUBSYSTEM = "subsystem"
 ATTR_SYSFS = "sysfs"
+ATTR_SYSTEM_HEALTH_LED = "system_health_led"
 ATTR_TIME_DETECTED = "time_detected"
 ATTR_UPDATE_TYPE = "update_type"
 ATTR_USE_NTP = "use_ntp"
@@ -1,6 +1,9 @@
 """Init file for Supervisor network RESTful API."""
+import logging
+
 import voluptuous as vol

+from ..addons.addon import Addon
 from ..const import (
     ATTR_ADDON,
     ATTR_CONFIG,
@@ -9,15 +12,18 @@ from ..const import (
     ATTR_SERVICES,
     ATTR_UUID,
     REQUEST_FROM,
+    AddonState,
 )
 from ..coresys import CoreSysAttributes
 from ..discovery.validate import valid_discovery_service
 from ..exceptions import APIError, APIForbidden
 from .utils import api_process, api_validate, require_home_assistant

+_LOGGER: logging.Logger = logging.getLogger(__name__)
+
 SCHEMA_DISCOVERY = vol.Schema(
     {
-        vol.Required(ATTR_SERVICE): valid_discovery_service,
+        vol.Required(ATTR_SERVICE): str,
         vol.Optional(ATTR_CONFIG): vol.Maybe(dict),
     }
 )
@@ -36,19 +42,19 @@ class APIDiscovery(CoreSysAttributes):
     @api_process
     @require_home_assistant
     async def list(self, request):
-        """Show register services."""
+        """Show registered and available services."""
         # Get available discovery
-        discovery = []
-        for message in self.sys_discovery.list_messages:
-            discovery.append(
-                {
-                    ATTR_ADDON: message.addon,
-                    ATTR_SERVICE: message.service,
-                    ATTR_UUID: message.uuid,
-                    ATTR_CONFIG: message.config,
-                }
-            )
+        discovery = [
+            {
+                ATTR_ADDON: message.addon,
+                ATTR_SERVICE: message.service,
+                ATTR_UUID: message.uuid,
+                ATTR_CONFIG: message.config,
+            }
+            for message in self.sys_discovery.list_messages
+            if (addon := self.sys_addons.get(message.addon, local_only=True))
+            and addon.state == AddonState.STARTED
+        ]

         # Get available services/add-ons
         services = {}
@@ -62,11 +68,28 @@ class APIDiscovery(CoreSysAttributes):
     async def set_discovery(self, request):
         """Write data into a discovery pipeline."""
         body = await api_validate(SCHEMA_DISCOVERY, request)
-        addon = request[REQUEST_FROM]
+        addon: Addon = request[REQUEST_FROM]
+        service = body[ATTR_SERVICE]
+
+        try:
+            valid_discovery_service(service)
+        except vol.Invalid:
+            _LOGGER.warning(
+                "Received discovery message for unknown service %s from addon %s. Please report this to the maintainer of the add-on",
+                service,
+                addon.name,
+            )

         # Access?
         if body[ATTR_SERVICE] not in addon.discovery:
-            raise APIForbidden("Can't use discovery!")
+            _LOGGER.error(
+                "Add-on %s attempted to send discovery for service %s which is not listed in its config. Please report this to the maintainer of the add-on",
+                addon.name,
+                service,
+            )
+            raise APIForbidden(
+                "Add-ons must list services they provide via discovery in their config!"
+            )

         # Process discovery message
         message = self.sys_discovery.send(addon, **body)
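The rewritten `list` handler filters discovery messages down to add-ons that are installed and started, binding the lookup with a walrus operator inside the comprehension. A stand-alone illustration of that filtering pattern with plain dicts:

```python
# Minimal illustration of the comprehension pattern used above: the walrus
# operator binds the lookup result so it can be tested and reused without a
# second lookup per item.
installed = {"core_mosquitto": "started", "core_duckdns": "stopped"}
messages = ["core_mosquitto", "core_duckdns", "unknown_addon"]

visible = [
    {"addon": slug, "state": state}
    for slug in messages
    if (state := installed.get(slug)) and state == "started"
]
assert visible == [{"addon": "core_mosquitto", "state": "started"}]
```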
@@ -12,6 +12,7 @@ from ..const import (
     ATTR_AUDIO_INPUT,
     ATTR_AUDIO_OUTPUT,
     ATTR_BACKUP,
+    ATTR_BACKUPS_EXCLUDE_DATABASE,
     ATTR_BLK_READ,
     ATTR_BLK_WRITE,
     ATTR_BOOT,
@@ -51,6 +52,7 @@ SCHEMA_OPTIONS = vol.Schema(
         vol.Optional(ATTR_REFRESH_TOKEN): vol.Maybe(str),
         vol.Optional(ATTR_AUDIO_OUTPUT): vol.Maybe(str),
         vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(str),
+        vol.Optional(ATTR_BACKUPS_EXCLUDE_DATABASE): vol.Boolean(),
     }
 )
@@ -82,6 +84,7 @@ class APIHomeAssistant(CoreSysAttributes):
             ATTR_WATCHDOG: self.sys_homeassistant.watchdog,
             ATTR_AUDIO_INPUT: self.sys_homeassistant.audio_input,
             ATTR_AUDIO_OUTPUT: self.sys_homeassistant.audio_output,
+            ATTR_BACKUPS_EXCLUDE_DATABASE: self.sys_homeassistant.backups_exclude_database,
         }

     @api_process
@@ -113,6 +116,11 @@ class APIHomeAssistant(CoreSysAttributes):
         if ATTR_AUDIO_OUTPUT in body:
             self.sys_homeassistant.audio_output = body[ATTR_AUDIO_OUTPUT]

+        if ATTR_BACKUPS_EXCLUDE_DATABASE in body:
+            self.sys_homeassistant.backups_exclude_database = body[
+                ATTR_BACKUPS_EXCLUDE_DATABASE
+            ]
+
         self.sys_homeassistant.save_data()

     @api_process
@@ -21,11 +21,18 @@ from ..const import (
     ATTR_ICON,
     ATTR_PANELS,
     ATTR_SESSION,
+    ATTR_SESSION_DATA_USER_ID,
     ATTR_TITLE,
+    HEADER_REMOTE_USER_DISPLAY_NAME,
+    HEADER_REMOTE_USER_ID,
+    HEADER_REMOTE_USER_NAME,
     HEADER_TOKEN,
     HEADER_TOKEN_OLD,
+    IngressSessionData,
+    IngressSessionDataUser,
 )
 from ..coresys import CoreSysAttributes
+from ..exceptions import HomeAssistantAPIError
 from .const import COOKIE_INGRESS
 from .utils import api_process, api_validate, require_home_assistant
@@ -33,10 +40,46 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)

 VALIDATE_SESSION_DATA = vol.Schema({ATTR_SESSION: str})

+"""Expected optional payload of create session request"""
+SCHEMA_INGRESS_CREATE_SESSION_DATA = vol.Schema(
+    {
+        vol.Optional(ATTR_SESSION_DATA_USER_ID): str,
+    }
+)
+
+
+# from https://github.com/aio-libs/aiohttp/blob/8ae650bee4add9f131d49b96a0a150311ea58cd1/aiohttp/helpers.py#L1059C1-L1079C1
+def must_be_empty_body(method: str, code: int) -> bool:
+    """Check if a request must return an empty body."""
+    return (
+        status_code_must_be_empty_body(code)
+        or method_must_be_empty_body(method)
+        or (200 <= code < 300 and method.upper() == hdrs.METH_CONNECT)
+    )
+
+
+def method_must_be_empty_body(method: str) -> bool:
+    """Check if a method must return an empty body."""
+    # https://datatracker.ietf.org/doc/html/rfc9112#section-6.3-2.1
+    # https://datatracker.ietf.org/doc/html/rfc9112#section-6.3-2.2
+    return method.upper() == hdrs.METH_HEAD
+
+
+def status_code_must_be_empty_body(code: int) -> bool:
+    """Check if a status code must return an empty body."""
+    # https://datatracker.ietf.org/doc/html/rfc9112#section-6.3-2.1
+    return code in {204, 304} or 100 <= code < 200
+
+
 class APIIngress(CoreSysAttributes):
     """Ingress view to handle add-on webui routing."""

+    _list_of_users: list[IngressSessionDataUser]
+
+    def __init__(self) -> None:
+        """Initialize APIIngress."""
+        self._list_of_users = []
+
     def _extract_addon(self, request: web.Request) -> Addon:
         """Return addon, throw an exception it it doesn't exist."""
         token = request.match_info.get("token")
@@ -71,7 +114,19 @@ class APIIngress(CoreSysAttributes):
     @require_home_assistant
     async def create_session(self, request: web.Request) -> dict[str, Any]:
         """Create a new session."""
-        session = self.sys_ingress.create_session()
+        schema_ingress_config_session_data = await api_validate(
+            SCHEMA_INGRESS_CREATE_SESSION_DATA, request
+        )
+        data: IngressSessionData | None = None
+
+        if ATTR_SESSION_DATA_USER_ID in schema_ingress_config_session_data:
+            user = await self._find_user_by_id(
+                schema_ingress_config_session_data[ATTR_SESSION_DATA_USER_ID]
+            )
+            if user:
+                data = IngressSessionData(user)
+
+        session = self.sys_ingress.create_session(data)
         return {ATTR_SESSION: session}

     @api_process
@@ -99,13 +154,14 @@ class APIIngress(CoreSysAttributes):
         # Process requests
         addon = self._extract_addon(request)
         path = request.match_info.get("path")
+        session_data = self.sys_ingress.get_session_data(session)
         try:
             # Websocket
             if _is_websocket(request):
-                return await self._handle_websocket(request, addon, path)
+                return await self._handle_websocket(request, addon, path, session_data)

             # Request
-            return await self._handle_request(request, addon, path)
+            return await self._handle_request(request, addon, path, session_data)

         except aiohttp.ClientError as err:
             _LOGGER.error("Ingress error: %s", err)
@@ -113,7 +169,11 @@ class APIIngress(CoreSysAttributes):
         raise HTTPBadGateway()

     async def _handle_websocket(
-        self, request: web.Request, addon: Addon, path: str
+        self,
+        request: web.Request,
+        addon: Addon,
+        path: str,
+        session_data: IngressSessionData | None,
     ) -> web.WebSocketResponse:
         """Ingress route for websocket."""
         if hdrs.SEC_WEBSOCKET_PROTOCOL in request.headers:
@@ -131,7 +191,7 @@ class APIIngress(CoreSysAttributes):

         # Preparing
         url = self._create_url(addon, path)
-        source_header = _init_header(request, addon)
+        source_header = _init_header(request, addon, session_data)

         # Support GET query
         if request.query_string:
@@ -157,11 +217,15 @@ class APIIngress(CoreSysAttributes):
         return ws_server

     async def _handle_request(
-        self, request: web.Request, addon: Addon, path: str
+        self,
+        request: web.Request,
+        addon: Addon,
+        path: str,
+        session_data: IngressSessionData | None,
     ) -> web.Response | web.StreamResponse:
         """Ingress route for request."""
         url = self._create_url(addon, path)
-        source_header = _init_header(request, addon)
+        source_header = _init_header(request, addon, session_data)

         # Passing the raw stream breaks requests for some webservers
         # since we just need it for POST requests really, for all other methods
@@ -184,10 +248,18 @@ class APIIngress(CoreSysAttributes):
             skip_auto_headers={hdrs.CONTENT_TYPE},
         ) as result:
             headers = _response_header(result)
+            # Avoid parsing content_type in simple cases for better performance
+            if maybe_content_type := result.headers.get(hdrs.CONTENT_TYPE):
+                content_type = (maybe_content_type.partition(";"))[0].strip()
+            else:
+                content_type = result.content_type

             # Simple request
             if (
-                hdrs.CONTENT_LENGTH in result.headers
+                # empty body responses should not be streamed,
+                # otherwise aiohttp < 3.9.0 may generate
+                # an invalid "0\r\n\r\n" chunk instead of an empty response.
+                must_be_empty_body(request.method, result.status)
+                or hdrs.CONTENT_LENGTH in result.headers
                 and int(result.headers.get(hdrs.CONTENT_LENGTH, 0)) < 4_194_000
             ):
                 # Return Response
@@ -195,13 +267,13 @@ class APIIngress(CoreSysAttributes):
                 return web.Response(
                     headers=headers,
                     status=result.status,
-                    content_type=result.content_type,
+                    content_type=content_type,
                     body=body,
                 )

             # Stream response
             response = web.StreamResponse(status=result.status, headers=headers)
-            response.content_type = result.content_type
+            response.content_type = content_type

             try:
                 await response.prepare(request)
@@ -217,11 +289,35 @@ class APIIngress(CoreSysAttributes):

         return response

+    async def _find_user_by_id(self, user_id: str) -> IngressSessionDataUser | None:
+        """Find user object by the user's ID."""
+        try:
+            list_of_users = await self.sys_homeassistant.get_users()
+        except (HomeAssistantAPIError, TypeError) as err:
+            _LOGGER.error(
+                "%s error occurred while requesting list of users: %s", type(err), err
+            )
+            return None
+
+        if list_of_users is not None:
+            self._list_of_users = list_of_users
+
+        return next((user for user in self._list_of_users if user.id == user_id), None)
+

-def _init_header(request: web.Request, addon: str) -> CIMultiDict | dict[str, str]:
+def _init_header(
+    request: web.Request, addon: Addon, session_data: IngressSessionData | None
+) -> CIMultiDict | dict[str, str]:
     """Create initial header."""
     headers = {}

+    if session_data is not None:
+        headers[HEADER_REMOTE_USER_ID] = session_data.user.id
+        if session_data.user.username is not None:
+            headers[HEADER_REMOTE_USER_NAME] = session_data.user.username
+        if session_data.user.display_name is not None:
+            headers[HEADER_REMOTE_USER_DISPLAY_NAME] = session_data.user.display_name
+
     # filter flags
     for name, value in request.headers.items():
         if name in (
@@ -234,6 +330,9 @@ def _init_header(request: web.Request, addon: str) -> CIMultiDict | dict[str, st
             hdrs.SEC_WEBSOCKET_KEY,
             istr(HEADER_TOKEN),
             istr(HEADER_TOKEN_OLD),
+            istr(HEADER_REMOTE_USER_ID),
+            istr(HEADER_REMOTE_USER_NAME),
+            istr(HEADER_REMOTE_USER_DISPLAY_NAME),
         ):
             continue
         headers[name] = value
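The three helpers vendored above from aiohttp encode the RFC 9112 empty-body rules: HEAD requests, 1xx/204/304 statuses, and 2xx answers to CONNECT must not carry a body, so the ingress proxy returns them without streaming. A self-contained check of what they classify:

```python
# Self-contained check of the empty-body rules vendored above (RFC 9112).
from aiohttp import hdrs


def status_code_must_be_empty_body(code: int) -> bool:
    return code in {204, 304} or 100 <= code < 200


def method_must_be_empty_body(method: str) -> bool:
    return method.upper() == hdrs.METH_HEAD


def must_be_empty_body(method: str, code: int) -> bool:
    return (
        status_code_must_be_empty_body(code)
        or method_must_be_empty_body(method)
        or (200 <= code < 300 and method.upper() == hdrs.METH_CONNECT)
    )


assert must_be_empty_body("GET", 304)      # not-modified: never a body
assert must_be_empty_body("HEAD", 200)     # HEAD: headers only
assert must_be_empty_body("CONNECT", 200)  # successful tunnel
assert not must_be_empty_body("GET", 200)  # ordinary response streams
```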
@@ -6,7 +6,9 @@ from aiohttp import web
 import voluptuous as vol

 from ..coresys import CoreSysAttributes
+from ..jobs import SupervisorJob
 from ..jobs.const import ATTR_IGNORE_CONDITIONS, JobCondition
+from .const import ATTR_JOBS
 from .utils import api_process, api_validate

 _LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -19,11 +21,45 @@ SCHEMA_OPTIONS = vol.Schema(
 class APIJobs(CoreSysAttributes):
     """Handle RESTful API for OS functions."""

+    def _list_jobs(self) -> list[dict[str, Any]]:
+        """Return current job tree."""
+        jobs_by_parent: dict[str | None, list[SupervisorJob]] = {}
+        for job in self.sys_jobs.jobs:
+            if job.internal:
+                continue
+
+            if job.parent_id not in jobs_by_parent:
+                jobs_by_parent[job.parent_id] = [job]
+            else:
+                jobs_by_parent[job.parent_id].append(job)
+
+        job_list: list[dict[str, Any]] = []
+        queue: list[tuple[list[dict[str, Any]], SupervisorJob]] = [
+            (job_list, job) for job in jobs_by_parent.get(None, [])
+        ]
+
+        while queue:
+            (current_list, current_job) = queue.pop(0)
+            child_jobs: list[dict[str, Any]] = []
+
+            # We remove parent_id and instead use that info to represent jobs as a tree
+            job_dict = current_job.as_dict() | {"child_jobs": child_jobs}
+            job_dict.pop("parent_id")
+            current_list.append(job_dict)
+
+            if current_job.uuid in jobs_by_parent:
+                queue.extend(
+                    [(child_jobs, job) for job in jobs_by_parent.get(current_job.uuid)]
+                )
+
+        return job_list
+
     @api_process
     async def info(self, request: web.Request) -> dict[str, Any]:
         """Return JobManager information."""
         return {
             ATTR_IGNORE_CONDITIONS: self.sys_jobs.ignore_conditions,
+            ATTR_JOBS: self._list_jobs(),
         }

     @api_process
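`_list_jobs` turns the flat job list into a tree by grouping on `parent_id` and walking a breadth-first queue of (destination list, job) pairs. A stand-alone sketch with a minimal job stand-in (the real `SupervisorJob` has more fields):

```python
# Stand-alone sketch of the tree building above; FakeJob is a hypothetical
# stand-in carrying only the three fields the algorithm needs.
from dataclasses import dataclass
from typing import Any


@dataclass
class FakeJob:
    uuid: str
    parent_id: str | None
    name: str

    def as_dict(self) -> dict[str, Any]:
        return {"uuid": self.uuid, "parent_id": self.parent_id, "name": self.name}


jobs = [
    FakeJob("a", None, "backup_manager_full_backup"),
    FakeJob("b", "a", "backup_store_addons"),
    FakeJob("c", "a", "backup_store_folders"),
]

jobs_by_parent: dict[str | None, list[FakeJob]] = {}
for job in jobs:
    jobs_by_parent.setdefault(job.parent_id, []).append(job)

job_list: list[dict[str, Any]] = []
queue = [(job_list, job) for job in jobs_by_parent.get(None, [])]
while queue:
    current_list, current_job = queue.pop(0)
    child_jobs: list[dict[str, Any]] = []
    job_dict = current_job.as_dict() | {"child_jobs": child_jobs}
    job_dict.pop("parent_id")  # the parent/child link is now the tree shape
    current_list.append(job_dict)
    queue.extend((child_jobs, job) for job in jobs_by_parent.get(current_job.uuid, []))

assert job_list[0]["name"] == "backup_manager_full_backup"
assert len(job_list[0]["child_jobs"]) == 2
```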
@@ -19,6 +19,7 @@ from ...const import (
     CoreState,
 )
 from ...coresys import CoreSys, CoreSysAttributes
+from ...utils import version_is_new_enough
 from ..utils import api_return_error, excract_supervisor_token

 _LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -195,7 +196,7 @@ class SecurityMiddleware(CoreSysAttributes):
             CoreState.FREEZE,
         ):
             return api_return_error(
-                message=f"System is not ready with state: {self.sys_core.state.value}"
+                message=f"System is not ready with state: {self.sys_core.state}"
             )

         return await handler(request)
@@ -273,9 +274,8 @@ class SecurityMiddleware(CoreSysAttributes):
     @middleware
     async def core_proxy(self, request: Request, handler: RequestHandler) -> Response:
         """Validate user from Core API proxy."""
-        if (
-            request[REQUEST_FROM] != self.sys_homeassistant
-            or self.sys_homeassistant.version >= _CORE_VERSION
+        if request[REQUEST_FROM] != self.sys_homeassistant or version_is_new_enough(
+            self.sys_homeassistant.version, _CORE_VERSION
         ):
             return await handler(request)
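The `version_is_new_enough` helper replaces a raw `>=` on what may be an `AwesomeVersion`. A sketch of the likely motivation, assuming the helper guards against unparseable or incomparable version strings such as dev builds (the exact body in `supervisor.utils` may differ):

```python
# Hedged sketch: comparing AwesomeVersion objects can raise for exotic
# version strings, so a helper centralizes a comparison that never crashes
# the middleware. This is an assumption about intent, not the real body.
from awesomeversion import AwesomeVersion, AwesomeVersionCompareException


def version_is_new_enough(
    version: AwesomeVersion | None, want_version: AwesomeVersion
) -> bool:
    try:
        return version is not None and version >= want_version
    except AwesomeVersionCompareException:
        return False


print(version_is_new_enough(AwesomeVersion("2023.8.0"), AwesomeVersion("2023.3.4")))  # True
print(version_is_new_enough(None, AwesomeVersion("2023.3.4")))  # False
```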
@@ -1,11 +1,11 @@
 """REST API for network."""
 import asyncio
 from collections.abc import Awaitable
+from dataclasses import replace
 from ipaddress import ip_address, ip_interface
 from typing import Any

 from aiohttp import web
-import attr
 import voluptuous as vol

 from ..const import (
@@ -43,8 +43,7 @@ from ..const import (
 )
 from ..coresys import CoreSysAttributes
 from ..exceptions import APIError, HostNetworkNotFound
-from ..host.const import AuthMethod, InterfaceType, WifiMode
-from ..host.network import (
+from ..host.configuration import (
     AccessPoint,
     Interface,
     InterfaceMethod,
@@ -52,6 +51,7 @@ from ..host.network import (
     VlanConfig,
     WifiConfig,
 )
+from ..host.const import AuthMethod, InterfaceType, WifiMode
 from .utils import api_process, api_validate

 _SCHEMA_IP_CONFIG = vol.Schema(
@@ -121,6 +121,7 @@ def interface_struct(interface: Interface) -> dict[str, Any]:
         ATTR_ENABLED: interface.enabled,
         ATTR_CONNECTED: interface.connected,
         ATTR_PRIMARY: interface.primary,
+        ATTR_MAC: interface.mac,
         ATTR_IPV4: ipconfig_struct(interface.ipv4) if interface.ipv4 else None,
         ATTR_IPV6: ipconfig_struct(interface.ipv6) if interface.ipv6 else None,
         ATTR_WIFI: wifi_struct(interface.wifi) if interface.wifi else None,
@@ -196,19 +197,19 @@ class APINetwork(CoreSysAttributes):
         # Apply config
         for key, config in body.items():
             if key == ATTR_IPV4:
-                interface.ipv4 = attr.evolve(
+                interface.ipv4 = replace(
                     interface.ipv4
                     or IpConfig(InterfaceMethod.STATIC, [], None, [], None),
                     **config,
                 )
             elif key == ATTR_IPV6:
-                interface.ipv6 = attr.evolve(
+                interface.ipv6 = replace(
                     interface.ipv6
                     or IpConfig(InterfaceMethod.STATIC, [], None, [], None),
                     **config,
                 )
             elif key == ATTR_WIFI:
-                interface.wifi = attr.evolve(
+                interface.wifi = replace(
                     interface.wifi
                     or WifiConfig(
                         WifiMode.INFRASTRUCTURE, "", AuthMethod.OPEN, None, None
@@ -276,6 +277,8 @@ class APINetwork(CoreSysAttributes):
         )

         vlan_interface = Interface(
+            "",
+            "",
             "",
             True,
             True,
@@ -8,11 +8,15 @@ from aiohttp import web
 import voluptuous as vol

 from ..const import (
+    ATTR_ACTIVITY_LED,
     ATTR_BOARD,
     ATTR_BOOT,
     ATTR_DEVICES,
+    ATTR_DISK_LED,
+    ATTR_HEARTBEAT_LED,
     ATTR_ID,
     ATTR_NAME,
+    ATTR_POWER_LED,
     ATTR_SERIAL,
     ATTR_SIZE,
     ATTR_UPDATE_AVAILABLE,
@@ -27,21 +31,19 @@ from .const import (
     ATTR_DATA_DISK,
     ATTR_DEV_PATH,
     ATTR_DEVICE,
-    ATTR_DISK_LED,
     ATTR_DISKS,
-    ATTR_HEARTBEAT_LED,
     ATTR_MODEL,
-    ATTR_POWER_LED,
+    ATTR_SYSTEM_HEALTH_LED,
     ATTR_VENDOR,
 )
 from .utils import api_process, api_validate

 _LOGGER: logging.Logger = logging.getLogger(__name__)

-# pylint: disable=no-value-for-parameter
 SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): version_tag})
 SCHEMA_DISK = vol.Schema({vol.Required(ATTR_DEVICE): str})

+# pylint: disable=no-value-for-parameter
 SCHEMA_YELLOW_OPTIONS = vol.Schema(
     {
         vol.Optional(ATTR_DISK_LED): vol.Boolean(),
@@ -49,6 +51,14 @@ SCHEMA_YELLOW_OPTIONS = vol.Schema(
         vol.Optional(ATTR_POWER_LED): vol.Boolean(),
     }
 )
+SCHEMA_GREEN_OPTIONS = vol.Schema(
+    {
+        vol.Optional(ATTR_ACTIVITY_LED): vol.Boolean(),
+        vol.Optional(ATTR_POWER_LED): vol.Boolean(),
+        vol.Optional(ATTR_SYSTEM_HEALTH_LED): vol.Boolean(),
+    }
+)
+# pylint: enable=no-value-for-parameter


 class APIOS(CoreSysAttributes):
@@ -105,6 +115,31 @@ class APIOS(CoreSysAttributes):
             ],
         }

+    @api_process
+    async def boards_green_info(self, request: web.Request) -> dict[str, Any]:
+        """Get green board settings."""
+        return {
+            ATTR_ACTIVITY_LED: self.sys_dbus.agent.board.green.activity_led,
+            ATTR_POWER_LED: self.sys_dbus.agent.board.green.power_led,
+            ATTR_SYSTEM_HEALTH_LED: self.sys_dbus.agent.board.green.user_led,
+        }
+
+    @api_process
+    async def boards_green_options(self, request: web.Request) -> None:
+        """Update green board settings."""
+        body = await api_validate(SCHEMA_GREEN_OPTIONS, request)
+
+        if ATTR_ACTIVITY_LED in body:
+            self.sys_dbus.agent.board.green.activity_led = body[ATTR_ACTIVITY_LED]
+
+        if ATTR_POWER_LED in body:
+            self.sys_dbus.agent.board.green.power_led = body[ATTR_POWER_LED]
+
+        if ATTR_SYSTEM_HEALTH_LED in body:
+            self.sys_dbus.agent.board.green.user_led = body[ATTR_SYSTEM_HEALTH_LED]
+
+        self.sys_dbus.agent.board.green.save_data()
+
     @api_process
     async def boards_yellow_info(self, request: web.Request) -> dict[str, Any]:
         """Get yellow board settings."""
@@ -128,6 +163,7 @@ class APIOS(CoreSysAttributes):
         if ATTR_POWER_LED in body:
             self.sys_dbus.agent.board.yellow.power_led = body[ATTR_POWER_LED]

+        self.sys_dbus.agent.board.yellow.save_data()
         self.sys_resolution.create_issue(
             IssueType.REBOOT_REQUIRED,
             ContextType.SYSTEM,
@@ -6,7 +6,10 @@ import logging
 import aiohttp
 from aiohttp import web
 from aiohttp.client_exceptions import ClientConnectorError
+from aiohttp.client_ws import ClientWebSocketResponse
 from aiohttp.hdrs import AUTHORIZATION, CONTENT_TYPE
+from aiohttp.http import WSMessage
+from aiohttp.http_websocket import WSMsgType
 from aiohttp.web_exceptions import HTTPBadGateway, HTTPUnauthorized

 from ..coresys import CoreSysAttributes
@@ -18,6 +21,13 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
 FORWARD_HEADERS = ("X-Speech-Content",)
 HEADER_HA_ACCESS = "X-Ha-Access"

+# Maximum message size for websocket messages from Home Assistant.
+# Since these are coming from core we want the largest possible size
+# that is not likely to cause a memory problem as most modern browsers
+# support large messages.
+# https://github.com/home-assistant/supervisor/issues/4392
+MAX_MESSAGE_SIZE_FROM_CORE = 64 * 1024 * 1024
+

 class APIProxy(CoreSysAttributes):
     """API Proxy for Home Assistant."""
@@ -67,7 +77,7 @@ class APIProxy(CoreSysAttributes):
             _LOGGER.error("Error on API for request %s", path)
         except aiohttp.ClientError as err:
             _LOGGER.error("Client error on API %s request %s", path, err)
-        except asyncio.TimeoutError:
+        except TimeoutError:
             _LOGGER.error("Client timeout error on API request %s", path)

         raise HTTPBadGateway()
@@ -107,12 +117,14 @@ class APIProxy(CoreSysAttributes):
             body=data, status=client.status, content_type=client.content_type
         )

-    async def _websocket_client(self):
+    async def _websocket_client(self) -> ClientWebSocketResponse:
         """Initialize a WebSocket API connection."""
         url = f"{self.sys_homeassistant.api_url}/api/websocket"

         try:
-            client = await self.sys_websession.ws_connect(url, heartbeat=30, ssl=False)
+            client = await self.sys_websession.ws_connect(
+                url, heartbeat=30, ssl=False, max_msg_size=MAX_MESSAGE_SIZE_FROM_CORE
+            )

             # Handle authentication
             data = await client.receive_json()
@@ -158,6 +170,25 @@ class APIProxy(CoreSysAttributes):

         raise APIError()

+    async def _proxy_message(
+        self,
+        read_task: asyncio.Task,
+        target: web.WebSocketResponse | ClientWebSocketResponse,
+    ) -> None:
+        """Proxy a message from client to server or vice versa."""
+        if read_task.exception():
+            raise read_task.exception()
+
+        msg: WSMessage = read_task.result()
+        if msg.type == WSMsgType.TEXT:
+            return await target.send_str(msg.data)
+        if msg.type == WSMsgType.BINARY:
+            return await target.send_bytes(msg.data)
+
+        raise TypeError(
+            f"Cannot proxy websocket message of unsupported type: {msg.type}"
+        )
+
     async def websocket(self, request: web.Request):
         """Initialize a WebSocket API connection."""
         if not await self.sys_homeassistant.api.check_api_state():
@@ -205,13 +236,13 @@ class APIProxy(CoreSysAttributes):

         _LOGGER.info("Home Assistant WebSocket API request running")
         try:
-            client_read = None
-            server_read = None
+            client_read: asyncio.Task | None = None
+            server_read: asyncio.Task | None = None
             while not server.closed and not client.closed:
                 if not client_read:
-                    client_read = self.sys_create_task(client.receive_str())
+                    client_read = self.sys_create_task(client.receive())
                 if not server_read:
-                    server_read = self.sys_create_task(server.receive_str())
+                    server_read = self.sys_create_task(server.receive())

                 # wait until data need to be processed
                 await asyncio.wait(
@@ -220,14 +251,12 @@ class APIProxy(CoreSysAttributes):

                 # server
                 if server_read.done() and not client.closed:
-                    server_read.exception()
-                    await client.send_str(server_read.result())
+                    await self._proxy_message(server_read, client)
                     server_read = None

                 # client
                 if client_read.done() and not server.closed:
-                    client_read.exception()
-                    await server.send_str(client_read.result())
+                    await self._proxy_message(client_read, server)
                     client_read = None

         except asyncio.CancelledError:
@@ -237,9 +266,9 @@ class APIProxy(CoreSysAttributes):
             _LOGGER.info("Home Assistant WebSocket API error: %s", err)

         finally:
-            if client_read:
+            if client_read and not client_read.done():
                 client_read.cancel()
-            if server_read:
+            if server_read and not server_read.done():
                 server_read.cancel()

             # close connections
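Switching the proxy loop from `receive_str()` to `receive()` plus `_proxy_message` means BINARY frames are forwarded instead of raising on the first non-text message. A minimal illustration of the same type dispatch:

```python
# Minimal illustration of the WSMessage type dispatch used by _proxy_message.
from aiohttp import WSMessage, WSMsgType


def describe(msg: WSMessage) -> str:
    if msg.type == WSMsgType.TEXT:
        return f"text frame: {msg.data!r}"
    if msg.type == WSMsgType.BINARY:
        return f"binary frame of {len(msg.data)} bytes"
    raise TypeError(f"Cannot proxy websocket message of unsupported type: {msg.type}")


print(describe(WSMessage(WSMsgType.TEXT, '{"type":"ping"}', None)))
print(describe(WSMessage(WSMsgType.BINARY, b"\x00\x01", None)))
```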
@@ -6,7 +6,7 @@ from typing import Any
 from aiohttp import web
 import voluptuous as vol

-from ..addons import AnyAddon
+from ..addons.manager import AnyAddon
 from ..addons.utils import rating_security
 from ..api.const import ATTR_SIGNED
 from ..api.utils import api_process, api_process_raw, api_validate
@@ -186,18 +186,20 @@ class APIStore(CoreSysAttributes):
         }

     @api_process
-    async def addons_list(self, request: web.Request) -> list[dict[str, Any]]:
+    async def addons_list(self, request: web.Request) -> dict[str, Any]:
         """Return all store add-ons."""
-        return [
-            self._generate_addon_information(self.sys_addons.store[addon])
-            for addon in self.sys_addons.store
-        ]
+        return {
+            ATTR_ADDONS: [
+                self._generate_addon_information(self.sys_addons.store[addon])
+                for addon in self.sys_addons.store
+            ]
+        }

     @api_process
     def addons_addon_install(self, request: web.Request) -> Awaitable[None]:
         """Install add-on."""
         addon = self._extract_addon(request)
-        return asyncio.shield(addon.install())
+        return asyncio.shield(self.sys_addons.install(addon.slug))

     @api_process
     async def addons_addon_update(self, request: web.Request) -> None:
@@ -209,7 +211,7 @@ class APIStore(CoreSysAttributes):
         body = await api_validate(SCHEMA_UPDATE, request)

         if start_task := await asyncio.shield(
-            addon.update(backup=body.get(ATTR_BACKUP))
+            self.sys_addons.update(addon.slug, backup=body.get(ATTR_BACKUP))
         ):
             await start_task
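`addons_list` now wraps its payload in an object keyed by `addons`, which is a breaking change for API consumers expecting a bare array. The one-line client migration, with response shapes sketched from the handler above:

```python
# Response shapes inferred from the handler above; the entries shown are
# illustrative, not real payloads.
old_response = [{"slug": "core_mosquitto"}]              # GET /store/addons, before
new_response = {"addons": [{"slug": "core_mosquitto"}]}  # GET /store/addons, after

addons = new_response["addons"]  # was: addons = old_response
assert addons[0]["slug"] == "core_mosquitto"
```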
@@ -22,7 +22,7 @@ from ..const import (
 from ..coresys import CoreSys
 from ..exceptions import APIError, APIForbidden, DockerAPIError, HassioError
 from ..utils import check_exception_chain, get_message_from_exception_chain
-from ..utils.json import JSONEncoder
+from ..utils.json import json_dumps, json_loads as json_loads_util
 from ..utils.log_format import format_message
 from .const import CONTENT_TYPE_BINARY
@@ -48,7 +48,7 @@ def json_loads(data: Any) -> dict[str, Any]:
     if not data:
         return {}
     try:
-        return json.loads(data)
+        return json_loads_util(data)
     except json.JSONDecodeError as err:
         raise APIError("Invalid json") from err
@@ -130,7 +130,7 @@ def api_return_error(
             JSON_MESSAGE: message or "Unknown error, see supervisor",
         },
         status=400,
-        dumps=lambda x: json.dumps(x, cls=JSONEncoder),
+        dumps=json_dumps,
     )


@@ -138,7 +138,7 @@ def api_return_ok(data: dict[str, Any] | None = None) -> web.Response:
     """Return an API ok answer."""
     return web.json_response(
         {JSON_RESULT: RESULT_OK, JSON_DATA: data or {}},
-        dumps=lambda x: json.dumps(x, cls=JSONEncoder),
+        dumps=json_dumps,
     )
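The `dumps=lambda x: json.dumps(x, cls=JSONEncoder)` pattern is replaced by a shared `json_dumps` function. A sketch of the idea, assuming the `supervisor.utils.json` helpers delegate to a faster serializer such as orjson (the real implementation may differ):

```python
# Hedged sketch: one module-level dumps callable instead of a fresh lambda
# per response, with the serializer swappable in a single place.
import json
from typing import Any

try:
    import orjson

    def json_dumps(data: Any) -> str:
        # orjson returns bytes; aiohttp's json_response expects str
        return orjson.dumps(data).decode("utf-8")

except ImportError:  # fallback keeps the same call signature

    def json_dumps(data: Any) -> str:
        return json.dumps(data, default=str)


print(json_dumps({"result": "ok", "data": {}}))
```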
@@ -28,6 +28,7 @@ class CpuArch(CoreSysAttributes):
         """Initialize CPU Architecture handler."""
         self.coresys = coresys
         self._supported_arch: list[str] = []
+        self._supported_set: set[str] = set()
         self._default_arch: str

     @property
@@ -70,9 +71,11 @@ class CpuArch(CoreSysAttributes):
             if native_support not in self._supported_arch:
                 self._supported_arch.append(native_support)

+        self._supported_set = set(self._supported_arch)
+
     def is_supported(self, arch_list: list[str]) -> bool:
         """Return True if there is a supported arch by this platform."""
-        return not set(self.supported).isdisjoint(set(arch_list))
+        return not self._supported_set.isdisjoint(arch_list)

     def match(self, arch_list: list[str]) -> str:
         """Return best match for this CPU/Platform."""
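Precomputing `_supported_set` once at load time turns every `is_supported` call from two set constructions into a single `isdisjoint` over the existing set (`isdisjoint` accepts any iterable, so the argument needs no conversion either). A micro-benchmark sketch:

```python
# Micro-benchmark sketch of the change; the arch lists are illustrative.
import timeit

supported = ["amd64", "i386", "armv7", "armhf", "aarch64"]
supported_set = set(supported)
wanted = ["aarch64", "amd64"]

old = timeit.timeit(lambda: not set(supported).isdisjoint(set(wanted)), number=100_000)
new = timeit.timeit(lambda: not supported_set.isdisjoint(wanted), number=100_000)
print(f"old: {old:.3f}s  new: {new:.3f}s")
```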
@@ -1,4 +1,5 @@
 """Representation of a backup file."""
+import asyncio
 from base64 import b64decode, b64encode
 from collections.abc import Awaitable
 from datetime import timedelta
@@ -18,13 +19,14 @@ from securetar import SecureTarFile, atomic_contents_add, secure_path
 import voluptuous as vol
 from voluptuous.humanize import humanize_error

-from ..addons import Addon
+from ..addons.manager import Addon
 from ..const import (
     ATTR_ADDONS,
     ATTR_COMPRESSED,
     ATTR_CRYPTO,
     ATTR_DATE,
     ATTR_DOCKER,
+    ATTR_EXCLUDE_DATABASE,
     ATTR_FOLDERS,
     ATTR_HOMEASSISTANT,
     ATTR_NAME,
@@ -129,7 +131,14 @@ class Backup(CoreSysAttributes):
         """Return backup Home Assistant version."""
         if self.homeassistant is None:
             return None
-        return self._data[ATTR_HOMEASSISTANT][ATTR_VERSION]
+        return self.homeassistant[ATTR_VERSION]
+
+    @property
+    def homeassistant_exclude_database(self) -> bool:
+        """Return whether database was excluded from Home Assistant backup."""
+        if self.homeassistant is None:
+            return None
+        return self.homeassistant[ATTR_EXCLUDE_DATABASE]

     @property
     def homeassistant(self):
@@ -183,7 +192,15 @@ class Backup(CoreSysAttributes):
             days=self.sys_backups.days_until_stale
         )

-    def new(self, slug, name, date, sys_type, password=None, compressed=True):
+    def new(
+        self,
+        slug: str,
+        name: str,
+        date: str,
+        sys_type: BackupType,
+        password: str | None = None,
+        compressed: bool = True,
+    ):
         """Initialize a new backup."""
         # Init metadata
         self._data[ATTR_VERSION] = 2
@@ -288,7 +305,7 @@ class Backup(CoreSysAttributes):

     async def __aenter__(self):
         """Async context to open a backup."""
-        self._tmp = TemporaryDirectory(dir=str(self.sys_config.path_tmp))
+        self._tmp = TemporaryDirectory(dir=str(self.tarfile.parent))

         # create a backup
         if not self.tarfile.is_file():
@@ -298,7 +315,11 @@ class Backup(CoreSysAttributes):
         def _extract_backup():
             """Extract a backup."""
             with tarfile.open(self.tarfile, "r:") as tar:
-                tar.extractall(path=self._tmp.name, members=secure_path(tar))
+                tar.extractall(
+                    path=self._tmp.name,
+                    members=secure_path(tar),
+                    filter="fully_trusted",
+                )

         await self.sys_run_in_executor(_extract_backup)
@@ -332,14 +353,14 @@ class Backup(CoreSysAttributes):
         finally:
             self._tmp.cleanup()

-    async def store_addons(self, addon_list: list[str]) -> list[Awaitable[None]]:
+    async def store_addons(self, addon_list: list[str]) -> list[asyncio.Task]:
         """Add a list of add-ons into backup.

-        For each addon that needs to be started after backup, returns a task which
+        For each addon that needs to be started after backup, returns a Task which
         completes when that addon has state 'started' (see addon.start).
         """

-        async def _addon_save(addon: Addon) -> Awaitable[None] | None:
+        async def _addon_save(addon: Addon) -> asyncio.Task | None:
             """Task to store an add-on into backup."""
             tar_name = f"{addon.slug}.tar{'.gz' if self.compressed else ''}"
             addon_file = SecureTarFile(
@@ -371,7 +392,7 @@ class Backup(CoreSysAttributes):

         # Save Add-ons sequential
         # avoid issue on slow IO
-        start_tasks: list[Awaitable[None]] = []
+        start_tasks: list[asyncio.Task] = []
         for addon in addon_list:
             try:
                 if start_task := await _addon_save(addon):
@@ -381,10 +402,12 @@ class Backup(CoreSysAttributes):

         return start_tasks

-    async def restore_addons(self, addon_list: list[str]) -> list[Awaitable[None]]:
+    async def restore_addons(
+        self, addon_list: list[str]
+    ) -> tuple[bool, list[asyncio.Task]]:
         """Restore a list add-on from backup."""

-        async def _addon_restore(addon_slug: str) -> Awaitable[None] | None:
+        async def _addon_restore(addon_slug: str) -> tuple[bool, asyncio.Task | None]:
             """Task to restore an add-on into backup."""
             tar_name = f"{addon_slug}.tar{'.gz' if self.compressed else ''}"
             addon_file = SecureTarFile(
@@ -398,30 +421,36 @@ class Backup(CoreSysAttributes):
             # If exists inside backup
             if not addon_file.path.exists():
                 _LOGGER.error("Can't find backup %s", addon_slug)
-                return
+                return (False, None)

             # Perform a restore
             try:
-                return await self.sys_addons.restore(addon_slug, addon_file)
+                return (True, await self.sys_addons.restore(addon_slug, addon_file))
             except AddonsError:
                 _LOGGER.error("Can't restore backup %s", addon_slug)
+                return (False, None)

         # Save Add-ons sequential
         # avoid issue on slow IO
-        start_tasks: list[Awaitable[None]] = []
+        start_tasks: list[asyncio.Task] = []
+        success = True
         for slug in addon_list:
             try:
-                if start_task := await _addon_restore(slug):
-                    start_tasks.append(start_task)
+                addon_success, start_task = await _addon_restore(slug)
             except Exception as err:  # pylint: disable=broad-except
                 _LOGGER.warning("Can't restore Add-on %s: %s", slug, err)
+                success = False
+            else:
+                success = success and addon_success
+                if start_task:
+                    start_tasks.append(start_task)

-        return start_tasks
+        return (success, start_tasks)

     async def store_folders(self, folder_list: list[str]):
         """Backup Supervisor data into backup."""

-        def _folder_save(name: str):
+        async def _folder_save(name: str):
             """Take backup of a folder."""
             slug_name = name.replace("/", "_")
             tar_name = Path(
@@ -434,39 +463,43 @@ class Backup(CoreSysAttributes):
                 _LOGGER.warning("Can't find backup folder %s", name)
                 return

-            # Take backup
-            _LOGGER.info("Backing up folder %s", name)
-            with SecureTarFile(
-                tar_name, "w", key=self._key, gzip=self.compressed, bufsize=BUF_SIZE
-            ) as tar_file:
-                atomic_contents_add(
-                    tar_file,
-                    origin_dir,
-                    excludes=[
-                        bound.bind_mount.local_where.as_posix()
-                        for bound in self.sys_mounts.bound_mounts
-                        if bound.bind_mount.local_where
-                    ],
-                    arcname=".",
-                )
+            def _save() -> None:
+                # Take backup
+                _LOGGER.info("Backing up folder %s", name)
+                with SecureTarFile(
+                    tar_name, "w", key=self._key, gzip=self.compressed, bufsize=BUF_SIZE
+                ) as tar_file:
+                    atomic_contents_add(
+                        tar_file,
+                        origin_dir,
+                        excludes=[
+                            bound.bind_mount.local_where.as_posix()
+                            for bound in self.sys_mounts.bound_mounts
+                            if bound.bind_mount.local_where
+                        ],
+                        arcname=".",
+                    )

-            _LOGGER.info("Backup folder %s done", name)
+                _LOGGER.info("Backup folder %s done", name)

+            await self.sys_run_in_executor(_save)
             self._data[ATTR_FOLDERS].append(name)

         # Save folder sequential
         # avoid issue on slow IO
         for folder in folder_list:
             try:
-                await self.sys_run_in_executor(_folder_save, folder)
+                await _folder_save(folder)
             except (tarfile.TarError, OSError) as err:
                 raise BackupError(
                     f"Can't backup folder {folder}: {str(err)}", _LOGGER.error
                 ) from err

-    async def restore_folders(self, folder_list: list[str]):
+    async def restore_folders(self, folder_list: list[str]) -> bool:
         """Backup Supervisor data into backup."""
+        success = True

-        async def _folder_restore(name: str) -> None:
+        async def _folder_restore(name: str) -> bool:
             """Intenal function to restore a folder."""
             slug_name = name.replace("/", "_")
             tar_name = Path(
@@ -477,14 +510,26 @@ class Backup(CoreSysAttributes):
             # Check if exists inside backup
             if not tar_name.exists():
                 _LOGGER.warning("Can't find restore folder %s", name)
-                return
+                return False

+            # Unmount any mounts within folder
+            bind_mounts = [
+                bound.bind_mount
+                for bound in self.sys_mounts.bound_mounts
+                if bound.bind_mount.local_where
+                and bound.bind_mount.local_where.is_relative_to(origin_dir)
+            ]
+            if bind_mounts:
+                await asyncio.gather(
+                    *[bind_mount.unmount() for bind_mount in bind_mounts]
+                )
+
             # Clean old stuff
             if origin_dir.is_dir():
                 await remove_folder(origin_dir, content_only=True)

             # Perform a restore
-            def _restore() -> None:
+            def _restore() -> bool:
                 try:
                     _LOGGER.info("Restore folder %s", name)
                     with SecureTarFile(
@@ -494,24 +539,39 @@ class Backup(CoreSysAttributes):
                         gzip=self.compressed,
                         bufsize=BUF_SIZE,
                     ) as tar_file:
-                        tar_file.extractall(path=origin_dir, members=tar_file)
+                        tar_file.extractall(
+                            path=origin_dir, members=tar_file, filter="fully_trusted"
+                        )
                     _LOGGER.info("Restore folder %s done", name)
                 except (tarfile.TarError, OSError) as err:
                     _LOGGER.warning("Can't restore folder %s: %s", name, err)
+                    return False
+                return True

-            await self.sys_run_in_executor(_restore)
+            try:
+                return await self.sys_run_in_executor(_restore)
+            finally:
+                if bind_mounts:
+                    await asyncio.gather(
+                        *[bind_mount.mount() for bind_mount in bind_mounts]
+                    )

         # Restore folder sequential
         # avoid issue on slow IO
         for folder in folder_list:
             try:
-                await _folder_restore(folder)
+                success = success and await _folder_restore(folder)
             except Exception as err:  # pylint: disable=broad-except
                 _LOGGER.warning("Can't restore folder %s: %s", folder, err)
+                success = False
+        return success

-    async def store_homeassistant(self):
-        """Backup Home Assitant Core configuration folder."""
-        self._data[ATTR_HOMEASSISTANT] = {ATTR_VERSION: self.sys_homeassistant.version}
+    async def store_homeassistant(self, exclude_database: bool = False):
+        """Backup Home Assistant Core configuration folder."""
+        self._data[ATTR_HOMEASSISTANT] = {
+            ATTR_VERSION: self.sys_homeassistant.version,
+            ATTR_EXCLUDE_DATABASE: exclude_database,
+        }

         # Backup Home Assistant Core config directory
         tar_name = Path(
@@ -521,13 +581,13 @@ class Backup(CoreSysAttributes):
             tar_name, "w", key=self._key, gzip=self.compressed, bufsize=BUF_SIZE
         )

-        await self.sys_homeassistant.backup(homeassistant_file)
+        await self.sys_homeassistant.backup(homeassistant_file, exclude_database)

         # Store size
         self.homeassistant[ATTR_SIZE] = homeassistant_file.size

     async def restore_homeassistant(self) -> Awaitable[None]:
-        """Restore Home Assitant Core configuration folder."""
+        """Restore Home Assistant Core configuration folder."""
         await self.sys_homeassistant.core.stop()

         # Restore Home Assistant Core config directory
@@ -538,7 +598,9 @@ class Backup(CoreSysAttributes):
             tar_name, "r", key=self._key, gzip=self.compressed, bufsize=BUF_SIZE
         )

-        await self.sys_homeassistant.restore(homeassistant_file)
+        await self.sys_homeassistant.restore(
+            homeassistant_file, self.homeassistant_exclude_database
+        )

         # Generate restore task
         async def _core_update():
@@ -561,12 +623,12 @@ class Backup(CoreSysAttributes):
         """Store repository list into backup."""
         self.repositories = self.sys_store.repository_urls

-    async def restore_repositories(self, replace: bool = False):
+    def restore_repositories(self, replace: bool = False) -> Awaitable[None]:
         """Restore repositories from backup.

         Return a coroutine.
         """
-        await self.sys_store.update_repositories(
+        return self.sys_store.update_repositories(
             self.repositories, add_with_errors=True, replace=replace
         )
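The `filter="fully_trusted"` argument now passed to `extractall` above pins tarfile's extraction behavior: PEP 706 introduced the `filter` parameter and schedules the default to flip to the safer `"data"` filter in Python 3.14, and `"fully_trusted"` keeps the historical semantics for archives the Supervisor wrote itself. A self-contained round-trip on a Python version that ships the parameter:

```python
# Round-trip demo of tarfile extraction filters (PEP 706); the archive name
# and target path are illustrative.
import io
import tarfile

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w:") as tar:
    info = tarfile.TarInfo("hello.txt")
    payload = b"backup payload"
    info.size = len(payload)
    tar.addfile(info, io.BytesIO(payload))

buf.seek(0)
with tarfile.open(fileobj=buf, mode="r:") as tar:
    # "fully_trusted" preserves pre-PEP-706 behavior; use "data" for
    # archives from untrusted sources.
    tar.extractall(path="/tmp/backup-demo", filter="fully_trusted")
```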
@@ -1,11 +1,38 @@
 """Backup consts."""
-from enum import Enum
+from enum import StrEnum

 BUF_SIZE = 2**20 * 4  # 4MB
+DEFAULT_FREEZE_TIMEOUT = 600


-class BackupType(str, Enum):
+class BackupType(StrEnum):
     """Backup type enum."""

     FULL = "full"
     PARTIAL = "partial"
+
+
+class BackupJobStage(StrEnum):
+    """Backup job stage enum."""
+
+    ADDON_REPOSITORIES = "addon_repositories"
+    ADDONS = "addons"
+    DOCKER_CONFIG = "docker_config"
+    FINISHING_FILE = "finishing_file"
+    FOLDERS = "folders"
+    HOME_ASSISTANT = "home_assistant"
+    AWAIT_ADDON_RESTARTS = "await_addon_restarts"
+
+
+class RestoreJobStage(StrEnum):
+    """Restore job stage enum."""
+
+    ADDON_REPOSITORIES = "addon_repositories"
+    ADDONS = "addons"
+    AWAIT_ADDON_RESTARTS = "await_addon_restarts"
+    AWAIT_HOME_ASSISTANT_RESTART = "await_home_assistant_restart"
+    CHECK_HOME_ASSISTANT = "check_home_assistant"
+    DOCKER_CONFIG = "docker_config"
+    FOLDERS = "folders"
+    HOME_ASSISTANT = "home_assistant"
+    REMOVE_DELTA_ADDONS = "remove_delta_addons"
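Moving from `class BackupType(str, Enum)` to `StrEnum` matters on Python 3.11+, where str-mixin enums started rendering as `ClassName.MEMBER` in f-strings; `StrEnum` always renders the value (this is also why the security middleware above drops an explicit `.value`). A quick demonstration:

```python
# Behavior difference on Python 3.11+; StrEnum requires 3.11 or newer.
from enum import Enum, StrEnum


class OldStyle(str, Enum):
    FULL = "full"


class NewStyle(StrEnum):
    FULL = "full"


print(f"{OldStyle.FULL}")  # "OldStyle.FULL" on Python 3.11+
print(f"{NewStyle.FULL}")  # "full"
assert NewStyle.FULL == "full"  # still compares equal to the raw string
```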
@@ -3,6 +3,7 @@ from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Awaitable, Iterable
|
||||
import errno
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
@@ -13,44 +14,35 @@ from ..const import (
|
||||
FOLDER_HOMEASSISTANT,
|
||||
CoreState,
|
||||
)
|
||||
from ..coresys import CoreSysAttributes
|
||||
from ..dbus.const import UnitActiveState
|
||||
from ..exceptions import AddonsError
|
||||
from ..jobs.decorator import Job, JobCondition
|
||||
from ..exceptions import AddonsError, BackupError, BackupInvalidError, BackupJobError
|
||||
from ..jobs.const import JOB_GROUP_BACKUP_MANAGER, JobCondition, JobExecutionLimit
|
||||
from ..jobs.decorator import Job
|
||||
from ..jobs.job_group import JobGroup
|
||||
from ..mounts.mount import Mount
|
||||
from ..resolution.const import UnhealthyReason
|
||||
from ..utils.common import FileConfiguration
|
||||
from ..utils.dt import utcnow
|
||||
from ..utils.sentinel import DEFAULT
|
||||
from ..utils.sentry import capture_exception
|
||||
from .backup import Backup
|
||||
from .const import BackupType
|
||||
from .const import DEFAULT_FREEZE_TIMEOUT, BackupJobStage, BackupType, RestoreJobStage
|
||||
from .utils import create_slug
|
||||
from .validate import ALL_FOLDERS, SCHEMA_BACKUPS_CONFIG
|
||||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _list_backup_files(path: Path) -> Iterable[Path]:
|
||||
"""Return iterable of backup files, suppress and log OSError for network mounts."""
|
||||
try:
|
||||
# is_dir does a stat syscall which raises if the mount is down
|
||||
if path.is_dir():
|
||||
return path.glob("*.tar")
|
||||
except OSError as err:
|
||||
_LOGGER.error("Could not list backups from %s: %s", path.as_posix(), err)
|
||||
|
||||
return []
|
||||
|
||||
|
||||
class BackupManager(FileConfiguration, CoreSysAttributes):
|
||||
class BackupManager(FileConfiguration, JobGroup):
|
||||
"""Manage backups."""
|
||||
|
||||
def __init__(self, coresys):
|
||||
"""Initialize a backup manager."""
|
||||
super().__init__(FILE_HASSIO_BACKUPS, SCHEMA_BACKUPS_CONFIG)
|
||||
self.coresys = coresys
|
||||
self._backups = {}
|
||||
self.lock = asyncio.Lock()
|
||||
super(FileConfiguration, self).__init__(coresys, JOB_GROUP_BACKUP_MANAGER)
|
||||
self._backups: dict[str, Backup] = {}
|
||||
self._thaw_task: Awaitable[None] | None = None
|
||||
self._thaw_event: asyncio.Event = asyncio.Event()
|
||||
|
||||
@property
|
||||
def list_backups(self) -> set[Backup]:
|
||||
@@ -76,7 +68,7 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
|
||||
if mount.state == UnitActiveState.ACTIVE
|
||||
]
|
||||
|
||||
def get(self, slug):
|
||||
def get(self, slug: str) -> Backup:
|
||||
"""Return backup object."""
|
||||
return self._backups.get(slug)
|
||||
|
||||
@@ -90,6 +82,46 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
|
||||
|
||||
return self.sys_config.path_backup
|
||||
|
||||
def _change_stage(
|
||||
self,
|
||||
stage: BackupJobStage | RestoreJobStage,
|
||||
backup: Backup | None = None,
|
||||
):
|
||||
"""Change the stage of the current job during backup/restore.
|
||||
|
||||
Must be called from an existing backup/restore job.
|
||||
"""
|
||||
job_name = self.sys_jobs.current.name
|
||||
if "restore" in job_name:
|
||||
action = "Restore"
|
||||
elif "freeze" in job_name:
|
||||
action = "Freeze"
|
||||
elif "thaw" in job_name:
|
||||
action = "Thaw"
|
||||
else:
|
||||
action = "Backup"
|
||||
|
||||
_LOGGER.info(
|
||||
"%s %sstarting stage %s",
|
||||
action,
|
||||
f"{backup.slug} " if backup else "",
|
||||
stage,
|
||||
)
|
||||
self.sys_jobs.current.stage = stage
|
||||
|
||||
def _list_backup_files(self, path: Path) -> Iterable[Path]:
|
||||
"""Return iterable of backup files, suppress and log OSError for network mounts."""
|
||||
try:
|
||||
# is_dir does a stat syscall which raises if the mount is down
|
||||
if path.is_dir():
|
||||
return path.glob("*.tar")
|
||||
except OSError as err:
|
||||
if err.errno == errno.EBADMSG and path == self.sys_config.path_backup:
|
||||
self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE
|
||||
_LOGGER.error("Could not list backups from %s: %s", path.as_posix(), err)
|
||||
|
||||
return []
|
||||
|
||||
def _create_backup(
|
||||
self,
|
||||
name: str,
|
||||
@@ -98,7 +130,10 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
|
||||
compressed: bool = True,
|
||||
location: Mount | type[DEFAULT] | None = DEFAULT,
|
||||
) -> Backup:
|
||||
"""Initialize a new backup object from name."""
|
||||
"""Initialize a new backup object from name.
|
||||
|
||||
Must be called from an existing backup job.
|
||||
"""
|
||||
date_str = utcnow().isoformat()
|
||||
slug = create_slug(name, date_str)
|
||||
tar_file = Path(self._get_base_path(location), f"{slug}.tar")
|
||||
@@ -107,19 +142,24 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
|
||||
backup = Backup(self.coresys, tar_file)
|
||||
backup.new(slug, name, date_str, sys_type, password, compressed)
|
||||
|
||||
# Add backup ID to job
|
||||
self.sys_jobs.current.reference = backup.slug
|
||||
|
||||
self._change_stage(BackupJobStage.ADDON_REPOSITORIES, backup)
|
||||
backup.store_repositories()
|
||||
self._change_stage(BackupJobStage.DOCKER_CONFIG, backup)
|
||||
backup.store_dockerconfig()
|
||||
|
||||
return backup
|
||||
|
||||
def load(self):
|
||||
def load(self) -> Awaitable[None]:
|
||||
"""Load exists backups data.
|
||||
|
||||
Return a coroutine.
|
||||
"""
|
||||
return self.reload()
|
||||
|
||||
async def reload(self):
|
||||
async def reload(self) -> None:
|
||||
"""Load exists backups."""
|
||||
self._backups = {}
|
||||
|
||||
@@ -132,14 +172,14 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
|
||||
tasks = [
|
||||
self.sys_create_task(_load_backup(tar_file))
|
||||
for path in self.backup_locations
|
||||
for tar_file in _list_backup_files(path)
|
||||
for tar_file in self._list_backup_files(path)
|
||||
]
|
||||
|
||||
_LOGGER.info("Found %d backup files", len(tasks))
|
||||
if tasks:
|
||||
await asyncio.wait(tasks)
|
||||
|
||||
def remove(self, backup):
|
||||
def remove(self, backup: Backup) -> bool:
|
||||
"""Remove a backup."""
|
||||
try:
|
||||
backup.tarfile.unlink()
|
||||
@@ -147,12 +187,17 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
|
||||
_LOGGER.info("Removed backup file %s", backup.slug)
|
||||
|
||||
except OSError as err:
|
||||
if (
|
||||
err.errno == errno.EBADMSG
|
||||
and backup.tarfile.parent == self.sys_config.path_backup
|
||||
):
|
||||
self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE
|
||||
_LOGGER.error("Can't remove backup %s: %s", backup.slug, err)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
async def import_backup(self, tar_file):
|
||||
async def import_backup(self, tar_file: Path) -> Backup | None:
|
||||
"""Check backup tarfile and import it."""
|
||||
backup = Backup(self.coresys, tar_file)
|
||||
|
||||
@@ -171,6 +216,8 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
|
||||
backup.tarfile.rename(tar_origin)
|
||||
|
||||
except OSError as err:
|
||||
if err.errno == errno.EBADMSG:
|
||||
self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE
|
||||
_LOGGER.error("Can't move backup file to storage: %s", err)
|
||||
return None
|
||||
|
||||
@@ -189,26 +236,39 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
|
||||
addon_list: list[Addon],
|
||||
folder_list: list[str],
|
||||
homeassistant: bool,
|
||||
):
|
||||
homeassistant_exclude_database: bool | None,
|
||||
) -> Backup | None:
|
||||
"""Create a backup.
|
||||
|
||||
Must be called from an existing backup job.
|
||||
"""
|
||||
addon_start_tasks: list[Awaitable[None]] | None = None
|
||||
|
||||
try:
|
||||
self.sys_core.state = CoreState.FREEZE
|
||||
|
||||
async with backup:
|
||||
# Backup add-ons
|
||||
if addon_list:
|
||||
_LOGGER.info("Backing up %s store Add-ons", backup.slug)
|
||||
self._change_stage(BackupJobStage.ADDONS, backup)
|
||||
addon_start_tasks = await backup.store_addons(addon_list)
|
||||
|
||||
# HomeAssistant Folder is for v1
|
||||
if homeassistant:
|
||||
await backup.store_homeassistant()
|
||||
self._change_stage(BackupJobStage.HOME_ASSISTANT, backup)
|
||||
await backup.store_homeassistant(
|
||||
self.sys_homeassistant.backups_exclude_database
|
||||
if homeassistant_exclude_database is None
|
||||
else homeassistant_exclude_database
|
||||
)
|
||||
|
||||
# Backup folders
|
||||
if folder_list:
|
||||
_LOGGER.info("Backing up %s store folders", backup.slug)
|
||||
self._change_stage(BackupJobStage.FOLDERS, backup)
|
||||
await backup.store_folders(folder_list)
|
||||
|
||||
self._change_stage(BackupJobStage.FINISHING_FILE, backup)
|
||||
|
||||
except Exception as err: # pylint: disable=broad-except
|
||||
_LOGGER.exception("Backup %s error", backup.slug)
|
||||
capture_exception(err)
|
||||
@@ -217,6 +277,7 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
|
||||
self._backups[backup.slug] = backup
|
||||
|
||||
if addon_start_tasks:
|
||||
self._change_stage(BackupJobStage.AWAIT_ADDON_RESTARTS, backup)
|
||||
# Ignore exceptions from waiting for addon startup, addon errors handled elsewhere
|
||||
await asyncio.gather(*addon_start_tasks, return_exceptions=True)
|
||||
|
||||
@@ -224,33 +285,48 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
        finally:
            self.sys_core.state = CoreState.RUNNING

    @Job(conditions=[JobCondition.FREE_SPACE, JobCondition.RUNNING])
    @Job(
        name="backup_manager_full_backup",
        conditions=[JobCondition.RUNNING],
        limit=JobExecutionLimit.GROUP_ONCE,
        on_condition=BackupJobError,
    )
    async def do_backup_full(
        self,
        name="",
        password=None,
        compressed=True,
        name: str = "",
        password: str | None = None,
        compressed: bool = True,
        location: Mount | type[DEFAULT] | None = DEFAULT,
    ):
        homeassistant_exclude_database: bool | None = None,
    ) -> Backup | None:
        """Create a full backup."""
        if self.lock.locked():
            _LOGGER.error("A backup/restore process is already running")
            return None
        if self._get_base_path(location) == self.sys_config.path_backup:
            await Job.check_conditions(
                self, {JobCondition.FREE_SPACE}, "BackupManager.do_backup_full"
            )

        backup = self._create_backup(
            name, BackupType.FULL, password, compressed, location
        )

        _LOGGER.info("Creating new full backup with slug %s", backup.slug)
        async with self.lock:
            backup = await self._do_backup(
                backup, self.sys_addons.installed, ALL_FOLDERS, True
            )
            if backup:
                _LOGGER.info("Creating full backup with slug %s completed", backup.slug)
            return backup
        backup = await self._do_backup(
            backup,
            self.sys_addons.installed,
            ALL_FOLDERS,
            True,
            homeassistant_exclude_database,
        )
        if backup:
            _LOGGER.info("Creating full backup with slug %s completed", backup.slug)
        return backup

    @Job(conditions=[JobCondition.FREE_SPACE, JobCondition.RUNNING])
    @Job(
        name="backup_manager_partial_backup",
        conditions=[JobCondition.RUNNING],
        limit=JobExecutionLimit.GROUP_ONCE,
        on_condition=BackupJobError,
    )
    async def do_backup_partial(
        self,
        name: str = "",
@@ -260,11 +336,13 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
        homeassistant: bool = False,
        compressed: bool = True,
        location: Mount | type[DEFAULT] | None = DEFAULT,
    ):
        homeassistant_exclude_database: bool | None = None,
    ) -> Backup | None:
        """Create a partial backup."""
        if self.lock.locked():
            _LOGGER.error("A backup/restore process is already running")
            return None
        if self._get_base_path(location) == self.sys_config.path_backup:
            await Job.check_conditions(
                self, {JobCondition.FREE_SPACE}, "BackupManager.do_backup_partial"
            )

        addons = addons or []
        folders = folders or []
@@ -282,21 +360,20 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
        )

        _LOGGER.info("Creating new partial backup with slug %s", backup.slug)
        async with self.lock:
            addon_list = []
            for addon_slug in addons:
                addon = self.sys_addons.get(addon_slug)
                if addon and addon.is_installed:
                    addon_list.append(addon)
                    continue
                _LOGGER.warning("Add-on %s not found/installed", addon_slug)
        addon_list = []
        for addon_slug in addons:
            addon = self.sys_addons.get(addon_slug)
            if addon and addon.is_installed:
                addon_list.append(addon)
                continue
            _LOGGER.warning("Add-on %s not found/installed", addon_slug)

            backup = await self._do_backup(backup, addon_list, folders, homeassistant)
            if backup:
                _LOGGER.info(
                    "Creating partial backup with slug %s completed", backup.slug
                )
            return backup
        backup = await self._do_backup(
            backup, addon_list, folders, homeassistant, homeassistant_exclude_database
        )
        if backup:
            _LOGGER.info("Creating partial backup with slug %s completed", backup.slug)
        return backup

    async def _do_restore(
        self,
@@ -305,28 +382,34 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
        folder_list: list[str],
        homeassistant: bool,
        replace: bool,
    ):
    ) -> bool:
        """Restore from a backup.

        Must be called from an existing restore job.
        """
        addon_start_tasks: list[Awaitable[None]] | None = None
        success = True

        try:
            task_hass: asyncio.Task | None = None
            async with backup:
                # Restore docker config
                _LOGGER.info("Restoring %s Docker config", backup.slug)
                self._change_stage(RestoreJobStage.DOCKER_CONFIG, backup)
                backup.restore_dockerconfig(replace)

                # Process folders
                if folder_list:
                    _LOGGER.info("Restoring %s folders", backup.slug)
                    await backup.restore_folders(folder_list)
                    self._change_stage(RestoreJobStage.FOLDERS, backup)
                    success = await backup.restore_folders(folder_list)

                # Process Home-Assistant
                if homeassistant:
                    _LOGGER.info("Restoring %s Home Assistant Core", backup.slug)
                    self._change_stage(RestoreJobStage.HOME_ASSISTANT, backup)
                    task_hass = await backup.restore_homeassistant()

                # Delete delta add-ons
                if replace:
                    _LOGGER.info("Removing Add-ons not in the backup %s", backup.slug)
                    self._change_stage(RestoreJobStage.REMOVE_DELTA_ADDONS, backup)
                    for addon in self.sys_addons.installed:
                        if addon.slug in backup.addon_list:
                            continue
@@ -334,97 +417,123 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
                        # Remove Add-on because it's not a part of the new env
                        # Do it sequential avoid issue on slow IO
                        try:
                            await addon.uninstall()
                            await self.sys_addons.uninstall(addon.slug)
                        except AddonsError:
                            _LOGGER.warning("Can't uninstall Add-on %s", addon.slug)
                            success = False

                if addon_list:
                    _LOGGER.info("Restoring %s Repositories", backup.slug)
                    self._change_stage(RestoreJobStage.ADDON_REPOSITORIES, backup)
                    await backup.restore_repositories(replace)

                    _LOGGER.info("Restoring %s Add-ons", backup.slug)
                    addon_start_tasks = await backup.restore_addons(addon_list)
                    self._change_stage(RestoreJobStage.ADDONS, backup)
                    restore_success, addon_start_tasks = await backup.restore_addons(
                        addon_list
                    )
                    success = success and restore_success

                # Wait for Home Assistant Core update/downgrade
                if task_hass:
                    _LOGGER.info("Restore %s wait for Home-Assistant", backup.slug)
                    self._change_stage(
                        RestoreJobStage.AWAIT_HOME_ASSISTANT_RESTART, backup
                    )
                    await task_hass

        except BackupError:
            raise
        except Exception as err:  # pylint: disable=broad-except
            _LOGGER.exception("Restore %s error", backup.slug)
            capture_exception(err)
            return False
            raise BackupError(
                f"Restore {backup.slug} error, check logs for details"
            ) from err
        else:
            if addon_start_tasks:
                # Ignore exceptions from waiting for addon startup, addon errors handled elsewhere
                await asyncio.gather(*addon_start_tasks, return_exceptions=True)
                self._change_stage(RestoreJobStage.AWAIT_ADDON_RESTARTS, backup)
                # Failure to resume addons post restore is still a restore failure
                if any(
                    await asyncio.gather(*addon_start_tasks, return_exceptions=True)
                ):
                    return False

            return True
            return success
        finally:
            # Do we need start Home Assistant Core?
            if not await self.sys_homeassistant.core.is_running():
                await self.sys_homeassistant.core.start()
            # Leave Home Assistant alone if it wasn't part of the restore
            if homeassistant:
                self._change_stage(RestoreJobStage.CHECK_HOME_ASSISTANT, backup)

            # Check If we can access to API / otherwise restart
            if not await self.sys_homeassistant.api.check_api_state():
                _LOGGER.warning("Need restart HomeAssistant for API")
                await self.sys_homeassistant.core.restart()
                # Do we need start Home Assistant Core?
                if not await self.sys_homeassistant.core.is_running():
                    await self.sys_homeassistant.core.start()

                # Check If we can access to API / otherwise restart
                if not await self.sys_homeassistant.api.check_api_state():
                    _LOGGER.warning("Need restart HomeAssistant for API")
                    await self.sys_homeassistant.core.restart()

    @Job(
        name="backup_manager_full_restore",
        conditions=[
            JobCondition.FREE_SPACE,
            JobCondition.HEALTHY,
            JobCondition.INTERNET_HOST,
            JobCondition.INTERNET_SYSTEM,
            JobCondition.RUNNING,
        ]
        ],
        limit=JobExecutionLimit.GROUP_ONCE,
        on_condition=BackupJobError,
    )
    async def do_restore_full(self, backup: Backup, password=None):
    async def do_restore_full(
        self, backup: Backup, password: str | None = None
    ) -> bool:
        """Restore a backup."""
        if self.lock.locked():
            _LOGGER.error("A backup/restore process is already running")
            return False
        # Add backup ID to job
        self.sys_jobs.current.reference = backup.slug

        if backup.sys_type != BackupType.FULL:
            _LOGGER.error("%s is only a partial backup!", backup.slug)
            return False
            raise BackupInvalidError(
                f"{backup.slug} is only a partial backup!", _LOGGER.error
            )

        if backup.protected and not backup.set_password(password):
            _LOGGER.error("Invalid password for backup %s", backup.slug)
            return False
            raise BackupInvalidError(
                f"Invalid password for backup {backup.slug}", _LOGGER.error
            )

        if backup.supervisor_version > self.sys_supervisor.version:
            _LOGGER.error(
                "Backup was made on supervisor version %s, can't restore on %s. Must update supervisor first.",
                backup.supervisor_version,
                self.sys_supervisor.version,
            raise BackupInvalidError(
                f"Backup was made on supervisor version {backup.supervisor_version}, "
                f"can't restore on {self.sys_supervisor.version}. Must update supervisor first.",
                _LOGGER.error,
            )
            return False

        _LOGGER.info("Full-Restore %s start", backup.slug)
        async with self.lock:
            self.sys_core.state = CoreState.FREEZE
        self.sys_core.state = CoreState.FREEZE

            try:
                # Stop Home-Assistant / Add-ons
                await self.sys_core.shutdown()

                success = await self._do_restore(
                    backup, backup.addon_list, backup.folders, True, True
                )

            finally:
                self.sys_core.state = CoreState.RUNNING

        if success:
            _LOGGER.info("Full-Restore %s done", backup.slug)
        if success:
            _LOGGER.info("Full-Restore %s done", backup.slug)
        return success

    @Job(
        name="backup_manager_partial_restore",
        conditions=[
            JobCondition.FREE_SPACE,
            JobCondition.HEALTHY,
            JobCondition.INTERNET_HOST,
            JobCondition.INTERNET_SYSTEM,
            JobCondition.RUNNING,
        ]
        ],
        limit=JobExecutionLimit.GROUP_ONCE,
        on_condition=BackupJobError,
    )
    async def do_restore_partial(
        self,
@@ -433,11 +542,10 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
        addons: list[str] | None = None,
        folders: list[Path] | None = None,
        password: str | None = None,
    ):
    ) -> bool:
        """Restore a backup."""
        if self.lock.locked():
            _LOGGER.error("A backup/restore process is already running")
            return False
        # Add backup ID to job
        self.sys_jobs.current.reference = backup.slug

        addon_list = addons or []
        folder_list = folders or []
@@ -448,30 +556,118 @@ class BackupManager(FileConfiguration, CoreSysAttributes):
            homeassistant = True

        if backup.protected and not backup.set_password(password):
            _LOGGER.error("Invalid password for backup %s", backup.slug)
            return False
            raise BackupInvalidError(
                f"Invalid password for backup {backup.slug}", _LOGGER.error
            )

        if backup.homeassistant is None and homeassistant:
            _LOGGER.error("No Home Assistant Core data inside the backup")
            return False
            raise BackupInvalidError(
                "No Home Assistant Core data inside the backup", _LOGGER.error
            )

        if backup.supervisor_version > self.sys_supervisor.version:
            _LOGGER.error(
                "Backup was made on supervisor version %s, can't restore on %s. Must update supervisor first.",
                backup.supervisor_version,
                self.sys_supervisor.version,
            raise BackupInvalidError(
                f"Backup was made on supervisor version {backup.supervisor_version}, "
                f"can't restore on {self.sys_supervisor.version}. Must update supervisor first.",
                _LOGGER.error,
            )
            return False

        _LOGGER.info("Partial-Restore %s start", backup.slug)
        async with self.lock:
            self.sys_core.state = CoreState.FREEZE
        self.sys_core.state = CoreState.FREEZE

            try:
                success = await self._do_restore(
                    backup, addon_list, folder_list, homeassistant, False
                )

            finally:
                self.sys_core.state = CoreState.RUNNING

        if success:
            _LOGGER.info("Partial-Restore %s done", backup.slug)
        if success:
            _LOGGER.info("Partial-Restore %s done", backup.slug)
        return success
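Note the error-handling shift in the two restore entry points above: invalid input no longer returns False, it raises `BackupInvalidError` with `_LOGGER.error` passed as a second argument, which suggests the exception logs its own message on construction. A hedged sketch of that idiom (the real exception hierarchy lives in the Supervisor's exceptions module; this stub only models the assumed logging behavior):

import logging

_LOGGER = logging.getLogger(__name__)

class BackupInvalidError(Exception):
    """Stub: an exception that logs itself when given a logger callable (assumed behavior)."""

    def __init__(self, message: str, logger=None) -> None:
        super().__init__(message)
        if logger is not None:
            logger(message)  # e.g. _LOGGER.error(...) fires at raise time

# Usage mirroring the diff: log and raise in one statement.
# raise BackupInvalidError("Invalid password for backup abc123", _LOGGER.error)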
    @Job(
        name="backup_manager_freeze_all",
        conditions=[JobCondition.RUNNING],
        limit=JobExecutionLimit.GROUP_ONCE,
        on_condition=BackupJobError,
    )
    async def freeze_all(self, timeout: float = DEFAULT_FREEZE_TIMEOUT) -> None:
        """Freeze system to prepare for an external backup such as an image snapshot."""
        self.sys_core.state = CoreState.FREEZE

        # Determine running addons
        installed = self.sys_addons.installed.copy()
        is_running: list[bool] = await asyncio.gather(
            *[addon.is_running() for addon in installed]
        )
        running_addons = [
            installed[ind] for ind in range(len(installed)) if is_running[ind]
        ]

        # Create thaw task first to ensure we eventually undo freezes even if the below fails
        self._thaw_task = asyncio.shield(
            self.sys_create_task(self._thaw_all(running_addons, timeout))
        )

        # Tell Home Assistant to freeze for a backup
        self._change_stage(BackupJobStage.HOME_ASSISTANT)
        await self.sys_homeassistant.begin_backup()

        # Run all pre-backup tasks for addons
        self._change_stage(BackupJobStage.ADDONS)
        await asyncio.gather(*[addon.begin_backup() for addon in running_addons])

    @Job(
        name="backup_manager_thaw_all",
        conditions=[JobCondition.FROZEN],
        on_condition=BackupJobError,
    )
    async def _thaw_all(
        self, running_addons: list[Addon], timeout: float = DEFAULT_FREEZE_TIMEOUT
    ) -> None:
        """Thaw system after user signal or timeout."""
        try:
            try:
                await asyncio.wait_for(self._thaw_event.wait(), timeout)
            except TimeoutError:
                _LOGGER.warning(
                    "Timeout waiting for signal to thaw after manual freeze, beginning thaw now"
                )

            self._change_stage(BackupJobStage.HOME_ASSISTANT)
            await self.sys_homeassistant.end_backup()

            self._change_stage(BackupJobStage.ADDONS)
            addon_start_tasks: list[asyncio.Task] = [
                task
                for task in await asyncio.gather(
                    *[addon.end_backup() for addon in running_addons]
                )
                if task
            ]
        finally:
            self.sys_core.state = CoreState.RUNNING
            self._thaw_event.clear()
            self._thaw_task = None

        if addon_start_tasks:
            self._change_stage(BackupJobStage.AWAIT_ADDON_RESTARTS)
            await asyncio.gather(*addon_start_tasks, return_exceptions=True)

    @Job(
        name="backup_manager_signal_thaw",
        conditions=[JobCondition.FROZEN],
        limit=JobExecutionLimit.GROUP_ONCE,
        on_condition=BackupJobError,
        internal=True,
    )
    async def thaw_all(self) -> None:
        """Signal thaw task to begin unfreezing the system."""
        if not self._thaw_task:
            raise BackupError(
                "Freeze was not initiated by freeze API, cannot thaw this way"
            )

        self._thaw_event.set()
        await self._thaw_task
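The freeze/thaw trio above coordinates through a single asyncio.Event plus a shielded task: freeze_all() schedules _thaw_all() up front so the system always thaws (on signal or on timeout), and thaw_all() merely sets the event. A runnable toy model of that handshake on Python 3.11+, with the Home Assistant and add-on hooks replaced by prints:

import asyncio

class FreezeThaw:
    def __init__(self) -> None:
        self._thaw_event = asyncio.Event()
        self._thaw_task: asyncio.Future | None = None

    async def freeze_all(self, timeout: float = 600) -> None:
        # Create the thaw task first so a later failure still undoes the freeze
        self._thaw_task = asyncio.shield(asyncio.create_task(self._thaw_all(timeout)))
        print("frozen; waiting for thaw signal")

    async def _thaw_all(self, timeout: float) -> None:
        try:
            await asyncio.wait_for(self._thaw_event.wait(), timeout)
        except TimeoutError:
            print("timeout waiting for thaw signal; thawing anyway")
        finally:
            self._thaw_event.clear()
            self._thaw_task = None
        print("thawed")

    async def thaw_all(self) -> None:
        if not self._thaw_task:
            raise RuntimeError("freeze was not initiated via freeze_all")
        self._thaw_event.set()
        await self._thaw_task

async def main() -> None:
    ft = FreezeThaw()
    await ft.freeze_all()
    await ft.thaw_all()

asyncio.run(main())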
@@ -14,6 +14,7 @@ from ..const import (
    ATTR_DATE,
    ATTR_DAYS_UNTIL_STALE,
    ATTR_DOCKER,
    ATTR_EXCLUDE_DATABASE,
    ATTR_FOLDERS,
    ATTR_HOMEASSISTANT,
    ATTR_NAME,
@@ -103,6 +104,9 @@ SCHEMA_BACKUP = vol.Schema(
        {
            vol.Required(ATTR_VERSION): version_tag,
            vol.Optional(ATTR_SIZE, default=0): vol.Coerce(float),
            vol.Optional(
                ATTR_EXCLUDE_DATABASE, default=False
            ): vol.Boolean(),
        },
        extra=vol.REMOVE_EXTRA,
    )
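Because `ATTR_EXCLUDE_DATABASE` is optional with `default=False`, metadata from backups created before this change still validates and simply gains the new key. A quick demonstration of that voluptuous behavior:

import voluptuous as vol

SCHEMA = vol.Schema(
    {vol.Optional("exclude_database", default=False): vol.Boolean()},
    extra=vol.REMOVE_EXTRA,
)

print(SCHEMA({}))                          # {'exclude_database': False}
print(SCHEMA({"exclude_database": True}))  # {'exclude_database': True}
print(SCHEMA({"unknown": 1}))              # {'exclude_database': False} -- extras removed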
@@ -6,7 +6,7 @@ import signal

from colorlog import ColoredFormatter

from .addons import AddonManager
from .addons.manager import AddonManager
from .api import RestAPI
from .arch import CpuArch
from .auth import Auth
@@ -221,6 +221,14 @@ def initialize_system(coresys: CoreSys) -> None:
        )
        config.path_emergency.mkdir()

    # Addon Configs folder
    if not config.path_addon_configs.is_dir():
        _LOGGER.debug(
            "Creating Supervisor add-on configs folder at '%s'",
            config.path_addon_configs,
        )
        config.path_addon_configs.mkdir()


def migrate_system_env(coresys: CoreSys) -> None:
    """Cleanup some stuff after update."""
@@ -1,5 +1,5 @@
"""Bootstrap Supervisor."""
from datetime import datetime
from datetime import UTC, datetime
import logging
import os
from pathlib import Path, PurePath
@@ -48,8 +48,9 @@ MEDIA_DATA = PurePath("media")
MOUNTS_FOLDER = PurePath("mounts")
MOUNTS_CREDENTIALS = PurePath(".mounts_credentials")
EMERGENCY_DATA = PurePath("emergency")
ADDON_CONFIGS = PurePath("addon_configs")

DEFAULT_BOOT_TIME = datetime.utcfromtimestamp(0).isoformat()
DEFAULT_BOOT_TIME = datetime.fromtimestamp(0, UTC).isoformat()

# We filter out UTC because it's the system default fallback
# Core also not respect the cotnainer timezone and reset timezones
@@ -153,7 +154,7 @@ class CoreConfig(FileConfiguration):

    def modify_log_level(self) -> None:
        """Change log level."""
        lvl = getattr(logging, str(self.logging.value).upper())
        lvl = getattr(logging, self.logging.value.upper())
        logging.getLogger("supervisor").setLevel(lvl)

    @property
@@ -163,7 +164,7 @@ class CoreConfig(FileConfiguration):

        boot_time = parse_datetime(boot_str)
        if not boot_time:
            return datetime.utcfromtimestamp(1)
            return datetime.fromtimestamp(1, UTC)
        return boot_time

    @last_boot.setter
@@ -231,6 +232,16 @@ class CoreConfig(FileConfiguration):
        """Return root add-on data folder external for Docker."""
        return PurePath(self.path_extern_supervisor, ADDONS_DATA)

    @property
    def path_addon_configs(self) -> Path:
        """Return root Add-on configs folder."""
        return self.path_supervisor / ADDON_CONFIGS

    @property
    def path_extern_addon_configs(self) -> PurePath:
        """Return root Add-on configs folder external for Docker."""
        return PurePath(self.path_extern_supervisor, ADDON_CONFIGS)

    @property
    def path_audio(self) -> Path:
        """Return root audio data folder."""
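The `datetime.utcfromtimestamp()` replacements above track its deprecation in Python 3.12; `datetime.fromtimestamp(..., UTC)` gives an equivalent but timezone-aware value. Compare:

from datetime import UTC, datetime

aware = datetime.fromtimestamp(0, UTC)
print(aware.isoformat())  # 1970-01-01T00:00:00+00:00
print(aware.tzinfo)       # UTC (utcfromtimestamp returned a naive datetime instead)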
@@ -1,8 +1,10 @@
"""Constants file for Supervisor."""
from enum import Enum
from dataclasses import dataclass
from enum import StrEnum
from ipaddress import ip_network
from pathlib import Path
from sys import version_info as systemversion
from typing import Self

from aiohttp import __version__ as aiohttpversion

@@ -18,6 +20,7 @@ SUPERVISOR_DATA = Path("/data")
FILE_HASSIO_ADDONS = Path(SUPERVISOR_DATA, "addons.json")
FILE_HASSIO_AUTH = Path(SUPERVISOR_DATA, "auth.json")
FILE_HASSIO_BACKUPS = Path(SUPERVISOR_DATA, "backups.json")
FILE_HASSIO_BOARD = Path(SUPERVISOR_DATA, "board.json")
FILE_HASSIO_CONFIG = Path(SUPERVISOR_DATA, "config.json")
FILE_HASSIO_DISCOVERY = Path(SUPERVISOR_DATA, "discovery.json")
FILE_HASSIO_DOCKER = Path(SUPERVISOR_DATA, "docker.json")
@@ -69,6 +72,9 @@ JSON_RESULT = "result"
RESULT_ERROR = "error"
RESULT_OK = "ok"

HEADER_REMOTE_USER_ID = "X-Remote-User-Id"
HEADER_REMOTE_USER_NAME = "X-Remote-User-Name"
HEADER_REMOTE_USER_DISPLAY_NAME = "X-Remote-User-Display-Name"
HEADER_TOKEN_OLD = "X-Hassio-Key"
HEADER_TOKEN = "X-Supervisor-Token"

@@ -84,6 +90,7 @@ REQUEST_FROM = "HASSIO_FROM"
ATTR_ACCESS_TOKEN = "access_token"
ATTR_ACCESSPOINTS = "accesspoints"
ATTR_ACTIVE = "active"
ATTR_ACTIVITY_LED = "activity_led"
ATTR_ADDON = "addon"
ATTR_ADDONS = "addons"
ATTR_ADDONS_CUSTOM_LIST = "addons_custom_list"
@@ -109,6 +116,7 @@ ATTR_BACKUP_EXCLUDE = "backup_exclude"
ATTR_BACKUP_POST = "backup_post"
ATTR_BACKUP_PRE = "backup_pre"
ATTR_BACKUPS = "backups"
ATTR_BACKUPS_EXCLUDE_DATABASE = "backups_exclude_database"
ATTR_BLK_READ = "blk_read"
ATTR_BLK_WRITE = "blk_write"
ATTR_BOARD = "board"
@@ -148,9 +156,11 @@ ATTR_DIAGNOSTICS = "diagnostics"
ATTR_DISCOVERY = "discovery"
ATTR_DISK = "disk"
ATTR_DISK_FREE = "disk_free"
ATTR_DISK_LED = "disk_led"
ATTR_DISK_LIFE_TIME = "disk_life_time"
ATTR_DISK_TOTAL = "disk_total"
ATTR_DISK_USED = "disk_used"
ATTR_DISPLAYNAME = "displayname"
ATTR_DNS = "dns"
ATTR_DOCKER = "docker"
ATTR_DOCKER_API = "docker_api"
@@ -160,6 +170,7 @@ ATTR_ENABLE = "enable"
ATTR_ENABLED = "enabled"
ATTR_ENVIRONMENT = "environment"
ATTR_EVENT = "event"
ATTR_EXCLUDE_DATABASE = "exclude_database"
ATTR_FEATURES = "features"
ATTR_FILENAME = "filename"
ATTR_FLAGS = "flags"
@@ -173,7 +184,9 @@ ATTR_HASSIO_API = "hassio_api"
ATTR_HASSIO_ROLE = "hassio_role"
ATTR_HASSOS = "hassos"
ATTR_HEALTHY = "healthy"
ATTR_HEARTBEAT_LED = "heartbeat_led"
ATTR_HOMEASSISTANT = "homeassistant"
ATTR_HOMEASSISTANT_EXCLUDE_DATABASE = "homeassistant_exclude_database"
ATTR_HOMEASSISTANT_API = "homeassistant_api"
ATTR_HOST = "host"
ATTR_HOST_DBUS = "host_dbus"
@@ -248,6 +261,7 @@ ATTR_PLUGINS = "plugins"
ATTR_PORT = "port"
ATTR_PORTS = "ports"
ATTR_PORTS_DESCRIPTION = "ports_description"
ATTR_POWER_LED = "power_led"
ATTR_PREFIX = "prefix"
ATTR_PRIMARY = "primary"
ATTR_PRIORITY = "priority"
@@ -271,6 +285,9 @@ ATTR_SERVERS = "servers"
ATTR_SERVICE = "service"
ATTR_SERVICES = "services"
ATTR_SESSION = "session"
ATTR_SESSION_DATA = "session_data"
ATTR_SESSION_DATA_USER = "user"
ATTR_SESSION_DATA_USER_ID = "user_id"
ATTR_SIGNAL = "signal"
ATTR_SIZE = "size"
ATTR_SLUG = "slug"
@@ -308,6 +325,7 @@ ATTR_UPDATE_KEY = "update_key"
ATTR_URL = "url"
ATTR_USB = "usb"
ATTR_USER = "user"
ATTR_USER_LED = "user_led"
ATTR_USERNAME = "username"
ATTR_UUID = "uuid"
ATTR_VALID = "valid"
@@ -327,14 +345,6 @@ PROVIDE_SERVICE = "provide"
NEED_SERVICE = "need"
WANT_SERVICE = "want"


MAP_CONFIG = "config"
MAP_SSL = "ssl"
MAP_ADDONS = "addons"
MAP_BACKUP = "backup"
MAP_SHARE = "share"
MAP_MEDIA = "media"

ARCH_ARMHF = "armhf"
ARCH_ARMV7 = "armv7"
ARCH_AARCH64 = "aarch64"
@@ -367,14 +377,14 @@ ROLE_ADMIN = "admin"
ROLE_ALL = [ROLE_DEFAULT, ROLE_HOMEASSISTANT, ROLE_BACKUP, ROLE_MANAGER, ROLE_ADMIN]


class AddonBoot(str, Enum):
class AddonBoot(StrEnum):
    """Boot mode for the add-on."""

    AUTO = "auto"
    MANUAL = "manual"


class AddonStartup(str, Enum):
class AddonStartup(StrEnum):
    """Startup types of Add-on."""

    INITIALIZE = "initialize"
@@ -384,7 +394,7 @@ class AddonStartup(str, Enum):
    ONCE = "once"


class AddonStage(str, Enum):
class AddonStage(StrEnum):
    """Stage types of add-on."""

    STABLE = "stable"
@@ -392,7 +402,7 @@ class AddonStage(str, Enum):
    DEPRECATED = "deprecated"


class AddonState(str, Enum):
class AddonState(StrEnum):
    """State of add-on."""

    STARTUP = "startup"
@@ -402,7 +412,7 @@ class AddonState(str, Enum):
    ERROR = "error"


class UpdateChannel(str, Enum):
class UpdateChannel(StrEnum):
    """Core supported update channels."""

    STABLE = "stable"
@@ -410,7 +420,7 @@ class UpdateChannel(str, Enum):
    DEV = "dev"


class CoreState(str, Enum):
class CoreState(StrEnum):
    """Represent current loading state."""

    INITIALIZE = "initialize"
@@ -423,7 +433,7 @@ class CoreState(str, Enum):
    CLOSE = "close"


class LogLevel(str, Enum):
class LogLevel(StrEnum):
    """Logging level of system."""

    DEBUG = "debug"
@@ -433,7 +443,7 @@ class LogLevel(str, Enum):
    CRITICAL = "critical"


class HostFeature(str, Enum):
class HostFeature(StrEnum):
    """Host feature."""

    HASSOS = "hassos"
@@ -445,15 +455,16 @@ class HostFeature(str, Enum):
    TIMEDATE = "timedate"


class BusEvent(str, Enum):
class BusEvent(StrEnum):
    """Bus event type."""

    HARDWARE_NEW_DEVICE = "hardware_new_device"
    HARDWARE_REMOVE_DEVICE = "hardware_remove_device"
    DOCKER_CONTAINER_STATE_CHANGE = "docker_container_state_change"
    SUPERVISOR_STATE_CHANGE = "supervisor_state_change"


class CpuArch(str, Enum):
class CpuArch(StrEnum):
    """Supported CPU architectures."""

    ARMV7 = "armv7"
@@ -461,3 +472,52 @@ class CpuArch(str, Enum):
    AARCH64 = "aarch64"
    I386 = "i386"
    AMD64 = "amd64"


@dataclass
class IngressSessionDataUser:
    """Format of an IngressSessionDataUser object."""

    id: str
    display_name: str | None = None
    username: str | None = None

    def to_dict(self) -> dict[str, str | None]:
        """Get dictionary representation."""
        return {
            ATTR_ID: self.id,
            ATTR_DISPLAYNAME: self.display_name,
            ATTR_USERNAME: self.username,
        }

    @classmethod
    def from_dict(cls, data: dict[str, str | None]) -> Self:
        """Return object from dictionary representation."""
        return cls(
            id=data[ATTR_ID],
            display_name=data.get(ATTR_DISPLAYNAME),
            username=data.get(ATTR_USERNAME),
        )


@dataclass
class IngressSessionData:
    """Format of an IngressSessionData object."""

    user: IngressSessionDataUser

    def to_dict(self) -> dict[str, dict[str, str | None]]:
        """Get dictionary representation."""
        return {ATTR_USER: self.user.to_dict()}

    @classmethod
    def from_dict(cls, data: dict[str, dict[str, str | None]]) -> Self:
        """Return object from dictionary representation."""
        return cls(user=IngressSessionDataUser.from_dict(data[ATTR_USER]))


STARTING_STATES = [
    CoreState.INITIALIZE,
    CoreState.STARTUP,
    CoreState.SETUP,
]
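The switch from `class X(str, Enum)` to `StrEnum` (Python 3.11+) changes how members stringify: `str()` and f-strings now yield the raw value, which is what lets callers elsewhere in this compare drop explicit `.value` access. A small demonstration:

from enum import Enum, StrEnum

class OldState(str, Enum):
    RUNNING = "running"

class NewState(StrEnum):
    RUNNING = "running"

print(f"{OldState.RUNNING}")          # OldState.RUNNING (on Python 3.11+)
print(f"{NewState.RUNNING}")          # running
print(NewState.RUNNING == "running")  # True -- still compares as a str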
@@ -7,7 +7,14 @@ import logging

import async_timeout

from .const import RUN_SUPERVISOR_STATE, AddonStartup, CoreState
from .const import (
    ATTR_STARTUP,
    RUN_SUPERVISOR_STATE,
    STARTING_STATES,
    AddonStartup,
    BusEvent,
    CoreState,
)
from .coresys import CoreSys, CoreSysAttributes
from .exceptions import (
    HassioError,
@@ -21,7 +28,7 @@ from .homeassistant.core import LANDINGPAGE
from .resolution.const import ContextType, IssueType, SuggestionType, UnhealthyReason
from .utils.dt import utcnow
from .utils.sentry import capture_exception
from .utils.whoami import retrieve_whoami
from .utils.whoami import WhoamiData, retrieve_whoami

_LOGGER: logging.Logger = logging.getLogger(__name__)

@@ -56,16 +63,23 @@ class Core(CoreSysAttributes):
        if self._state == new_state:
            return
        try:
            RUN_SUPERVISOR_STATE.write_text(new_state.value, encoding="utf-8")
            RUN_SUPERVISOR_STATE.write_text(new_state, encoding="utf-8")
        except OSError as err:
            _LOGGER.warning(
                "Can't update the Supervisor state to %s: %s", new_state, err
            )
        finally:
            self._state = new_state
            self.sys_homeassistant.websocket.supervisor_update_event(
                "info", {"state": new_state}
            )

        # Don't attempt to notify anyone on CLOSE as we're about to stop the event loop
        if new_state != CoreState.CLOSE:
            self.sys_bus.fire_event(BusEvent.SUPERVISOR_STATE_CHANGE, new_state)

            # These will be received by HA after startup has completed which won't make sense
            if new_state not in STARTING_STATES:
                self.sys_homeassistant.websocket.supervisor_update_event(
                    "info", {"state": new_state}
                )

    async def connect(self):
        """Connect Supervisor container."""
@@ -119,12 +133,12 @@ class Core(CoreSysAttributes):
            self._adjust_system_datetime(),
            # Load mounts
            self.sys_mounts.load(),
            # Start docker monitoring
            # Load Docker manager
            self.sys_docker.load(),
            # Load Plugins container
            self.sys_plugins.load(),
            # load last available data
            self.sys_updater.load(),
            # Load Plugins container
            self.sys_plugins.load(),
            # Load Home Assistant
            self.sys_homeassistant.load(),
            # Load CPU/Arch
@@ -236,7 +250,7 @@ class Core(CoreSysAttributes):
                except HomeAssistantError as err:
                    capture_exception(err)
            else:
                _LOGGER.info("Skiping start of Home Assistant")
                _LOGGER.info("Skipping start of Home Assistant")

        # Core is not running
        if self.sys_homeassistant.core.error_state:
@@ -266,7 +280,9 @@ class Core(CoreSysAttributes):
        self.sys_create_task(self.sys_resolution.healthcheck())

        self.state = CoreState.RUNNING
        self.sys_homeassistant.websocket.supervisor_update_event("supervisor", {})
        self.sys_homeassistant.websocket.supervisor_update_event(
            "supervisor", {ATTR_STARTUP: "complete"}
        )
        _LOGGER.info("Supervisor is up and running")

    async def stop(self):
@@ -293,7 +309,7 @@ class Core(CoreSysAttributes):
                    )
                ]
            )
        except asyncio.TimeoutError:
        except TimeoutError:
            _LOGGER.warning("Stage 1: Force Shutdown!")

        # Stage 2
@@ -310,7 +326,7 @@ class Core(CoreSysAttributes):
                    )
                ]
            )
        except asyncio.TimeoutError:
        except TimeoutError:
            _LOGGER.warning("Stage 2: Force Shutdown!")

        self.state = CoreState.CLOSE
@@ -347,6 +363,13 @@ class Core(CoreSysAttributes):
        self.sys_config.last_boot = self.sys_hardware.helper.last_boot
        self.sys_config.save_data()

    async def _retrieve_whoami(self, with_ssl: bool) -> WhoamiData | None:
        try:
            return await retrieve_whoami(self.sys_websession, with_ssl)
        except WhoamiSSLError:
            _LOGGER.info("Whoami service SSL error")
            return None

    async def _adjust_system_datetime(self):
        """Adjust system time/date on startup."""
        # If no timezone is detect or set
@@ -359,21 +382,15 @@ class Core(CoreSysAttributes):

        # Get Timezone data
        try:
            data = await retrieve_whoami(self.sys_websession)
        except WhoamiSSLError:
            pass
            data = await self._retrieve_whoami(True)

            # SSL Date Issue & possible time drift
            if not data:
                data = await self._retrieve_whoami(False)
        except WhoamiError as err:
            _LOGGER.warning("Can't adjust Time/Date settings: %s", err)
            return

        # SSL Date Issue & possible time drift
        if not data:
            try:
                data = await retrieve_whoami(self.sys_websession, with_ssl=False)
            except WhoamiError as err:
                _LOGGER.error("Can't adjust Time/Date settings: %s", err)
                return

        self.sys_config.timezone = self.sys_config.timezone or data.timezone

        # Calculate if system time is out of sync
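`_adjust_system_datetime()` now funnels both whoami attempts through `_retrieve_whoami()`: try SSL first, and only if that returns None (an SSL failure often means the clock is wrong) retry unencrypted. A reduced, runnable sketch of that fallback shape with the network call stubbed out:

import asyncio

async def fetch_whoami(with_ssl: bool) -> dict | None:
    # Stub for retrieve_whoami(): pretend the SSL attempt fails (clock drift).
    return None if with_ssl else {"timezone": "UTC"}

async def get_time_data() -> dict | None:
    data = await fetch_whoami(True)
    if not data:
        # SSL date issue / possible time drift: retry without SSL
        data = await fetch_whoami(False)
    return data

print(asyncio.run(get_time_data()))  # {'timezone': 'UTC'}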
@@ -3,7 +3,9 @@ from __future__ import annotations

import asyncio
from collections.abc import Callable, Coroutine
from contextvars import Context, copy_context
from datetime import datetime
from functools import partial
import logging
import os
from types import MappingProxyType
@@ -16,7 +18,7 @@ from .const import ENV_SUPERVISOR_DEV, SERVER_SOFTWARE
from .utils.dt import UTC, get_time_zone

if TYPE_CHECKING:
    from .addons import AddonManager
    from .addons.manager import AddonManager
    from .api import RestAPI
    from .arch import CpuArch
    from .auth import Auth
@@ -98,6 +100,9 @@ class CoreSys:
            {aiohttp.hdrs.USER_AGENT: SERVER_SOFTWARE}
        )

        # Task factory attributes
        self._set_task_context: list[Callable[[Context], Context]] = []

    @property
    def dev(self) -> bool:
        """Return True if we run dev mode."""
@@ -519,15 +524,33 @@ class CoreSys:
        """Return now in local timezone."""
        return datetime.now(get_time_zone(self.timezone) or UTC)

    def add_set_task_context_callback(
        self, callback: Callable[[Context], Context]
    ) -> None:
        """Add callback used to modify context prior to creating a task.

        Only used for tasks created via CoreSys.create_task. Callback can modify the provided
        context using context.run (ex. `context.run(var.set, "new_value")`). Callback should
        return the context to be provided to task.
        """
        self._set_task_context.append(callback)

    def run_in_executor(
        self, funct: Callable[..., T], *args: Any
        self, funct: Callable[..., T], *args: tuple[Any], **kwargs: dict[str, Any]
    ) -> Coroutine[Any, Any, T]:
        """Add an job to the executor pool."""
        if kwargs:
            funct = partial(funct, **kwargs)

        return self.loop.run_in_executor(None, funct, *args)

    def create_task(self, coroutine: Coroutine) -> asyncio.Task:
        """Create an async task."""
        return self.loop.create_task(coroutine)
        context = copy_context()
        for callback in self._set_task_context:
            context = callback(context)

        return self.loop.create_task(coroutine, context=context)


class CoreSysAttributes:
@@ -700,10 +723,10 @@ class CoreSysAttributes:
        return self.coresys.now()

    def sys_run_in_executor(
        self, funct: Callable[..., T], *args: Any
        self, funct: Callable[..., T], *args: tuple[Any], **kwargs: dict[str, Any]
    ) -> Coroutine[Any, Any, T]:
        """Add an job to the executor pool."""
        return self.coresys.run_in_executor(funct, *args)
        """Add a job to the executor pool."""
        return self.coresys.run_in_executor(funct, *args, **kwargs)

    def sys_create_task(self, coroutine: Coroutine) -> asyncio.Task:
        """Create an async task."""
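`loop.run_in_executor()` accepts only positional arguments, so the updated wrapper binds keywords with `functools.partial` before dispatching. The same pattern in isolation:

import asyncio
from functools import partial

def blocking_io(src: str, *, follow_symlinks: bool = True) -> str:
    return f"copied {src} (follow_symlinks={follow_symlinks})"

async def run_in_executor(funct, *args, **kwargs):
    loop = asyncio.get_running_loop()
    if kwargs:
        funct = partial(funct, **kwargs)  # bind keywords the executor can't pass
    return await loop.run_in_executor(None, funct, *args)

print(asyncio.run(run_in_executor(blocking_io, "/tmp/x", follow_symlinks=False)))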
@@ -5,7 +5,9 @@
    "raspberrypi3-64": ["aarch64", "armv7", "armhf"],
    "raspberrypi4": ["armv7", "armhf"],
    "raspberrypi4-64": ["aarch64", "armv7", "armhf"],
    "raspberrypi5-64": ["aarch64", "armv7", "armhf"],
    "yellow": ["aarch64", "armv7", "armhf"],
    "green": ["aarch64", "armv7", "armhf"],
    "tinker": ["armv7", "armhf"],
    "odroid-c2": ["aarch64", "armv7", "armhf"],
    "odroid-c4": ["aarch64", "armv7", "armhf"],
@@ -6,7 +6,7 @@ from typing import Any
from awesomeversion import AwesomeVersion
from dbus_fast.aio.message_bus import MessageBus

from ...exceptions import DBusError, DBusInterfaceError
from ...exceptions import DBusError, DBusInterfaceError, DBusServiceUnkownError
from ..const import (
    DBUS_ATTR_DIAGNOSTICS,
    DBUS_ATTR_VERSION,
@@ -99,7 +99,7 @@ class OSAgent(DBusInterfaceProxy):
            await asyncio.gather(*[dbus.connect(bus) for dbus in self.all])
        except DBusError:
            _LOGGER.warning("Can't connect to OS-Agent")
        except DBusInterfaceError:
        except (DBusServiceUnkownError, DBusInterfaceError):
            _LOGGER.warning(
                "No OS-Agent support on the host. Some Host functions have been disabled."
            )
@@ -11,7 +11,8 @@ from ...const import (
    DBUS_OBJECT_HAOS_BOARDS,
)
from ...interface import DBusInterfaceProxy, dbus_property
from .const import BOARD_NAME_SUPERVISED, BOARD_NAME_YELLOW
from .const import BOARD_NAME_GREEN, BOARD_NAME_SUPERVISED, BOARD_NAME_YELLOW
from .green import Green
from .interface import BoardProxy
from .supervised import Supervised
from .yellow import Yellow
@@ -39,6 +40,14 @@ class BoardManager(DBusInterfaceProxy):
        """Get board name."""
        return self.properties[DBUS_ATTR_BOARD]

    @property
    def green(self) -> Green:
        """Get Green board."""
        if self.board != BOARD_NAME_GREEN:
            raise BoardInvalidError("Green board is not in use", _LOGGER.error)

        return self._board_proxy

    @property
    def supervised(self) -> Supervised:
        """Get Supervised board."""
@@ -61,6 +70,8 @@ class BoardManager(DBusInterfaceProxy):

        if self.board == BOARD_NAME_YELLOW:
            self._board_proxy = Yellow()
        elif self.board == BOARD_NAME_GREEN:
            self._board_proxy = Green()
        elif self.board == BOARD_NAME_SUPERVISED:
            self._board_proxy = Supervised()
@@ -1,4 +1,5 @@
"""Constants for boards."""

BOARD_NAME_GREEN = "Green"
BOARD_NAME_SUPERVISED = "Supervised"
BOARD_NAME_YELLOW = "Yellow"
supervisor/dbus/agent/boards/green.py (new file)
@@ -0,0 +1,65 @@
"""Green board management."""

import asyncio

from dbus_fast.aio.message_bus import MessageBus

from ....const import ATTR_ACTIVITY_LED, ATTR_POWER_LED, ATTR_USER_LED
from ...const import DBUS_ATTR_ACTIVITY_LED, DBUS_ATTR_POWER_LED, DBUS_ATTR_USER_LED
from ...interface import dbus_property
from .const import BOARD_NAME_GREEN
from .interface import BoardProxy
from .validate import SCHEMA_GREEN_BOARD


class Green(BoardProxy):
    """Green board manager object."""

    def __init__(self) -> None:
        """Initialize properties."""
        super().__init__(BOARD_NAME_GREEN, SCHEMA_GREEN_BOARD)

    @property
    @dbus_property
    def activity_led(self) -> bool:
        """Get activity LED enabled."""
        return self.properties[DBUS_ATTR_ACTIVITY_LED]

    @activity_led.setter
    def activity_led(self, enabled: bool) -> None:
        """Enable/disable activity LED."""
        self._data[ATTR_ACTIVITY_LED] = enabled
        asyncio.create_task(self.dbus.Boards.Green.set_activity_led(enabled))

    @property
    @dbus_property
    def power_led(self) -> bool:
        """Get power LED enabled."""
        return self.properties[DBUS_ATTR_POWER_LED]

    @power_led.setter
    def power_led(self, enabled: bool) -> None:
        """Enable/disable power LED."""
        self._data[ATTR_POWER_LED] = enabled
        asyncio.create_task(self.dbus.Boards.Green.set_power_led(enabled))

    @property
    @dbus_property
    def user_led(self) -> bool:
        """Get user LED enabled."""
        return self.properties[DBUS_ATTR_USER_LED]

    @user_led.setter
    def user_led(self, enabled: bool) -> None:
        """Enable/disable disk LED."""
        self._data[ATTR_USER_LED] = enabled
        asyncio.create_task(self.dbus.Boards.Green.set_user_led(enabled))

    async def connect(self, bus: MessageBus) -> None:
        """Connect to D-Bus."""
        await super().connect(bus)

        # Set LEDs based on settings on connect
        self.activity_led = self._data[ATTR_ACTIVITY_LED]
        self.power_led = self._data[ATTR_POWER_LED]
        self.user_led = self._data[ATTR_USER_LED]
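Each LED on the Green board is exposed as a synchronous property whose setter persists the value and schedules the actual D-Bus call with asyncio.create_task(). A reduced sketch of that setter pattern with the bus call stubbed:

import asyncio

class Board:
    def __init__(self) -> None:
        self._data = {"power_led": True}

    async def _dbus_set_power_led(self, enabled: bool) -> None:
        print(f"dbus: set_power_led({enabled})")  # stand-in for the real D-Bus call

    @property
    def power_led(self) -> bool:
        return self._data["power_led"]

    @power_led.setter
    def power_led(self, enabled: bool) -> None:
        self._data["power_led"] = enabled                       # persist the setting
        asyncio.create_task(self._dbus_set_power_led(enabled))  # fire-and-forget bus call

async def main() -> None:
    board = Board()
    board.power_led = False  # plain assignment; the D-Bus write happens asynchronously
    await asyncio.sleep(0)   # give the scheduled task a chance to run

asyncio.run(main())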
@@ -1,17 +1,23 @@
"""Board dbus proxy interface."""

from voluptuous import Schema

from ....const import FILE_HASSIO_BOARD
from ....utils.common import FileConfiguration
from ...const import DBUS_IFACE_HAOS_BOARDS, DBUS_NAME_HAOS, DBUS_OBJECT_HAOS_BOARDS
from ...interface import DBusInterfaceProxy
from .validate import SCHEMA_BASE_BOARD


class BoardProxy(DBusInterfaceProxy):
class BoardProxy(FileConfiguration, DBusInterfaceProxy):
    """DBus interface proxy for os board."""

    bus_name: str = DBUS_NAME_HAOS

    def __init__(self, name: str) -> None:
    def __init__(self, name: str, file_schema: Schema | None = None) -> None:
        """Initialize properties."""
        super().__init__()
        super().__init__(FILE_HASSIO_BOARD, file_schema or SCHEMA_BASE_BOARD)
        super(FileConfiguration, self).__init__()

        self._name: str = name
        self.object_path: str = f"{DBUS_OBJECT_HAOS_BOARDS}/{name}"
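`BoardProxy` now mixes persisted configuration into the D-Bus proxy, and the double `super()` call is the interesting part: the second call starts MRO lookup after `FileConfiguration`, so `DBusInterfaceProxy.__init__` still runs even though the two bases don't chain to each other. A toy model of that cooperative-init trick:

class FileConfiguration:
    def __init__(self, path: str) -> None:
        print(f"FileConfiguration.__init__({path!r})")

class DBusInterfaceProxy:
    def __init__(self) -> None:
        print("DBusInterfaceProxy.__init__()")

class BoardProxy(FileConfiguration, DBusInterfaceProxy):
    def __init__(self) -> None:
        super().__init__("/data/board.json")       # -> FileConfiguration.__init__
        super(FileConfiguration, self).__init__()  # -> next in MRO: DBusInterfaceProxy

BoardProxy()  # prints both __init__ lines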
supervisor/dbus/agent/boards/validate.py (new file)
@@ -0,0 +1,32 @@
"""Validation for board config."""

import voluptuous as vol

from ....const import (
    ATTR_ACTIVITY_LED,
    ATTR_DISK_LED,
    ATTR_HEARTBEAT_LED,
    ATTR_POWER_LED,
    ATTR_USER_LED,
)

# pylint: disable=no-value-for-parameter
SCHEMA_BASE_BOARD = vol.Schema({}, extra=vol.REMOVE_EXTRA)

SCHEMA_GREEN_BOARD = vol.Schema(
    {
        vol.Optional(ATTR_ACTIVITY_LED, default=True): vol.Boolean(),
        vol.Optional(ATTR_POWER_LED, default=True): vol.Boolean(),
        vol.Optional(ATTR_USER_LED, default=True): vol.Boolean(),
    },
    extra=vol.REMOVE_EXTRA,
)

SCHEMA_YELLOW_BOARD = vol.Schema(
    {
        vol.Optional(ATTR_DISK_LED, default=True): vol.Boolean(),
        vol.Optional(ATTR_HEARTBEAT_LED, default=True): vol.Boolean(),
        vol.Optional(ATTR_POWER_LED, default=True): vol.Boolean(),
    },
    extra=vol.REMOVE_EXTRA,
)
@@ -2,10 +2,14 @@

import asyncio

from dbus_fast.aio.message_bus import MessageBus

from ....const import ATTR_DISK_LED, ATTR_HEARTBEAT_LED, ATTR_POWER_LED
from ...const import DBUS_ATTR_DISK_LED, DBUS_ATTR_HEARTBEAT_LED, DBUS_ATTR_POWER_LED
from ...interface import dbus_property
from .const import BOARD_NAME_YELLOW
from .interface import BoardProxy
from .validate import SCHEMA_YELLOW_BOARD


class Yellow(BoardProxy):
@@ -13,7 +17,7 @@ class Yellow(BoardProxy):

    def __init__(self) -> None:
        """Initialize properties."""
        super().__init__(BOARD_NAME_YELLOW)
        super().__init__(BOARD_NAME_YELLOW, SCHEMA_YELLOW_BOARD)

    @property
    @dbus_property
@@ -24,6 +28,7 @@ class Yellow(BoardProxy):
    @heartbeat_led.setter
    def heartbeat_led(self, enabled: bool) -> None:
        """Enable/disable heartbeat LED."""
        self._data[ATTR_HEARTBEAT_LED] = enabled
        asyncio.create_task(self.dbus.Boards.Yellow.set_heartbeat_led(enabled))

    @property
@@ -35,6 +40,7 @@ class Yellow(BoardProxy):
    @power_led.setter
    def power_led(self, enabled: bool) -> None:
        """Enable/disable power LED."""
        self._data[ATTR_POWER_LED] = enabled
        asyncio.create_task(self.dbus.Boards.Yellow.set_power_led(enabled))

    @property
@@ -46,4 +52,14 @@ class Yellow(BoardProxy):
    @disk_led.setter
    def disk_led(self, enabled: bool) -> None:
        """Enable/disable disk LED."""
        self._data[ATTR_DISK_LED] = enabled
        asyncio.create_task(self.dbus.Boards.Yellow.set_disk_led(enabled))

    async def connect(self, bus: MessageBus) -> None:
        """Connect to D-Bus."""
        await super().connect(bus)

        # Set LEDs based on settings on connect
        self.disk_led = self._data[ATTR_DISK_LED]
        self.heartbeat_led = self._data[ATTR_HEARTBEAT_LED]
        self.power_led = self._data[ATTR_POWER_LED]
@@ -1,5 +1,5 @@
"""Constants for DBUS."""
from enum import Enum, IntEnum
from enum import IntEnum, StrEnum
from socket import AF_INET, AF_INET6

DBUS_NAME_HAOS = "io.hass.os"
@@ -64,6 +64,7 @@ DBUS_OBJECT_UDISKS2 = "/org/freedesktop/UDisks2/Manager"
DBUS_ATTR_ACTIVE_ACCESSPOINT = "ActiveAccessPoint"
DBUS_ATTR_ACTIVE_CONNECTION = "ActiveConnection"
DBUS_ATTR_ACTIVE_CONNECTIONS = "ActiveConnections"
DBUS_ATTR_ACTIVITY_LED = "ActivityLED"
DBUS_ATTR_ADDRESS_DATA = "AddressData"
DBUS_ATTR_BITRATE = "Bitrate"
DBUS_ATTR_BOARD = "Board"
@@ -144,6 +145,7 @@ DBUS_ATTR_OPERATION = "Operation"
DBUS_ATTR_OPTIONS = "Options"
DBUS_ATTR_PARSER_VERSION = "ParserVersion"
DBUS_ATTR_PARTITIONS = "Partitions"
DBUS_ATTR_PATH = "Path"
DBUS_ATTR_POWER_LED = "PowerLED"
DBUS_ATTR_PRIMARY_CONNECTION = "PrimaryConnection"
DBUS_ATTR_READ_ONLY = "ReadOnly"
@@ -168,6 +170,7 @@ DBUS_ATTR_TIMEUSEC = "TimeUSec"
DBUS_ATTR_TIMEZONE = "Timezone"
DBUS_ATTR_TRANSACTION_STATISTICS = "TransactionStatistics"
DBUS_ATTR_TYPE = "Type"
DBUS_ATTR_USER_LED = "UserLED"
DBUS_ATTR_USERSPACE_TIMESTAMP_MONOTONIC = "UserspaceTimestampMonotonic"
DBUS_ATTR_UUID_UPPERCASE = "UUID"
DBUS_ATTR_UUID = "Uuid"
@@ -180,7 +183,7 @@ DBUS_ATTR_WWN = "WWN"
DBUS_ERR_SYSTEMD_NO_SUCH_UNIT = "org.freedesktop.systemd1.NoSuchUnit"


class RaucState(str, Enum):
class RaucState(StrEnum):
    """Rauc slot states."""

    GOOD = "good"
@@ -188,7 +191,7 @@ class RaucState(str, Enum):
    ACTIVE = "active"


class InterfaceMethod(str, Enum):
class InterfaceMethod(StrEnum):
    """Interface method simple."""

    AUTO = "auto"
@@ -197,14 +200,14 @@ class InterfaceMethod(str, Enum):
    LINK_LOCAL = "link-local"


class ConnectionType(str, Enum):
class ConnectionType(StrEnum):
    """Connection type."""

    ETHERNET = "802-3-ethernet"
    WIRELESS = "802-11-wireless"


class ConnectionStateType(int, Enum):
class ConnectionStateType(IntEnum):
    """Connection states.

    https://developer.gnome.org/NetworkManager/stable/nm-dbus-types.html#NMActiveConnectionState
@@ -217,7 +220,7 @@ class ConnectionStateType(int, Enum):
    DEACTIVATED = 4


class ConnectionStateFlags(int, Enum):
class ConnectionStateFlags(IntEnum):
    """Connection state flags.

    https://developer-old.gnome.org/NetworkManager/stable/nm-dbus-types.html#NMActivationStateFlags
@@ -234,7 +237,7 @@ class ConnectionStateFlags(int, Enum):
    EXTERNAL = 0x80


class ConnectivityState(int, Enum):
class ConnectivityState(IntEnum):
    """Network connectvity.

    https://developer.gnome.org/NetworkManager/unstable/nm-dbus-types.html#NMConnectivityState
@@ -247,7 +250,7 @@ class ConnectivityState(int, Enum):
    CONNECTIVITY_FULL = 4


class DeviceType(int, Enum):
class DeviceType(IntEnum):
    """Device types.

    https://developer.gnome.org/NetworkManager/stable/nm-dbus-types.html#NMDeviceType
@@ -262,7 +265,7 @@ class DeviceType(int, Enum):
    VETH = 20


class WirelessMethodType(int, Enum):
class WirelessMethodType(IntEnum):
    """Device Type."""

    UNKNOWN = 0
@@ -279,7 +282,7 @@ class DNSAddressFamily(IntEnum):
    INET6 = AF_INET6


class MulticastProtocolEnabled(str, Enum):
class MulticastProtocolEnabled(StrEnum):
    """Multicast protocol enabled or resolve."""

    YES = "yes"
@@ -287,7 +290,7 @@ class MulticastProtocolEnabled(str, Enum):
    RESOLVE = "resolve"


class DNSOverTLSEnabled(str, Enum):
class DNSOverTLSEnabled(StrEnum):
    """DNS over TLS enabled."""

    YES = "yes"
@@ -295,7 +298,7 @@ class DNSOverTLSEnabled(str, Enum):
    OPPORTUNISTIC = "opportunistic"


class DNSSECValidation(str, Enum):
class DNSSECValidation(StrEnum):
    """DNSSEC validation enforced."""

    YES = "yes"
@@ -303,7 +306,7 @@ class DNSSECValidation(str, Enum):
    ALLOW_DOWNGRADE = "allow-downgrade"


class DNSStubListenerEnabled(str, Enum):
class DNSStubListenerEnabled(StrEnum):
    """DNS stub listener enabled."""

    YES = "yes"
@@ -312,7 +315,7 @@ class DNSStubListenerEnabled(str, Enum):
    UDP_ONLY = "udp"


class ResolvConfMode(str, Enum):
class ResolvConfMode(StrEnum):
    """Resolv.conf management mode."""

    FOREIGN = "foreign"
@@ -322,7 +325,7 @@ class ResolvConfMode(str, Enum):
    UPLINK = "uplink"


class StopUnitMode(str, Enum):
class StopUnitMode(StrEnum):
    """Mode for stopping the unit."""

    REPLACE = "replace"
@@ -331,7 +334,7 @@ class StopUnitMode(str, Enum):
    IGNORE_REQUIREMENTS = "ignore-requirements"


class StartUnitMode(str, Enum):
class StartUnitMode(StrEnum):
    """Mode for starting the unit."""

    REPLACE = "replace"
@@ -341,7 +344,7 @@ class StartUnitMode(str, Enum):
    ISOLATE = "isolate"


class UnitActiveState(str, Enum):
class UnitActiveState(StrEnum):
    """Active state of a systemd unit."""

    ACTIVE = "active"
@@ -3,7 +3,7 @@ import logging

from dbus_fast.aio.message_bus import MessageBus

from ..exceptions import DBusError, DBusInterfaceError
from ..exceptions import DBusError, DBusInterfaceError, DBusServiceUnkownError
from .const import (
    DBUS_ATTR_CHASSIS,
    DBUS_ATTR_DEPLOYMENT,
@@ -39,7 +39,7 @@ class Hostname(DBusInterfaceProxy):
            await super().connect(bus)
        except DBusError:
            _LOGGER.warning("Can't connect to systemd-hostname")
        except DBusInterfaceError:
        except (DBusServiceUnkownError, DBusInterfaceError):
            _LOGGER.warning(
                "No hostname support on the host. Hostname functions have been disabled."
            )
@@ -3,7 +3,7 @@ import logging

from dbus_fast.aio.message_bus import MessageBus

from ..exceptions import DBusError, DBusInterfaceError
from ..exceptions import DBusError, DBusInterfaceError, DBusServiceUnkownError
from .const import DBUS_NAME_LOGIND, DBUS_OBJECT_LOGIND
from .interface import DBusInterface
from .utils import dbus_connected
@@ -28,8 +28,8 @@ class Logind(DBusInterface):
            await super().connect(bus)
        except DBusError:
            _LOGGER.warning("Can't connect to systemd-logind")
        except DBusInterfaceError:
            _LOGGER.info("No systemd-logind support on the host.")
        except (DBusServiceUnkownError, DBusInterfaceError):
            _LOGGER.warning("No systemd-logind support on the host.")

    @dbus_connected
    async def reboot(self) -> None:
@@ -9,7 +9,10 @@ from ...exceptions import (
|
||||
DBusError,
|
||||
DBusFatalError,
|
||||
DBusInterfaceError,
|
||||
DBusNoReplyError,
|
||||
DBusServiceUnkownError,
|
||||
HostNotSupportedError,
|
NetworkInterfaceNotFound,
)
from ...utils.sentry import capture_exception
from ..const import (
@@ -67,9 +70,9 @@ class NetworkManager(DBusInterfaceProxy):
return self._settings

@property
def interfaces(self) -> dict[str, NetworkInterface]:
"""Return a dictionary of active interfaces."""
return self._interfaces
def interfaces(self) -> set[NetworkInterface]:
"""Return a set of active interfaces."""
return set(self._interfaces.values())

@property
@dbus_property
@@ -83,6 +86,20 @@ class NetworkManager(DBusInterfaceProxy):
"""Return Network Manager version."""
return AwesomeVersion(self.properties[DBUS_ATTR_VERSION])

def get(self, name_or_mac: str) -> NetworkInterface:
"""Get an interface by name or mac address."""
if name_or_mac not in self._interfaces:
raise NetworkInterfaceNotFound(
f"No interface exists with name or mac address '{name_or_mac}'"
)
return self._interfaces[name_or_mac]

def __contains__(self, item: NetworkInterface | str) -> bool:
"""Return true if specified network interface exists."""
if isinstance(item, str):
return item in self._interfaces
return item in self.interfaces

@dbus_connected
async def activate_connection(
self, connection_object: str, device_object: str
@@ -128,7 +145,7 @@ class NetworkManager(DBusInterfaceProxy):
await self.settings.connect(bus)
except DBusError:
_LOGGER.warning("Can't connect to Network Manager")
except DBusInterfaceError:
except (DBusServiceUnkownError, DBusInterfaceError):
_LOGGER.warning(
"No Network Manager support on the host. Local network functions have been disabled."
)
@@ -167,9 +184,9 @@ class NetworkManager(DBusInterfaceProxy):

if changed and (
DBUS_ATTR_DEVICES not in changed
or {
intr.object_path for intr in self.interfaces.values() if intr.managed
}.issubset(set(changed[DBUS_ATTR_DEVICES]))
or {intr.object_path for intr in self.interfaces if intr.managed}.issubset(
set(changed[DBUS_ATTR_DEVICES])
)
):
# If none of our managed devices were removed then most likely this is just veths changing.
# We don't care about veths and reprocessing all their changes can swamp a system when
@@ -177,8 +194,8 @@ class NetworkManager(DBusInterfaceProxy):
# in rare occasions but we'll catch it on the next host update scheduled task.
return

interfaces = {}
curr_devices = {intr.object_path: intr for intr in self.interfaces.values()}
interfaces: dict[str, NetworkInterface] = {}
curr_devices = {intr.object_path: intr for intr in self.interfaces}
for device in self.properties[DBUS_ATTR_DEVICES]:
if device in curr_devices and curr_devices[device].is_connected:
interface = curr_devices[device]
@@ -195,8 +212,22 @@ class NetworkManager(DBusInterfaceProxy):
# try to query it. Ignore those cases.
_LOGGER.debug("Can't process %s: %s", device, err)
continue
except (
DBusNoReplyError,
DBusServiceUnkownError,
) as err:
# This typically means that NetworkManager disappeared. Give up immediately.
_LOGGER.error(
"NetworkManager not responding while processing %s: %s. Giving up.",
device,
err,
)
capture_exception(err)
return
except Exception as err:  # pylint: disable=broad-except
_LOGGER.exception("Error while processing %s: %s", device, err)
_LOGGER.exception(
"Unknown error while processing %s: %s", device, err
)
capture_exception(err)
continue

@@ -222,6 +253,7 @@ class NetworkManager(DBusInterfaceProxy):
interface.primary = False

interfaces[interface.name] = interface
interfaces[interface.hw_address] = interface

# Disconnect removed devices
for device in set(curr_devices.keys()) - set(
@@ -242,7 +274,7 @@ class NetworkManager(DBusInterfaceProxy):

def disconnect(self) -> None:
"""Disconnect from D-Bus."""
for intr in self.interfaces.values():
for intr in self.interfaces:
intr.shutdown()

super().disconnect()

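The hunk above registers every device twice, under `interface.name` and under `interface.hw_address`, which is what lets the new `get()` and `__contains__` accept either key. A minimal, self-contained sketch of that double-keyed lookup (illustrative names, not the Supervisor API):

```python
class InterfaceNotFound(Exception):
    """Raised when no interface matches the given key."""


class InterfaceRegistry:
    """Index one object under both its name and its MAC address."""

    def __init__(self) -> None:
        self._interfaces: dict[str, object] = {}

    def add(self, name: str, mac: str, interface: object) -> None:
        # Same object, two keys - mirrors interfaces[interface.name] /
        # interfaces[interface.hw_address] in the diff above.
        self._interfaces[name] = interface
        self._interfaces[mac] = interface

    def get(self, name_or_mac: str) -> object:
        if name_or_mac not in self._interfaces:
            raise InterfaceNotFound(
                f"No interface exists with name or mac address '{name_or_mac}'"
            )
        return self._interfaces[name_or_mac]

    def __contains__(self, item: str) -> bool:
        return item in self._interfaces


registry = InterfaceRegistry()
registry.add("eth0", "aa:bb:cc:dd:ee:ff", object())
assert "eth0" in registry and "aa:bb:cc:dd:ee:ff" in registry
```
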
@@ -1,66 +1,72 @@
"""NetworkConnection object4s for Network Manager."""
"""NetworkConnection objects for Network Manager."""
from dataclasses import dataclass
from ipaddress import IPv4Address, IPv6Address

import attr


@attr.s(slots=True)
@dataclass(slots=True)
class DNSConfiguration:
"""DNS configuration Object."""

nameservers: list[IPv4Address | IPv6Address] = attr.ib()
domains: list[str] = attr.ib()
interface: str = attr.ib()
priority: int = attr.ib()
vpn: bool = attr.ib()
nameservers: list[IPv4Address | IPv6Address]
domains: list[str]
interface: str
priority: int
vpn: bool


@attr.s(slots=True)
@dataclass(slots=True)
class ConnectionProperties:
"""Connection Properties object for Network Manager."""

id: str | None = attr.ib()
uuid: str | None = attr.ib()
type: str | None = attr.ib()
interface_name: str | None = attr.ib()
id: str | None
uuid: str | None
type: str | None
interface_name: str | None


@attr.s(slots=True)
@dataclass(slots=True)
class WirelessProperties:
"""Wireless Properties object for Network Manager."""

ssid: str | None = attr.ib()
assigned_mac: str | None = attr.ib()
mode: str | None = attr.ib()
powersave: int | None = attr.ib()
ssid: str | None
assigned_mac: str | None
mode: str | None
powersave: int | None


@attr.s(slots=True)
@dataclass(slots=True)
class WirelessSecurityProperties:
"""Wireless Security Properties object for Network Manager."""

auth_alg: str | None = attr.ib()
key_mgmt: str | None = attr.ib()
psk: str | None = attr.ib()
auth_alg: str | None
key_mgmt: str | None
psk: str | None


@attr.s(slots=True)
@dataclass(slots=True)
class EthernetProperties:
"""Ethernet properties object for Network Manager."""

assigned_mac: str | None = attr.ib()
assigned_mac: str | None


@attr.s(slots=True)
@dataclass(slots=True)
class VlanProperties:
"""Vlan properties object for Network Manager."""

id: int | None = attr.ib()
parent: str | None = attr.ib()
id: int | None
parent: str | None


@attr.s(slots=True)
@dataclass(slots=True)
class IpProperties:
"""IP properties object for Network Manager."""

method: str | None = attr.ib()
method: str | None


@dataclass(slots=True)
class MatchProperties:
"""Match properties object for Network Manager."""

path: list[str] | None = None

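The whole file above is a mechanical `attrs`-to-stdlib migration: `@attr.s(slots=True)` with `attr.ib()` fields becomes `@dataclass(slots=True)` with bare annotations. A quick equivalence check (slotted dataclasses need Python 3.10+):

```python
from dataclasses import dataclass


@dataclass(slots=True)
class IpProperties:
    """Same shape as the attrs version, with no third-party dependency."""

    method: str | None


props = IpProperties(method="auto")
assert props == IpProperties("auto")  # generated __init__ and __eq__

# slots=True drops the per-instance __dict__, so attribute typos fail fast:
try:
    props.methd = "static"
except AttributeError:
    print("unknown attribute rejected")
```
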
@@ -121,7 +121,7 @@ class NetworkConnection(DBusInterfaceProxy):
self._state_flags = {
flag
for flag in ConnectionStateFlags
if flag.value & self.properties[DBUS_ATTR_STATE_FLAGS]
if flag & self.properties[DBUS_ATTR_STATE_FLAGS]
} or {ConnectionStateFlags.NONE}

# IPv4

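Dropping `.value` in the state-flag comprehension works because int-backed flag members support `&` directly, and the result is truthy only when bits overlap. A sketch with a hypothetical `IntFlag` standing in for `ConnectionStateFlags`:

```python
from enum import IntFlag


class StateFlags(IntFlag):  # hypothetical stand-in for ConnectionStateFlags
    NONE = 0
    IS_MASTER = 1
    LAYER2_READY = 2
    IP_READY = 4


raw = 6  # bitfield as it would arrive from the D-Bus properties dict
active = {flag for flag in StateFlags if flag and flag & raw} or {StateFlags.NONE}
assert active == {StateFlags.LAYER2_READY, StateFlags.IP_READY}

empty = {flag for flag in StateFlags if flag and flag & 0} or {StateFlags.NONE}
assert empty == {StateFlags.NONE}
```

The extra `flag and` guard keeps the zero member out on Python versions that still yield it during iteration.
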
@@ -12,7 +12,7 @@ from ...const import (
ATTR_PRIORITY,
ATTR_VPN,
)
from ...exceptions import DBusError, DBusInterfaceError
from ...exceptions import DBusError, DBusInterfaceError, DBusServiceUnkownError
from ..const import (
DBUS_ATTR_CONFIGURATION,
DBUS_ATTR_MODE,
@@ -67,7 +67,7 @@ class NetworkManagerDNS(DBusInterfaceProxy):
await super().connect(bus)
except DBusError:
_LOGGER.warning("Can't connect to DnsManager")
except DBusInterfaceError:
except (DBusServiceUnkownError, DBusInterfaceError):
_LOGGER.warning(
"No DnsManager support on the host. Local DNS functions have been disabled."
)

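This is the same hardening applied across every D-Bus wrapper in this range: the single `except DBusInterfaceError` becomes a tuple, so a service that is missing entirely (`DBusServiceUnkownError` — the misspelling is the codebase's own identifier) downgrades the feature instead of propagating. The generic shape, with sketch exception types rather than the real Supervisor hierarchy:

```python
import logging

_LOGGER = logging.getLogger(__name__)


# Sketch hierarchy; in Supervisor these live in exceptions.py.
class DBusError(Exception): ...
class DBusInterfaceError(Exception): ...
class DBusServiceUnkownError(Exception): ...  # spelling matches the codebase


async def connect(proxy) -> None:
    """Connect, degrading a missing host service to a warning."""
    try:
        await proxy.connect()
    except DBusError:
        _LOGGER.warning("Can't connect to service")
    except (DBusServiceUnkownError, DBusInterfaceError):
        _LOGGER.warning("No support on the host. Feature has been disabled.")
```
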
@@ -9,7 +9,9 @@ from ..const import (
DBUS_ATTR_DEVICE_INTERFACE,
DBUS_ATTR_DEVICE_TYPE,
DBUS_ATTR_DRIVER,
DBUS_ATTR_HWADDRESS,
DBUS_ATTR_MANAGED,
DBUS_ATTR_PATH,
DBUS_IFACE_DEVICE,
DBUS_NAME_NM,
DBUS_OBJECT_BASE,
@@ -67,6 +69,18 @@ class NetworkInterface(DBusInterfaceProxy):
"""Return interface driver."""
return self.properties[DBUS_ATTR_MANAGED]

@property
@dbus_property
def hw_address(self) -> str:
"""Return hardware address (i.e. mac address) of device."""
return self.properties[DBUS_ATTR_HWADDRESS]

@property
@dbus_property
def path(self) -> str:
"""Return the path of the device as exposed by the udev property ID_PATH."""
return self.properties[DBUS_ATTR_PATH]

@property
def connection(self) -> NetworkConnection | None:
"""Return the connection used for this interface."""
@@ -98,6 +112,18 @@ class NetworkInterface(DBusInterfaceProxy):

self._wireless = wireless

def __eq__(self, other: object) -> bool:
"""Is object equal to another."""
return (
isinstance(other, type(self))
and other.bus_name == self.bus_name
and other.object_path == self.object_path
)

def __hash__(self) -> int:
"""Hash of object."""
return hash((self.bus_name, self.object_path))

async def connect(self, bus: MessageBus) -> None:
"""Connect to D-Bus."""
await super().connect(bus)

@@ -2,6 +2,7 @@
import logging
from typing import Any

from dbus_fast import Variant
from dbus_fast.aio.message_bus import MessageBus

from ....const import ATTR_METHOD, ATTR_MODE, ATTR_PSK, ATTR_SSID
@@ -12,6 +13,7 @@ from ..configuration import (
ConnectionProperties,
EthernetProperties,
IpProperties,
MatchProperties,
VlanProperties,
WirelessProperties,
WirelessSecurityProperties,
@@ -24,6 +26,8 @@ CONF_ATTR_802_WIRELESS_SECURITY = "802-11-wireless-security"
CONF_ATTR_VLAN = "vlan"
CONF_ATTR_IPV4 = "ipv4"
CONF_ATTR_IPV6 = "ipv6"
CONF_ATTR_MATCH = "match"
CONF_ATTR_PATH = "path"

ATTR_ID = "id"
ATTR_UUID = "uuid"
@@ -34,6 +38,7 @@ ATTR_POWERSAVE = "powersave"
ATTR_AUTH_ALG = "auth-alg"
ATTR_KEY_MGMT = "key-mgmt"
ATTR_INTERFACE_NAME = "interface-name"
ATTR_PATH = "path"

IPV4_6_IGNORE_FIELDS = [
"addresses",
@@ -47,8 +52,8 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)


def _merge_settings_attribute(
base_settings: Any,
new_settings: Any,
base_settings: dict[str, dict[str, Variant]],
new_settings: dict[str, dict[str, Variant]],
attribute: str,
*,
ignore_current_value: list[str] = None,
@@ -58,8 +63,7 @@ def _merge_settings_attribute(
if attribute in base_settings:
if ignore_current_value:
for field in ignore_current_value:
if field in base_settings[attribute]:
del base_settings[attribute][field]
base_settings[attribute].pop(field, None)

base_settings[attribute].update(new_settings[attribute])
else:
@@ -85,6 +89,7 @@ class NetworkSetting(DBusInterface):
self._vlan: VlanProperties | None = None
self._ipv4: IpProperties | None = None
self._ipv6: IpProperties | None = None
self._match: MatchProperties | None = None

@property
def connection(self) -> ConnectionProperties | None:
@@ -121,19 +126,29 @@ class NetworkSetting(DBusInterface):
"""Return ipv6 properties if any."""
return self._ipv6

@property
def match(self) -> MatchProperties | None:
"""Return match properties if any."""
return self._match

@dbus_connected
async def get_settings(self) -> dict[str, Any]:
"""Return connection settings."""
return await self.dbus.Settings.Connection.call_get_settings()

@dbus_connected
async def update(self, settings: Any) -> None:
async def update(self, settings: dict[str, dict[str, Variant]]) -> None:
"""Update connection settings."""
new_settings = await self.dbus.Settings.Connection.call_get_settings(
unpack_variants=False
)
new_settings: dict[
str, dict[str, Variant]
] = await self.dbus.Settings.Connection.call_get_settings(unpack_variants=False)

_merge_settings_attribute(new_settings, settings, CONF_ATTR_CONNECTION)
_merge_settings_attribute(
new_settings,
settings,
CONF_ATTR_CONNECTION,
ignore_current_value=[ATTR_INTERFACE_NAME],
)
_merge_settings_attribute(new_settings, settings, CONF_ATTR_802_ETHERNET)
_merge_settings_attribute(new_settings, settings, CONF_ATTR_802_WIRELESS)
_merge_settings_attribute(
@@ -152,6 +167,7 @@ class NetworkSetting(DBusInterface):
CONF_ATTR_IPV6,
ignore_current_value=IPV4_6_IGNORE_FIELDS,
)
_merge_settings_attribute(new_settings, settings, CONF_ATTR_MATCH)

await self.dbus.Settings.Connection.call_update(new_settings)

@@ -217,3 +233,6 @@ class NetworkSetting(DBusInterface):
self._ipv6 = IpProperties(
data[CONF_ATTR_IPV6].get(ATTR_METHOD),
)

if CONF_ATTR_MATCH in data:
self._match = MatchProperties(data[CONF_ATTR_MATCH].get(ATTR_PATH))

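`_merge_settings_attribute` overlays one section of a nested `dict[str, dict[str, Variant]]` onto another, and the diff replaces the membership-check-plus-`del` pair with `pop(field, None)`, which is a no-op when the key is absent. A standalone sketch of the merge (plain values instead of `Variant` for brevity):

```python
def merge_section(
    base: dict,
    new: dict,
    attribute: str,
    *,
    ignore_current_value: list[str] | None = None,
) -> None:
    """Overlay new[attribute] onto base[attribute] in place."""
    if attribute not in new:
        return
    if attribute in base:
        for field in ignore_current_value or []:
            base[attribute].pop(field, None)  # no KeyError when absent
        base[attribute].update(new[attribute])
    else:
        base[attribute] = new[attribute]


base = {"connection": {"id": "old", "interface-name": "eth0"}}
new = {"connection": {"id": "new"}}
merge_section(base, new, "connection", ignore_current_value=["interface-name"])
assert base == {"connection": {"id": "new"}}
```
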
@@ -2,7 +2,7 @@
from __future__ import annotations

import socket
from typing import TYPE_CHECKING, Any
from typing import TYPE_CHECKING
from uuid import uuid4

from dbus_fast import Variant
@@ -15,17 +15,23 @@ from . import (
CONF_ATTR_CONNECTION,
CONF_ATTR_IPV4,
CONF_ATTR_IPV6,
CONF_ATTR_MATCH,
CONF_ATTR_PATH,
CONF_ATTR_VLAN,
)
from .. import NetworkManager
from ....host.const import InterfaceMethod, InterfaceType

if TYPE_CHECKING:
from ....host.network import Interface
from ....host.configuration import Interface


def get_connection_from_interface(
interface: Interface, name: str | None = None, uuid: str | None = None
) -> Any:
interface: Interface,
network_manager: NetworkManager,
name: str | None = None,
uuid: str | None = None,
) -> dict[str, dict[str, Variant]]:
"""Generate message argument for network interface update."""

# Generate/Update ID/name
@@ -39,26 +45,28 @@ def get_connection_from_interface(
elif interface.type == InterfaceType.WIRELESS:
iftype = "802-11-wireless"
else:
iftype = interface.type.value
iftype = interface.type

# Generate UUID
if not uuid:
uuid = str(uuid4())

connection = {
"id": Variant("s", name),
"type": Variant("s", iftype),
"uuid": Variant("s", uuid),
"llmnr": Variant("i", 2),
"mdns": Variant("i", 2),
"autoconnect": Variant("b", True),
conn: dict[str, dict[str, Variant]] = {
CONF_ATTR_CONNECTION: {
"id": Variant("s", name),
"type": Variant("s", iftype),
"uuid": Variant("s", uuid),
"llmnr": Variant("i", 2),
"mdns": Variant("i", 2),
"autoconnect": Variant("b", True),
},
}

if interface.type != InterfaceType.VLAN:
connection["interface-name"] = Variant("s", interface.name)

conn = {}
conn[CONF_ATTR_CONNECTION] = connection
if interface.path:
conn[CONF_ATTR_MATCH] = {CONF_ATTR_PATH: Variant("as", [interface.path])}
else:
conn[CONF_ATTR_CONNECTION]["interface-name"] = Variant("s", interface.name)

ipv4 = {}
if not interface.ipv4 or interface.ipv4.method == InterfaceMethod.AUTO:
@@ -117,9 +125,15 @@ def get_connection_from_interface(
if interface.type == InterfaceType.ETHERNET:
conn[CONF_ATTR_802_ETHERNET] = {ATTR_ASSIGNED_MAC: Variant("s", "preserve")}
elif interface.type == "vlan":
parent = interface.vlan.interface
if parent in network_manager and (
parent_connection := network_manager.get(parent).connection
):
parent = parent_connection.uuid

conn[CONF_ATTR_VLAN] = {
"id": Variant("u", interface.vlan.id),
"parent": Variant("s", interface.vlan.interface),
"parent": Variant("s", parent),
}
elif interface.type == InterfaceType.WIRELESS:
wireless = {
@@ -134,8 +148,8 @@ def get_connection_from_interface(
wireless["security"] = Variant("s", CONF_ATTR_802_WIRELESS_SECURITY)
wireless_security = {}
if interface.wifi.auth == "wep":
wireless_security["auth-alg"] = Variant("s", "none")
wireless_security["key-mgmt"] = Variant("s", "open")
wireless_security["auth-alg"] = Variant("s", "open")
wireless_security["key-mgmt"] = Variant("s", "none")
elif interface.wifi.auth == "wpa-psk":
wireless_security["auth-alg"] = Variant("s", "open")
wireless_security["key-mgmt"] = Variant("s", "wpa-psk")

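The VLAN hunk prefers the parent's connection UUID over the raw interface name, using a walrus expression so the lookup and the null-check happen in one condition. The same control flow in isolation (the mapping is a hypothetical stand-in for `network_manager.get(parent).connection`):

```python
# parent interface name -> connection uuid (hypothetical data)
connections: dict[str, str | None] = {
    "eth0": "b96a0d29-c2f0-4a0a-9c3f-5a3f4f6e7a10",
    "eth1": None,  # known interface, but no active connection
}


def resolve_parent(parent: str) -> str:
    """Prefer the parent's connection UUID, else keep the interface name."""
    if parent in connections and (uuid := connections[parent]):
        return uuid
    return parent


assert resolve_parent("eth0") == "b96a0d29-c2f0-4a0a-9c3f-5a3f4f6e7a10"
assert resolve_parent("eth1") == "eth1"  # falls back when connection is None
assert resolve_parent("wlan0") == "wlan0"  # unknown interface passes through
```
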
@@ -4,7 +4,7 @@ from typing import Any

from dbus_fast.aio.message_bus import MessageBus

from ...exceptions import DBusError, DBusInterfaceError
from ...exceptions import DBusError, DBusInterfaceError, DBusServiceUnkownError
from ..const import DBUS_NAME_NM, DBUS_OBJECT_SETTINGS
from ..interface import DBusInterface
from ..network.setting import NetworkSetting
@@ -28,7 +28,7 @@ class NetworkManagerSettings(DBusInterface):
await super().connect(bus)
except DBusError:
_LOGGER.warning("Can't connect to Network Manager Settings")
except DBusInterfaceError:
except (DBusServiceUnkownError, DBusInterfaceError):
_LOGGER.warning(
"No Network Manager Settings support on the host. Local network functions have been disabled."
)

@@ -4,7 +4,7 @@ from typing import Any

from dbus_fast.aio.message_bus import MessageBus

from ..exceptions import DBusError, DBusInterfaceError
from ..exceptions import DBusError, DBusInterfaceError, DBusServiceUnkownError
from ..utils.dbus import DBusSignalWrapper
from .const import (
DBUS_ATTR_BOOT_SLOT,
@@ -49,7 +49,7 @@ class Rauc(DBusInterfaceProxy):
await super().connect(bus)
except DBusError:
_LOGGER.warning("Can't connect to rauc")
except DBusInterfaceError:
except (DBusServiceUnkownError, DBusInterfaceError):
_LOGGER.warning("Host has no rauc support. OTA updates have been disabled.")

@property
@@ -95,7 +95,7 @@ class Rauc(DBusInterfaceProxy):
@dbus_connected
async def mark(self, state: RaucState, slot_identifier: str) -> tuple[str, str]:
"""Get slot status."""
return await self.dbus.Installer.call_mark(state.value, slot_identifier)
return await self.dbus.Installer.call_mark(state, slot_identifier)

@dbus_connected
async def update(self, changed: dict[str, Any] | None = None) -> None:

@@ -5,7 +5,7 @@ import logging

from dbus_fast.aio.message_bus import MessageBus

from ..exceptions import DBusError, DBusInterfaceError
from ..exceptions import DBusError, DBusInterfaceError, DBusServiceUnkownError
from .const import (
DBUS_ATTR_CACHE_STATISTICS,
DBUS_ATTR_CURRENT_DNS_SERVER,
@@ -59,7 +59,7 @@ class Resolved(DBusInterfaceProxy):
await super().connect(bus)
except DBusError:
_LOGGER.warning("Can't connect to systemd-resolved.")
except DBusInterfaceError:
except (DBusServiceUnkownError, DBusInterfaceError):
_LOGGER.warning(
"Host has no systemd-resolved support. DNS will not work correctly."
)

@@ -10,6 +10,7 @@ from ..exceptions import (
DBusError,
DBusFatalError,
DBusInterfaceError,
DBusServiceUnkownError,
DBusSystemdNoSuchUnit,
)
from .const import (
@@ -86,7 +87,7 @@ class Systemd(DBusInterfaceProxy):
await super().connect(bus)
except DBusError:
_LOGGER.warning("Can't connect to systemd")
except DBusInterfaceError:
except (DBusServiceUnkownError, DBusInterfaceError):
_LOGGER.warning(
"No systemd support on the host. Host control has been disabled."
)
@@ -122,25 +123,25 @@ class Systemd(DBusInterfaceProxy):
@systemd_errors
async def start_unit(self, unit: str, mode: StartUnitMode) -> str:
"""Start a systemd service unit. Returns object path of job."""
return await self.dbus.Manager.call_start_unit(unit, mode.value)
return await self.dbus.Manager.call_start_unit(unit, mode)

@dbus_connected
@systemd_errors
async def stop_unit(self, unit: str, mode: StopUnitMode) -> str:
"""Stop a systemd service unit. Returns object path of job."""
return await self.dbus.Manager.call_stop_unit(unit, mode.value)
return await self.dbus.Manager.call_stop_unit(unit, mode)

@dbus_connected
@systemd_errors
async def reload_unit(self, unit: str, mode: StartUnitMode) -> str:
"""Reload a systemd service unit. Returns object path of job."""
return await self.dbus.Manager.call_reload_or_restart_unit(unit, mode.value)
return await self.dbus.Manager.call_reload_or_restart_unit(unit, mode)

@dbus_connected
@systemd_errors
async def restart_unit(self, unit: str, mode: StartUnitMode) -> str:
"""Restart a systemd service unit. Returns object path of job."""
return await self.dbus.Manager.call_restart_unit(unit, mode.value)
return await self.dbus.Manager.call_restart_unit(unit, mode)

@dbus_connected
async def list_units(
@@ -155,7 +156,7 @@ class Systemd(DBusInterfaceProxy):
) -> str:
"""Start a transient unit which is released when stopped or on reboot. Returns object path of job."""
return await self.dbus.Manager.call_start_transient_unit(
unit, mode.value, properties, []
unit, mode, properties, []
)

@dbus_connected

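All four unit calls drop `mode.value` for plain `mode`; that only round-trips safely if the mode enums are string enums, since dbus_fast will marshal the argument as a string. A minimal check of that property (stand-in enum, Python 3.11+):

```python
from enum import StrEnum


class StartUnitMode(StrEnum):  # stand-in for the Supervisor enum
    REPLACE = "replace"
    FAIL = "fail"


mode = StartUnitMode.REPLACE
assert isinstance(mode, str)  # StrEnum members *are* str instances
assert mode == "replace" == mode.value  # so .value adds nothing here
```
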
@@ -4,7 +4,7 @@ import logging

from dbus_fast.aio.message_bus import MessageBus

from ..exceptions import DBusError, DBusInterfaceError
from ..exceptions import DBusError, DBusInterfaceError, DBusServiceUnkownError
from ..utils.dt import utc_from_timestamp
from .const import (
DBUS_ATTR_NTP,
@@ -63,7 +63,7 @@ class TimeDate(DBusInterfaceProxy):
await super().connect(bus)
except DBusError:
_LOGGER.warning("Can't connect to systemd-timedate")
except DBusInterfaceError:
except (DBusServiceUnkownError, DBusInterfaceError):
_LOGGER.warning(
"No timedate support on the host. Time/Date functions have been disabled."
)

@@ -6,7 +6,12 @@ from typing import Any
from awesomeversion import AwesomeVersion
from dbus_fast.aio import MessageBus

from ...exceptions import DBusError, DBusInterfaceError, DBusObjectError
from ...exceptions import (
DBusError,
DBusInterfaceError,
DBusObjectError,
DBusServiceUnkownError,
)
from ..const import (
DBUS_ATTR_SUPPORTED_FILESYSTEMS,
DBUS_ATTR_VERSION,
@@ -45,7 +50,7 @@ class UDisks2(DBusInterfaceProxy):
await super().connect(bus)
except DBusError:
_LOGGER.warning("Can't connect to udisks2")
except DBusInterfaceError:
except (DBusServiceUnkownError, DBusInterfaceError):
_LOGGER.warning(
"No udisks2 support on the host. Host control has been disabled."
)

@@ -263,6 +263,4 @@ class UDisks2Block(DBusInterfaceProxy):
) -> None:
"""Format block device."""
options = options.to_dict() if options else {}
await self.dbus.Block.call_format(
type_.value, options | UDISKS2_DEFAULT_OPTIONS
)
await self.dbus.Block.call_format(type_, options | UDISKS2_DEFAULT_OPTIONS)

@@ -1,20 +1,20 @@
"""Constants for UDisks2."""

from enum import Enum
from enum import StrEnum

from dbus_fast import Variant

UDISKS2_DEFAULT_OPTIONS = {"auth.no_user_interaction": Variant("b", True)}


class EncryptType(str, Enum):
class EncryptType(StrEnum):
"""Encryption type."""

LUKS1 = "luks1"
LUKS2 = "luks2"


class EraseMode(str, Enum):
class EraseMode(StrEnum):
"""Erase mode."""

ZERO = "zero"
@@ -22,7 +22,7 @@ class EraseMode(str, Enum):
ATA_SECURE_ERASE_ENHANCED = "ata-secure-erase-enhanced"


class FormatType(str, Enum):
class FormatType(StrEnum):
"""Format type."""

EMPTY = "empty"
@@ -31,7 +31,7 @@ class FormatType(str, Enum):
GPT = "gpt"


class PartitionTableType(str, Enum):
class PartitionTableType(StrEnum):
"""Partition Table type."""

DOS = "dos"

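`class EncryptType(str, Enum)` and `class EncryptType(StrEnum)` both produce str-typed members, but they stringify differently, which is exactly what matters once members are passed onward without `.value`. A comparison sketch (Python 3.11+ for `StrEnum`):

```python
from enum import Enum, StrEnum


class OldStyle(str, Enum):
    LUKS2 = "luks2"


class NewStyle(StrEnum):
    LUKS2 = "luks2"


assert str(OldStyle.LUKS2) == "OldStyle.LUKS2"  # surprising in messages
assert str(NewStyle.LUKS2) == "luks2"  # plain value, safe to serialize
assert f"{NewStyle.LUKS2}" == "luks2"
```
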
@@ -3,10 +3,9 @@
from dataclasses import dataclass
from inspect import get_annotations
from pathlib import Path
from typing import Any, TypedDict
from typing import Any, NotRequired, TypedDict

from dbus_fast import Variant
from typing_extensions import NotRequired

from .const import EncryptType, EraseMode

@@ -167,10 +166,10 @@ class FormatOptions(UDisks2StandardOptions):
)
if self.encrypt_passpharase
else None,
"encrypt.type": Variant("s", self.encrypt_type.value)
"encrypt.type": Variant("s", self.encrypt_type)
if self.encrypt_type
else None,
"erase": Variant("s", self.erase.value) if self.erase else None,
"erase": Variant("s", self.erase) if self.erase else None,
"update-partition-type": _optional_variant("b", self.update_partition_type),
"no-block": _optional_variant("b", self.no_block),
"dry-run-first": _optional_variant("b", self.dry_run_first),

@@ -33,7 +33,7 @@ SCHEMA_DISCOVERY = vol.Schema(
{
vol.Required(ATTR_UUID): uuid_match,
vol.Required(ATTR_ADDON): str,
vol.Required(ATTR_SERVICE): valid_discovery_service,
vol.Required(ATTR_SERVICE): str,
vol.Required(ATTR_CONFIG): vol.Maybe(dict),
},
extra=vol.REMOVE_EXTRA,

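The discovery schema above loosens `ATTR_SERVICE` from a whitelist validator to any `str`. A reduced voluptuous sketch of the same schema shape (shortened attribute names):

```python
import voluptuous as vol

schema = vol.Schema(
    {
        vol.Required("service"): str,  # was: a whitelist validator
        vol.Required("config"): vol.Maybe(dict),
    },
    extra=vol.REMOVE_EXTRA,  # silently drop unknown keys
)

assert schema({"service": "mqtt", "config": None, "junk": 1}) == {
    "service": "mqtt",
    "config": None,
}
```
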
@@ -1,7 +1,6 @@
"""Init file for Supervisor add-on Docker object."""
from __future__ import annotations

import asyncio
from collections.abc import Awaitable
from contextlib import suppress
from ipaddress import IPv4Address, ip_address
@@ -16,15 +15,10 @@ from docker.types import Mount
import requests

from ..addons.build import AddonBuild
from ..addons.const import MappingType
from ..bus import EventListener
from ..const import (
DOCKER_CPU_RUNTIME_ALLOCATION,
MAP_ADDONS,
MAP_BACKUP,
MAP_CONFIG,
MAP_MEDIA,
MAP_SHARE,
MAP_SSL,
SECURITY_DISABLE,
SECURITY_PROFILE,
SYSTEMD_JOURNAL_PERSISTENT,
@@ -37,14 +31,15 @@ from ..exceptions import (
CoreDNSError,
DBusError,
DockerError,
DockerJobError,
DockerNotFound,
HardwareNotFound,
)
from ..hardware.const import PolicyGroup
from ..hardware.data import Device
from ..jobs.decorator import Job, JobCondition, JobExecutionLimit
from ..jobs.const import JobCondition, JobExecutionLimit
from ..jobs.decorator import Job
from ..resolution.const import ContextType, IssueType, SuggestionType
from ..utils import process_lock
from ..utils.sentry import capture_exception
from .const import (
ENV_TIME,
@@ -74,8 +69,8 @@ class DockerAddon(DockerInterface):

def __init__(self, coresys: CoreSys, addon: Addon):
"""Initialize Docker Home Assistant wrapper."""
super().__init__(coresys)
self.addon: Addon = addon
super().__init__(coresys)

self._hw_listener: EventListener | None = None

@@ -278,7 +273,7 @@ class DockerAddon(DockerInterface):
return None

@property
def capabilities(self) -> list[str] | None:
def capabilities(self) -> list[Capabilities] | None:
"""Generate needed capabilities."""
capabilities: set[Capabilities] = set(self.addon.privileged)

@@ -292,7 +287,7 @@ class DockerAddon(DockerInterface):

# Return None if no capabilities are present
if capabilities:
return [cap.value for cap in capabilities]
return list(capabilities)
return None

@property
@@ -329,76 +324,118 @@ class DockerAddon(DockerInterface):
"""Return mounts for container."""
addon_mapping = self.addon.map_volumes

target_data_path = ""
if MappingType.DATA in addon_mapping:
target_data_path = addon_mapping[MappingType.DATA].path

mounts = [
MOUNT_DEV,
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=self.addon.path_extern_data.as_posix(),
target="/data",
target=target_data_path or "/data",
read_only=False,
),
]

# setup config mappings
if MAP_CONFIG in addon_mapping:
if MappingType.CONFIG in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target="/config",
read_only=addon_mapping[MAP_CONFIG],
target=addon_mapping[MappingType.CONFIG].path or "/config",
read_only=addon_mapping[MappingType.CONFIG].read_only,
)
)

if MAP_SSL in addon_mapping:
else:
# Map addon's public config folder if not using deprecated config option
if self.addon.addon_config_used:
mounts.append(
Mount(
type=MountType.BIND,
source=self.addon.path_extern_config.as_posix(),
target=addon_mapping[MappingType.ADDON_CONFIG].path
or "/config",
read_only=addon_mapping[MappingType.ADDON_CONFIG].read_only,
)
)

# Map Home Assistant config in new way
if MappingType.HOMEASSISTANT_CONFIG in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target=addon_mapping[MappingType.HOMEASSISTANT_CONFIG].path
or "/homeassistant",
read_only=addon_mapping[
MappingType.HOMEASSISTANT_CONFIG
].read_only,
)
)

if MappingType.ALL_ADDON_CONFIGS in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=self.sys_config.path_extern_addon_configs.as_posix(),
target=addon_mapping[MappingType.ALL_ADDON_CONFIGS].path
or "/addon_configs",
read_only=addon_mapping[MappingType.ALL_ADDON_CONFIGS].read_only,
)
)

if MappingType.SSL in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND,
source=self.sys_config.path_extern_ssl.as_posix(),
target="/ssl",
read_only=addon_mapping[MAP_SSL],
target=addon_mapping[MappingType.SSL].path or "/ssl",
read_only=addon_mapping[MappingType.SSL].read_only,
)
)

if MAP_ADDONS in addon_mapping:
if MappingType.ADDONS in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=self.sys_config.path_extern_addons_local.as_posix(),
target="/addons",
read_only=addon_mapping[MAP_ADDONS],
target=addon_mapping[MappingType.ADDONS].path or "/addons",
read_only=addon_mapping[MappingType.ADDONS].read_only,
)
)

if MAP_BACKUP in addon_mapping:
if MappingType.BACKUP in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=self.sys_config.path_extern_backup.as_posix(),
target="/backup",
read_only=addon_mapping[MAP_BACKUP],
target=addon_mapping[MappingType.BACKUP].path or "/backup",
read_only=addon_mapping[MappingType.BACKUP].read_only,
)
)

if MAP_SHARE in addon_mapping:
if MappingType.SHARE in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=self.sys_config.path_extern_share.as_posix(),
target="/share",
read_only=addon_mapping[MAP_SHARE],
propagation=PropagationMode.RSLAVE.value,
target=addon_mapping[MappingType.SHARE].path or "/share",
read_only=addon_mapping[MappingType.SHARE].read_only,
propagation=PropagationMode.RSLAVE,
)
)

if MAP_MEDIA in addon_mapping:
if MappingType.MEDIA in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=self.sys_config.path_extern_media.as_posix(),
target="/media",
read_only=addon_mapping[MAP_MEDIA],
propagation=PropagationMode.RSLAVE.value,
target=addon_mapping[MappingType.MEDIA].path or "/media",
read_only=addon_mapping[MappingType.MEDIA].read_only,
propagation=PropagationMode.RSLAVE,
)
)

@@ -411,7 +448,7 @@ class DockerAddon(DockerInterface):
continue
mounts.append(
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=gpio_path,
target=gpio_path,
read_only=False,
@@ -422,7 +459,7 @@ class DockerAddon(DockerInterface):
if self.addon.with_devicetree:
mounts.append(
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source="/sys/firmware/devicetree/base",
target="/device-tree",
read_only=True,
@@ -437,7 +474,7 @@ class DockerAddon(DockerInterface):
if self.addon.with_kernel_modules:
mounts.append(
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source="/lib/modules",
target="/lib/modules",
read_only=True,
@@ -456,19 +493,19 @@ class DockerAddon(DockerInterface):
if self.addon.with_audio:
mounts += [
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=self.addon.path_extern_pulse.as_posix(),
target="/etc/pulse/client.conf",
read_only=True,
),
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=self.sys_plugins.audio.path_extern_pulse.as_posix(),
target="/run/audio",
read_only=True,
),
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=self.sys_plugins.audio.path_extern_asound.as_posix(),
target="/etc/asound.conf",
read_only=True,
@@ -479,13 +516,13 @@ class DockerAddon(DockerInterface):
if self.addon.with_journald:
mounts += [
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=SYSTEMD_JOURNAL_PERSISTENT.as_posix(),
target=SYSTEMD_JOURNAL_PERSISTENT.as_posix(),
read_only=True,
),
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=SYSTEMD_JOURNAL_VOLATILE.as_posix(),
target=SYSTEMD_JOURNAL_VOLATILE.as_posix(),
read_only=True,
@@ -494,28 +531,23 @@ class DockerAddon(DockerInterface):

return mounts

def _run(self) -> None:
"""Run Docker image.

Need run inside executor.
"""
if self._is_running():
return

@Job(
name="docker_addon_run",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def run(self) -> None:
"""Run Docker image."""
# Security check
if not self.addon.protected:
_LOGGER.warning("%s running with disabled protected mode!", self.addon.name)

# Cleanup
self._stop()

# Don't set a hostname if no separate UTS namespace is used
hostname = None if self.uts_mode else self.addon.hostname

# Create & Run container
try:
docker_container = self.sys_docker.run(
self.image,
await self._run(
tag=str(self.addon.version),
name=self.name,
hostname=hostname,
@@ -546,14 +578,13 @@ class DockerAddon(DockerInterface):
)
raise

self._meta = docker_container.attrs
_LOGGER.info(
"Starting Docker add-on %s with version %s", self.image, self.version
)

# Write data to DNS server
try:
self.sys_plugins.dns.add_host(
await self.sys_plugins.dns.add_host(
ipv4=self.ip_address, names=[self.addon.hostname]
)
except CoreDNSError as err:
@@ -566,13 +597,19 @@ class DockerAddon(DockerInterface):
BusEvent.HARDWARE_NEW_DEVICE, self._hardware_events
)

def _update(
self, version: AwesomeVersion, image: str | None = None, latest: bool = False
@Job(
name="docker_addon_update",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def update(
self,
version: AwesomeVersion,
image: str | None = None,
latest: bool = False,
arch: CpuArch | None = None,
) -> None:
"""Update a docker image.

Need run inside executor.
"""
"""Update a docker image."""
image = image or self.image

_LOGGER.info(
@@ -580,15 +617,20 @@ class DockerAddon(DockerInterface):
)

# Update docker image
self._install(
version, image=image, latest=latest, need_build=self.addon.latest_need_build
await self.install(
version,
image=image,
latest=latest,
arch=arch,
need_build=self.addon.latest_need_build,
)

# Stop container & cleanup
with suppress(DockerError):
self._stop()

def _install(
@Job(
name="docker_addon_install",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def install(
self,
version: AwesomeVersion,
image: str | None = None,
@@ -597,20 +639,14 @@ class DockerAddon(DockerInterface):
*,
need_build: bool | None = None,
) -> None:
"""Pull Docker image or build it.

Need run inside executor.
"""
"""Pull Docker image or build it."""
if need_build is None and self.addon.need_build or need_build:
self._build(version)
await self._build(version)
else:
super()._install(version, image, latest, arch)
await super().install(version, image, latest, arch)

def _build(self, version: AwesomeVersion) -> None:
"""Build a Docker container.

Need run inside executor.
"""
async def _build(self, version: AwesomeVersion) -> None:
"""Build a Docker container."""
build_env = AddonBuild(self.coresys, self.addon)
if not build_env.is_valid:
_LOGGER.error("Invalid build environment, can't build this add-on!")
@@ -618,8 +654,10 @@ class DockerAddon(DockerInterface):

_LOGGER.info("Starting build for %s:%s", self.image, version)
try:
image, log = self.sys_docker.images.build(
use_config_proxy=False, **build_env.get_docker_args(version)
image, log = await self.sys_run_in_executor(
self.sys_docker.images.build,
use_config_proxy=False,
**build_env.get_docker_args(version),
)

_LOGGER.debug("Build %s:%s done: %s", self.image, version, log)
@@ -642,77 +680,51 @@ class DockerAddon(DockerInterface):

_LOGGER.info("Build %s:%s done", self.image, version)

@process_lock
@Job(
name="docker_addon_export_image",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
def export_image(self, tar_file: Path) -> Awaitable[None]:
"""Export current images into a tar file."""
return self.sys_run_in_executor(self._export_image, tar_file)
return self.sys_run_in_executor(
self.sys_docker.export_image, self.image, self.version, tar_file
)

def _export_image(self, tar_file: Path) -> None:
"""Export current images into a tar file.

Need run inside executor.
"""
try:
image = self.sys_docker.api.get_image(f"{self.image}:{self.version}")
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't fetch image %s: %s", self.image, err)
raise DockerError() from err

_LOGGER.info("Export image %s to %s", self.image, tar_file)
try:
with tar_file.open("wb") as write_tar:
for chunk in image:
write_tar.write(chunk)
except (OSError, requests.RequestException) as err:
_LOGGER.error("Can't write tar file %s: %s", tar_file, err)
raise DockerError() from err

_LOGGER.info("Export image %s done", self.image)

@process_lock
def import_image(self, tar_file: Path) -> Awaitable[None]:
@Job(
name="docker_addon_import_image",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def import_image(self, tar_file: Path) -> None:
"""Import a tar file as image."""
return self.sys_run_in_executor(self._import_image, tar_file)
docker_image = await self.sys_run_in_executor(
self.sys_docker.import_image, tar_file
)
if docker_image:
self._meta = docker_image.attrs
_LOGGER.info("Importing image %s and version %s", tar_file, self.version)

def _import_image(self, tar_file: Path) -> None:
"""Import a tar file as image.
with suppress(DockerError):
await self.cleanup()

Need run inside executor.
"""
try:
with tar_file.open("rb") as read_tar:
docker_image_list = self.sys_docker.images.load(read_tar)

if len(docker_image_list) != 1:
_LOGGER.warning(
"Unexpected image count %d while importing image from tar",
len(docker_image_list),
)
return
docker_image = docker_image_list[0]
except (docker.errors.DockerException, OSError) as err:
_LOGGER.error("Can't import image %s: %s", self.image, err)
raise DockerError() from err

self._meta = docker_image.attrs
_LOGGER.info("Importing image %s and version %s", tar_file, self.version)

with suppress(DockerError):
self._cleanup()

@process_lock
def write_stdin(self, data: bytes) -> Awaitable[None]:
@Job(
name="docker_addon_write_stdin",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def write_stdin(self, data: bytes) -> None:
"""Write to add-on stdin."""
return self.sys_run_in_executor(self._write_stdin, data)
if not await self.is_running():
raise DockerError()

await self.sys_run_in_executor(self._write_stdin, data)

def _write_stdin(self, data: bytes) -> None:
"""Write to add-on stdin.

Need run inside executor.
"""
if not self._is_running():
raise DockerError()

try:
# Load needed docker objects
container = self.sys_docker.containers.get(self.name)
@@ -730,15 +742,17 @@ class DockerAddon(DockerInterface):
_LOGGER.error("Can't write to %s stdin: %s", self.name, err)
raise DockerError() from err

def _stop(self, remove_container=True) -> None:
"""Stop/remove Docker container.

Need run inside executor.
"""
@Job(
name="docker_addon_stop",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def stop(self, remove_container: bool = True) -> None:
"""Stop/remove Docker container."""
# DNS
if self.ip_address != NO_ADDDRESS:
try:
self.sys_plugins.dns.delete_host(self.addon.hostname)
await self.sys_plugins.dns.delete_host(self.addon.hostname)
except CoreDNSError as err:
_LOGGER.warning("Can't update DNS for %s", self.name)
capture_exception(err)
@@ -748,9 +762,9 @@ class DockerAddon(DockerInterface):
self.sys_bus.remove_listener(self._hw_listener)
self._hw_listener = None

super()._stop(remove_container)
await super().stop(remove_container)

def _validate_trust(
async def _validate_trust(
self, image_id: str, image: str, version: AwesomeVersion
) -> None:
"""Validate trust of content."""
@@ -758,13 +772,14 @@ class DockerAddon(DockerInterface):
return

checksum = image_id.partition(":")[2]
job = asyncio.run_coroutine_threadsafe(
self.sys_security.verify_content(self.addon.codenotary, checksum),
self.sys_loop,
)
job.result()
return await self.sys_security.verify_content(self.addon.codenotary, checksum)

@Job(conditions=[JobCondition.OS_AGENT], limit=JobExecutionLimit.SINGLE_WAIT)
@Job(
name="docker_addon_hardware_events",
conditions=[JobCondition.OS_AGENT],
limit=JobExecutionLimit.SINGLE_WAIT,
internal=True,
)
async def _hardware_events(self, device: Device) -> None:
"""Process Hardware events for adjust device access."""
if not any(

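The add-on wrapper's sync `_run`/`_install`/`_stop` methods become `async` coroutines guarded by named `@Job(..., limit=JobExecutionLimit.GROUP_ONCE, on_condition=DockerJobError)` decorators instead of `@process_lock`. The exact semantics live in Supervisor's jobs module; as a rough analogue only, a group-once limit behaves like a per-group guard that refuses to start the same job twice concurrently:

```python
import asyncio
from functools import wraps


def group_once(lock: asyncio.Lock):
    """Rough analogue only: reject a job while its group is already busy."""

    def decorator(func):
        @wraps(func)
        async def wrapper(*args, **kwargs):
            if lock.locked():
                raise RuntimeError(f"{func.__name__}: job already running")
            async with lock:
                return await func(*args, **kwargs)

        return wrapper

    return decorator


addon_group = asyncio.Lock()


@group_once(addon_group)
async def run_addon() -> str:
    await asyncio.sleep(0)  # stands in for container work in an executor
    return "started"


print(asyncio.run(run_addon()))
```
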
@@ -6,7 +6,10 @@ from docker.types import Mount

from ..const import DOCKER_CPU_RUNTIME_ALLOCATION, MACHINE_ID
from ..coresys import CoreSysAttributes
from ..exceptions import DockerJobError
from ..hardware.const import PolicyGroup
from ..jobs.const import JobExecutionLimit
from ..jobs.decorator import Job
from .const import (
ENV_TIME,
MOUNT_DBUS,
@@ -42,7 +45,7 @@ class DockerAudio(DockerInterface, CoreSysAttributes):
mounts = [
MOUNT_DEV,
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=self.sys_config.path_extern_audio.as_posix(),
target="/data",
read_only=False,
@@ -65,9 +68,9 @@ class DockerAudio(DockerInterface, CoreSysAttributes):
) + self.sys_hardware.policy.get_cgroups_rules(PolicyGroup.BLUETOOTH)

@property
def capabilities(self) -> list[str]:
def capabilities(self) -> list[Capabilities]:
"""Generate needed capabilities."""
return [cap.value for cap in (Capabilities.SYS_NICE, Capabilities.SYS_RESOURCE)]
return [Capabilities.SYS_NICE, Capabilities.SYS_RESOURCE]

@property
def ulimits(self) -> list[docker.types.Ulimit]:
@@ -82,20 +85,14 @@ class DockerAudio(DockerInterface, CoreSysAttributes):
return None
return DOCKER_CPU_RUNTIME_ALLOCATION

def _run(self) -> None:
"""Run Docker image.

Need run inside executor.
"""
if self._is_running():
return

# Cleanup
self._stop()

# Create & Run container
docker_container = self.sys_docker.run(
self.image,
@Job(
name="docker_audio_run",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def run(self) -> None:
"""Run Docker image."""
await self._run(
tag=str(self.sys_plugins.audio.version),
init=False,
ipv4=self.sys_docker.network.audio,
@@ -112,8 +109,6 @@ class DockerAudio(DockerInterface, CoreSysAttributes):
},
mounts=self.mounts,
)

self._meta = docker_container.attrs
_LOGGER.info(
"Starting Audio %s with version %s - %s",
self.image,

@@ -2,6 +2,9 @@
import logging

from ..coresys import CoreSysAttributes
from ..exceptions import DockerJobError
from ..jobs.const import JobExecutionLimit
from ..jobs.decorator import Job
from .const import ENV_TIME, ENV_TOKEN
from .interface import DockerInterface

@@ -23,20 +26,14 @@ class DockerCli(DockerInterface, CoreSysAttributes):
"""Return name of Docker container."""
return CLI_DOCKER_NAME

def _run(self) -> None:
"""Run Docker image.

Need run inside executor.
"""
if self._is_running():
return

# Cleanup
self._stop()

# Create & Run container
docker_container = self.sys_docker.run(
self.image,
@Job(
name="docker_cli_run",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def run(self) -> None:
"""Run Docker image."""
await self._run(
entrypoint=["/init"],
tag=str(self.sys_plugins.cli.version),
init=False,
@@ -54,8 +51,6 @@ class DockerCli(DockerInterface, CoreSysAttributes):
ENV_TOKEN: self.sys_plugins.cli.supervisor_token,
},
)

self._meta = docker_container.attrs
_LOGGER.info(
"Starting CLI %s with version %s - %s",
self.image,

@@ -1,12 +1,12 @@
"""Docker constants."""
from enum import Enum
from enum import StrEnum

from docker.types import Mount

from ..const import MACHINE_ID


class Capabilities(str, Enum):
class Capabilities(StrEnum):
"""Linux Capabilities."""

BPF = "BPF"
@@ -24,7 +24,7 @@ class Capabilities(str, Enum):
SYS_TIME = "SYS_TIME"


class ContainerState(str, Enum):
class ContainerState(StrEnum):
"""State of supervisor managed docker container."""

FAILED = "failed"
@@ -35,7 +35,7 @@ class ContainerState(str, Enum):
UNKNOWN = "unknown"


class RestartPolicy(str, Enum):
class RestartPolicy(StrEnum):
"""Restart policy of container."""

NO = "no"
@@ -44,7 +44,7 @@ class RestartPolicy(str, Enum):
ALWAYS = "always"


class MountType(str, Enum):
class MountType(StrEnum):
"""Mount type."""

BIND = "bind"
@@ -53,7 +53,7 @@ class MountType(str, Enum):
NPIPE = "npipe"


class PropagationMode(str, Enum):
class PropagationMode(StrEnum):
"""Propagation mode, only for bind type mounts."""

PRIVATE = "private"
@@ -71,23 +71,21 @@ ENV_TOKEN_OLD = "HASSIO_TOKEN"
LABEL_MANAGED = "supervisor_managed"

MOUNT_DBUS = Mount(
type=MountType.BIND.value, source="/run/dbus", target="/run/dbus", read_only=True
)
MOUNT_DEV = Mount(
type=MountType.BIND.value, source="/dev", target="/dev", read_only=True
type=MountType.BIND, source="/run/dbus", target="/run/dbus", read_only=True
)
MOUNT_DEV = Mount(type=MountType.BIND, source="/dev", target="/dev", read_only=True)
MOUNT_DOCKER = Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source="/run/docker.sock",
target="/run/docker.sock",
read_only=True,
)
MOUNT_MACHINE_ID = Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=MACHINE_ID.as_posix(),
target=MACHINE_ID.as_posix(),
read_only=True,
)
MOUNT_UDEV = Mount(
type=MountType.BIND.value, source="/run/udev", target="/run/udev", read_only=True
type=MountType.BIND, source="/run/udev", target="/run/udev", read_only=True
)

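With `MountType` a `StrEnum`, the member can be handed straight to docker-py's `Mount`, whose validation only compares against plain strings, so `.value` becomes noise. A small check (requires the `docker` package installed; behavior assumed from docker-py's string-based validation):

```python
from enum import StrEnum

from docker.types import Mount


class MountType(StrEnum):  # stand-in for the constant in this file
    BIND = "bind"


mount = Mount(target="/dev", source="/dev", type=MountType.BIND, read_only=True)
assert mount["Type"] == "bind"  # the member compares equal to its value
```
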
@@ -4,6 +4,9 @@ import logging
from docker.types import Mount

from ..coresys import CoreSysAttributes
from ..exceptions import DockerJobError
from ..jobs.const import JobExecutionLimit
from ..jobs.decorator import Job
from .const import ENV_TIME, MOUNT_DBUS, MountType
from .interface import DockerInterface

@@ -25,20 +28,14 @@ class DockerDNS(DockerInterface, CoreSysAttributes):
"""Return name of Docker container."""
return DNS_DOCKER_NAME

def _run(self) -> None:
"""Run Docker image.

Need run inside executor.
"""
if self._is_running():
return

# Cleanup
self._stop()

# Create & Run container
docker_container = self.sys_docker.run(
self.image,
@Job(
name="docker_dns_run",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def run(self) -> None:
"""Run Docker image."""
await self._run(
tag=str(self.sys_plugins.dns.version),
init=False,
dns=False,
@@ -50,7 +47,7 @@ class DockerDNS(DockerInterface, CoreSysAttributes):
environment={ENV_TIME: self.sys_timezone},
mounts=[
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=self.sys_config.path_extern_dns.as_posix(),
target="/config",
read_only=False,
@@ -59,8 +56,6 @@ class DockerDNS(DockerInterface, CoreSysAttributes):
],
oom_score_adj=-300,
)

self._meta = docker_container.attrs
_LOGGER.info(
"Starting DNS %s with version %s - %s",
self.image,

@@ -4,14 +4,14 @@ from ipaddress import IPv4Address
|
||||
import logging
|
||||
|
||||
from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
|
||||
import docker
|
||||
from docker.types import Mount
|
||||
import requests
|
||||
|
||||
from ..const import LABEL_MACHINE, MACHINE_ID
|
||||
from ..exceptions import DockerError
|
||||
from ..exceptions import DockerJobError
|
||||
from ..hardware.const import PolicyGroup
|
||||
from ..homeassistant.const import LANDINGPAGE
|
||||
from ..jobs.const import JobExecutionLimit
|
||||
from ..jobs.decorator import Job
|
||||
from .const import (
|
||||
ENV_TIME,
|
||||
ENV_TOKEN,
|
||||
@@ -53,9 +53,10 @@ class DockerHomeAssistant(DockerInterface):
|
||||
@property
|
||||
def timeout(self) -> int:
|
||||
"""Return timeout for Docker actions."""
|
||||
# Synchronized homeassistant's S6_SERVICES_GRACETIME
|
||||
# to avoid killing Home Assistant Core
|
||||
return 220 + 20
|
||||
# Synchronized with the homeassistant core container's S6_SERVICES_GRACETIME
|
||||
# to avoid killing Home Assistant Core, see
|
||||
# https://github.com/home-assistant/core/tree/dev/Dockerfile
|
||||
return 240 + 20
|
||||
|
||||
@property
|
||||
def ip_address(self) -> IPv4Address:
|
||||
@@ -66,10 +67,14 @@ class DockerHomeAssistant(DockerInterface):
|
||||
def cgroups_rules(self) -> list[str]:
|
||||
"""Return a list of needed cgroups permission."""
|
||||
return (
|
||||
self.sys_hardware.policy.get_cgroups_rules(PolicyGroup.UART)
|
||||
+ self.sys_hardware.policy.get_cgroups_rules(PolicyGroup.VIDEO)
|
||||
+ self.sys_hardware.policy.get_cgroups_rules(PolicyGroup.GPIO)
|
||||
+ self.sys_hardware.policy.get_cgroups_rules(PolicyGroup.USB)
|
||||
[]
|
||||
if self.sys_homeassistant.version == LANDINGPAGE
|
||||
else (
|
||||
self.sys_hardware.policy.get_cgroups_rules(PolicyGroup.UART)
|
||||
+ self.sys_hardware.policy.get_cgroups_rules(PolicyGroup.VIDEO)
|
||||
+ self.sys_hardware.policy.get_cgroups_rules(PolicyGroup.GPIO)
|
||||
+ self.sys_hardware.policy.get_cgroups_rules(PolicyGroup.USB)
|
||||
)
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -79,79 +84,81 @@ class DockerHomeAssistant(DockerInterface):
MOUNT_DEV,
MOUNT_DBUS,
MOUNT_UDEV,
# Add folders
# HA config folder
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target="/config",
read_only=False,
),
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_ssl.as_posix(),
target="/ssl",
read_only=True,
),
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_share.as_posix(),
target="/share",
read_only=False,
propagation=PropagationMode.RSLAVE.value,
),
Mount(
type=MountType.BIND.value,
source=self.sys_config.path_extern_media.as_posix(),
target="/media",
read_only=False,
propagation=PropagationMode.RSLAVE.value,
),
# Configuration audio
Mount(
type=MountType.BIND.value,
source=self.sys_homeassistant.path_extern_pulse.as_posix(),
target="/etc/pulse/client.conf",
read_only=True,
),
Mount(
type=MountType.BIND.value,
source=self.sys_plugins.audio.path_extern_pulse.as_posix(),
target="/run/audio",
read_only=True,
),
Mount(
type=MountType.BIND.value,
source=self.sys_plugins.audio.path_extern_asound.as_posix(),
target="/etc/asound.conf",
read_only=True,
),
]

# Landingpage does not need all this access
if self.sys_homeassistant.version != LANDINGPAGE:
mounts.extend(
[
# All other folders
Mount(
type=MountType.BIND,
source=self.sys_config.path_extern_ssl.as_posix(),
target="/ssl",
read_only=True,
),
Mount(
type=MountType.BIND,
source=self.sys_config.path_extern_share.as_posix(),
target="/share",
read_only=False,
propagation=PropagationMode.RSLAVE.value,
),
Mount(
type=MountType.BIND,
source=self.sys_config.path_extern_media.as_posix(),
target="/media",
read_only=False,
propagation=PropagationMode.RSLAVE.value,
),
# Configuration audio
Mount(
type=MountType.BIND,
source=self.sys_homeassistant.path_extern_pulse.as_posix(),
target="/etc/pulse/client.conf",
read_only=True,
),
Mount(
type=MountType.BIND,
source=self.sys_plugins.audio.path_extern_pulse.as_posix(),
target="/run/audio",
read_only=True,
),
Mount(
type=MountType.BIND,
source=self.sys_plugins.audio.path_extern_asound.as_posix(),
target="/etc/asound.conf",
read_only=True,
),
]
)

# Machine ID
if MACHINE_ID.exists():
mounts.append(MOUNT_MACHINE_ID)

return mounts

def _run(self) -> None:
"""Run Docker image.

Need run inside executor.
"""
if self._is_running():
return

# Cleanup
self._stop()

# Create & Run container
docker_container = self.sys_docker.run(
self.image,
@Job(
name="docker_home_assistant_run",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def run(self) -> None:
"""Run Docker image."""
await self._run(
tag=(self.sys_homeassistant.version),
name=self.name,
hostname=self.name,
detach=True,
privileged=True,
privileged=self.sys_homeassistant.version != LANDINGPAGE,
init=False,
security_opt=self.security_opt,
network_mode="host",
@@ -171,18 +178,19 @@ class DockerHomeAssistant(DockerInterface):
tmpfs={"/tmp": ""},
oom_score_adj=-300,
)

self._meta = docker_container.attrs
_LOGGER.info(
"Starting Home Assistant %s with version %s", self.image, self.version
)

def _execute_command(self, command: str) -> CommandReturn:
"""Create a temporary container and run command.

Need run inside executor.
"""
return self.sys_docker.run_command(
@Job(
name="docker_home_assistant_execute_command",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def execute_command(self, command: str) -> CommandReturn:
"""Create a temporary container and run command."""
return await self.sys_run_in_executor(
self.sys_docker.run_command,
self.image,
version=self.sys_homeassistant.version,
command=command,
@@ -194,19 +202,19 @@ class DockerHomeAssistant(DockerInterface):
stderr=True,
mounts=[
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target="/config",
read_only=False,
),
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=self.sys_config.path_extern_ssl.as_posix(),
target="/ssl",
read_only=True,
),
Mount(
type=MountType.BIND.value,
type=MountType.BIND,
source=self.sys_config.path_extern_share.as_posix(),
target="/share",
read_only=False,
@@ -217,34 +225,14 @@ class DockerHomeAssistant(DockerInterface):

def is_initialize(self) -> Awaitable[bool]:
"""Return True if Docker container exists."""
return self.sys_run_in_executor(self._is_initialize)
return self.sys_run_in_executor(
self.sys_docker.container_is_initialized,
self.name,
self.image,
self.sys_homeassistant.version,
)

def _is_initialize(self) -> bool:
"""Return True if docker container exists.

Need run inside executor.
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
docker_image = self.sys_docker.images.get(
f"{self.image}:{self.sys_homeassistant.version}"
)
except docker.errors.NotFound:
return False
except (docker.errors.DockerException, requests.RequestException):
raise DockerError()

# we run on an old image, stop and start it
if docker_container.image.id != docker_image.id:
return False

# Check for correct state
if docker_container.status not in ("exited", "running", "created"):
return False

return True

def _validate_trust(
async def _validate_trust(
self, image_id: str, image: str, version: AwesomeVersion
) -> None:
"""Validate trust of content."""
@@ -254,4 +242,4 @@ class DockerHomeAssistant(DockerInterface):
except AwesomeVersionCompareException:
return

super()._validate_trust(image_id, image, version)
await super()._validate_trust(image_id, image, version)

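The hunk above gates both the device cgroup rules and the container mounts on whether Core is still the landingpage, and drops `privileged=True` in that case. A minimal sketch of the pattern, with simplified stand-in names rather than the Supervisor's real `Mount` class:

```python
"""Sketch: build container options conditionally, as the diff does for the
landingpage. Mount and LANDINGPAGE here are illustrative stand-ins."""
from dataclasses import dataclass

LANDINGPAGE = "landingpage"

@dataclass
class Mount:
    source: str
    target: str
    read_only: bool

def build_mounts(version: str) -> list[Mount]:
    # Every container gets the config folder.
    mounts = [Mount("/mnt/data/homeassistant", "/config", read_only=False)]
    # The landingpage is a static page; it needs no ssl/share/media access.
    if version != LANDINGPAGE:
        mounts.extend(
            [
                Mount("/mnt/data/ssl", "/ssl", read_only=True),
                Mount("/mnt/data/share", "/share", read_only=False),
            ]
        )
    return mounts

print(len(build_mounts(LANDINGPAGE)))  # 1 -> minimal surface
print(len(build_mounts("2023.8.0")))   # 3 -> full access
```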
@@ -1,13 +1,14 @@
"""Interface class for Supervisor Docker object."""
from __future__ import annotations

import asyncio
from collections import defaultdict
from collections.abc import Awaitable
from contextlib import suppress
import logging
import re
from time import time
from typing import Any
from uuid import uuid4

from awesomeversion import AwesomeVersion
from awesomeversion.strategy import AwesomeVersionStrategy
@@ -24,18 +25,21 @@ from ..const import (
BusEvent,
CpuArch,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..coresys import CoreSys
from ..exceptions import (
CodeNotaryError,
CodeNotaryUntrusted,
DockerAPIError,
DockerError,
DockerJobError,
DockerNotFound,
DockerRequestError,
DockerTrustError,
)
from ..jobs.const import JOB_GROUP_DOCKER_INTERFACE, JobExecutionLimit
from ..jobs.decorator import Job
from ..jobs.job_group import JobGroup
from ..resolution.const import ContextType, IssueType, SuggestionType
from ..utils import process_lock
from ..utils.sentry import capture_exception
from .const import ContainerState, RestartPolicy
from .manager import CommandReturn
@@ -73,14 +77,20 @@ def _container_state_from_model(docker_container: Container) -> ContainerState:
return ContainerState.STOPPED


class DockerInterface(CoreSysAttributes):
class DockerInterface(JobGroup):
"""Docker Supervisor interface."""

def __init__(self, coresys: CoreSys):
"""Initialize Docker base wrapper."""
super().__init__(
coresys,
JOB_GROUP_DOCKER_INTERFACE.format_map(
defaultdict(str, name=self.name or uuid4().hex)
),
self.name,
)
self.coresys: CoreSys = coresys
self._meta: dict[str, Any] | None = None
self.lock: asyncio.Lock = asyncio.Lock()

@property
def timeout(self) -> int:
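The new `__init__` derives a job-group reference per instance, falling back to a random uuid when the wrapper has no name yet. A minimal sketch of that naming trick; the template string is illustrative, not the real constant:

```python
"""Sketch of the job-group naming above: format_map with a defaultdict keeps
missing template fields from raising, and uuid4 covers unnamed instances."""
from collections import defaultdict
from uuid import uuid4

JOB_GROUP_DOCKER_INTERFACE = "docker_{name}"  # illustrative template

def job_group_name(name: str | None) -> str:
    return JOB_GROUP_DOCKER_INTERFACE.format_map(
        defaultdict(str, name=name or uuid4().hex)
    )

print(job_group_name("homeassistant"))  # docker_homeassistant
print(len(job_group_name(None)))        # "docker_" plus 32 hex chars
```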
@@ -141,7 +151,7 @@ class DockerInterface(CoreSysAttributes):
@property
def in_progress(self) -> bool:
"""Return True if a task is in progress."""
return self.lock.locked()
return self.active_job

@property
def restart_policy(self) -> RestartPolicy | None:
@@ -193,7 +203,7 @@ class DockerInterface(CoreSysAttributes):

return credentials

def _docker_login(self, image: str) -> None:
async def _docker_login(self, image: str) -> None:
"""Try to log in to the registry if there are credentials available."""
if not self.sys_docker.config.registries:
return
@@ -202,30 +212,21 @@ class DockerInterface(CoreSysAttributes):
if not credentials:
return

self.sys_docker.docker.login(**credentials)
await self.sys_run_in_executor(self.sys_docker.docker.login, **credentials)

@process_lock
def install(
self,
version: AwesomeVersion,
image: str | None = None,
latest: bool = False,
arch: CpuArch | None = None,
):
"""Pull docker image."""
return self.sys_run_in_executor(self._install, version, image, latest, arch)

def _install(
@Job(
name="docker_interface_install",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def install(
self,
version: AwesomeVersion,
image: str | None = None,
latest: bool = False,
arch: CpuArch | None = None,
) -> None:
"""Pull Docker image.

Need run inside executor.
"""
"""Pull docker image."""
image = image or self.image
arch = arch or self.sys_arch.supervisor

@@ -233,21 +234,24 @@ class DockerInterface(CoreSysAttributes):
try:
if self.sys_docker.config.registries:
# Try login if we have defined credentials
self._docker_login(image)
await self._docker_login(image)

# Pull new image
docker_image = self.sys_docker.images.pull(
docker_image = await self.sys_run_in_executor(
self.sys_docker.images.pull,
f"{image}:{version!s}",
platform=MAP_ARCH[arch],
)

# Validate content
try:
self._validate_trust(docker_image.id, image, version)
await self._validate_trust(docker_image.id, image, version)
except CodeNotaryError:
with suppress(docker.errors.DockerException):
self.sys_docker.images.remove(
image=f"{image}:{version!s}", force=True
await self.sys_run_in_executor(
self.sys_docker.images.remove,
image=f"{image}:{version!s}",
force=True,
)
raise

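The recurring move in this hunk is that blocking docker-py calls (pull, remove) are shifted off the event loop into an executor thread while the method itself becomes a coroutine. A minimal self-contained sketch of that pattern; `blocking_pull` stands in for `docker.images.pull`:

```python
"""Sketch of the executor offloading the diff applies throughout."""
import asyncio
import time

def blocking_pull(image: str, tag: str) -> str:
    time.sleep(0.1)  # stands in for registry/network I/O
    return f"{image}:{tag}"

async def install(image: str, tag: str) -> str:
    loop = asyncio.get_running_loop()
    # The event loop stays responsive while the pull runs in a worker thread.
    return await loop.run_in_executor(None, blocking_pull, image, tag)

print(asyncio.run(install("ghcr.io/home-assistant/home-assistant", "2023.8.0")))
```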
@@ -256,7 +260,7 @@ class DockerInterface(CoreSysAttributes):
_LOGGER.info(
"Tagging image %s with version %s as latest", image, version
)
docker_image.tag(image, tag="latest")
await self.sys_run_in_executor(docker_image.tag, image, tag="latest")
except docker.errors.APIError as err:
if err.status_code == 429:
self.sys_resolution.create_issue(
@@ -289,34 +293,21 @@ class DockerInterface(CoreSysAttributes):

self._meta = docker_image.attrs

def exists(self) -> Awaitable[bool]:
async def exists(self) -> bool:
"""Return True if Docker image exists in local repository."""
return self.sys_run_in_executor(self._exists)

def _exists(self) -> bool:
"""Return True if Docker image exists in local repository.

Need run inside executor.
"""
with suppress(docker.errors.DockerException, requests.RequestException):
self.sys_docker.images.get(f"{self.image}:{self.version!s}")
await self.sys_run_in_executor(
self.sys_docker.images.get, f"{self.image}:{self.version!s}"
)
return True
return False

def is_running(self) -> Awaitable[bool]:
"""Return True if Docker is running.

Return a Future.
"""
return self.sys_run_in_executor(self._is_running)

def _is_running(self) -> bool:
"""Return True if Docker is running.

Need run inside executor.
"""
async def is_running(self) -> bool:
"""Return True if Docker is running."""
try:
docker_container = self.sys_docker.containers.get(self.name)
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
return False
except docker.errors.DockerException as err:
@@ -326,20 +317,12 @@ class DockerInterface(CoreSysAttributes):

return docker_container.status == "running"

def current_state(self) -> Awaitable[ContainerState]:
"""Return current state of container.

Return a Future.
"""
return self.sys_run_in_executor(self._current_state)

def _current_state(self) -> ContainerState:
"""Return current state of container.

Need run inside executor.
"""
async def current_state(self) -> ContainerState:
"""Return current state of container."""
try:
docker_container = self.sys_docker.containers.get(self.name)
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
return ContainerState.UNKNOWN
except docker.errors.DockerException as err:
@@ -349,22 +332,15 @@ class DockerInterface(CoreSysAttributes):

return _container_state_from_model(docker_container)

@process_lock
def attach(
@Job(name="docker_interface_attach", limit=JobExecutionLimit.GROUP_WAIT)
async def attach(
self, version: AwesomeVersion, *, skip_state_event_if_down: bool = False
) -> Awaitable[None]:
"""Attach to running Docker container."""
return self.sys_run_in_executor(self._attach, version, skip_state_event_if_down)

def _attach(
self, version: AwesomeVersion, skip_state_event_if_down: bool = False
) -> None:
"""Attach to running docker container.

Need run inside executor.
"""
"""Attach to running Docker container."""
with suppress(docker.errors.DockerException, requests.RequestException):
docker_container = self.sys_docker.containers.get(self.name)
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
self._meta = docker_container.attrs
self.sys_docker.monitor.watch_container(docker_container)

@@ -374,8 +350,7 @@ class DockerInterface(CoreSysAttributes):
and state in [ContainerState.STOPPED, ContainerState.FAILED]
):
# Fire event with current state of container
self.sys_loop.call_soon_threadsafe(
self.sys_bus.fire_event,
self.sys_bus.fire_event(
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
DockerContainerStateEvent(
self.name, state, docker_container.id, int(time())
@@ -393,114 +368,85 @@ class DockerInterface(CoreSysAttributes):
raise DockerError()
_LOGGER.info("Attaching to %s with version %s", self.image, self.version)

@process_lock
def run(self) -> Awaitable[None]:
@Job(
name="docker_interface_run",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def run(self) -> None:
"""Run Docker image."""
return self.sys_run_in_executor(self._run)

def _run(self) -> None:
"""Run Docker image.

Need run inside executor.
"""
raise NotImplementedError()

@process_lock
def stop(self, remove_container=True) -> Awaitable[None]:
"""Stop/remove Docker container."""
return self.sys_run_in_executor(self._stop, remove_container)

def _stop(self, remove_container=True) -> None:
"""Stop/remove Docker container.

Need run inside executor.
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
except docker.errors.NotFound:
async def _run(self, **kwargs) -> None:
"""Run Docker image with retry if necessary."""
if await self.is_running():
return
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err

if docker_container.status == "running":
_LOGGER.info("Stopping %s application", self.name)
with suppress(docker.errors.DockerException, requests.RequestException):
docker_container.stop(timeout=self.timeout)
# Cleanup
await self.stop()

if remove_container:
with suppress(docker.errors.DockerException, requests.RequestException):
_LOGGER.info("Cleaning %s application", self.name)
docker_container.remove(force=True)
# Create & Run container
try:
docker_container = await self.sys_run_in_executor(
self.sys_docker.run, self.image, **kwargs
)
except DockerNotFound as err:
# If image is missing, capture the exception as this shouldn't happen
capture_exception(err)
raise

@process_lock
# Store metadata
self._meta = docker_container.attrs

@Job(
name="docker_interface_stop",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def stop(self, remove_container: bool = True) -> None:
"""Stop/remove Docker container."""
with suppress(DockerNotFound):
await self.sys_run_in_executor(
self.sys_docker.stop_container,
self.name,
self.timeout,
remove_container,
)

@Job(
name="docker_interface_start",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
def start(self) -> Awaitable[None]:
"""Start Docker container."""
return self.sys_run_in_executor(self._start)
return self.sys_run_in_executor(self.sys_docker.start_container, self.name)

def _start(self) -> None:
"""Start docker container.

Need run inside executor.
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"{self.name} not found for starting up", _LOGGER.error
) from err

_LOGGER.info("Starting %s", self.name)
try:
docker_container.start()
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(f"Can't start {self.name}: {err}", _LOGGER.error) from err

@process_lock
def remove(self) -> Awaitable[None]:
@Job(
name="docker_interface_remove",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def remove(self) -> None:
"""Remove Docker images."""
return self.sys_run_in_executor(self._remove)

def _remove(self) -> None:
"""Remove docker images.

Needs run inside executor.
"""
# Cleanup container
with suppress(DockerError):
self._stop()

_LOGGER.info("Removing image %s with latest and %s", self.image, self.version)

try:
with suppress(docker.errors.ImageNotFound):
self.sys_docker.images.remove(image=f"{self.image}:latest", force=True)

with suppress(docker.errors.ImageNotFound):
self.sys_docker.images.remove(
image=f"{self.image}:{self.version!s}", force=True
)

except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't remove image {self.image}: {err}", _LOGGER.warning
) from err
await self.stop()

await self.sys_run_in_executor(
self.sys_docker.remove_image, self.image, self.version
)
self._meta = None

@process_lock
def update(
self, version: AwesomeVersion, image: str | None = None, latest: bool = False
) -> Awaitable[None]:
"""Update a Docker image."""
return self.sys_run_in_executor(self._update, version, image, latest)

def _update(
@Job(
name="docker_interface_update",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def update(
self, version: AwesomeVersion, image: str | None = None, latest: bool = False
) -> None:
"""Update a docker image.

Need run inside executor.
"""
"""Update a Docker image."""
image = image or self.image

_LOGGER.info(
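The `@Job(..., limit=JobExecutionLimit.GROUP_ONCE, on_condition=DockerJobError)` decorators replacing `@process_lock` give each job group one-at-a-time execution with a typed error instead of silent queueing. A much-reduced sketch of what such a limit provides, under the assumption that a second concurrent call on a busy group should fail fast; the real decorator does far more (conditions, progress, cleanup):

```python
"""Minimal sketch of a GROUP_ONCE-style execution limit."""
import asyncio

class JobError(Exception):
    """Raised when the group is already busy."""

def group_once(lock: asyncio.Lock):
    def decorator(func):
        async def wrapper(*args, **kwargs):
            if lock.locked():
                raise JobError(f"{func.__name__} already running")
            async with lock:
                return await func(*args, **kwargs)
        return wrapper
    return decorator

_group = asyncio.Lock()

@group_once(_group)
async def run_container():
    await asyncio.sleep(0.1)
    return "started"

async def main():
    first = asyncio.create_task(run_container())
    await asyncio.sleep(0)  # let the first call take the lock
    try:
        await run_container()
    except JobError as err:
        print("rejected:", err)
    print(await first)

asyncio.run(main())
```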
@@ -508,163 +454,69 @@ class DockerInterface(CoreSysAttributes):
)

# Update docker image
self._install(version, image=image, latest=latest)
await self.install(version, image=image, latest=latest)

# Stop container & cleanup
with suppress(DockerError):
self._stop()
await self.stop()

def logs(self) -> Awaitable[bytes]:
"""Return Docker logs of container.

Return a Future.
"""
return self.sys_run_in_executor(self._logs)

def _logs(self) -> bytes:
"""Return Docker logs of container.

Need run inside executor.
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
except (docker.errors.DockerException, requests.RequestException):
return b""

try:
return docker_container.logs(tail=100, stdout=True, stderr=True)
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.warning("Can't grep logs from %s: %s", self.image, err)
async def logs(self) -> bytes:
"""Return Docker logs of container."""
with suppress(DockerError):
return await self.sys_run_in_executor(
self.sys_docker.container_logs, self.name
)

return b""

@process_lock
def cleanup(self, old_image: str | None = None) -> Awaitable[None]:
@Job(name="docker_interface_cleanup", limit=JobExecutionLimit.GROUP_WAIT)
def cleanup(
self,
old_image: str | None = None,
image: str | None = None,
version: AwesomeVersion | None = None,
) -> Awaitable[None]:
"""Check if old version exists and cleanup."""
return self.sys_run_in_executor(self._cleanup, old_image)
return self.sys_run_in_executor(
self.sys_docker.cleanup_old_images,
image or self.image,
version or self.version,
{old_image} if old_image else None,
)

def _cleanup(self, old_image: str | None = None) -> None:
"""Check if old version exists and cleanup.

Need run inside executor.
"""
try:
origin = self.sys_docker.images.get(f"{self.image}:{self.version!s}")
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't find {self.image} for cleanup", _LOGGER.warning
) from err

# Cleanup Current
try:
images_list = self.sys_docker.images.list(name=self.image)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Corrupt docker overlayfs found: {err}", _LOGGER.warning
) from err

for image in images_list:
if origin.id == image.id:
continue

with suppress(docker.errors.DockerException, requests.RequestException):
_LOGGER.info("Cleanup images: %s", image.tags)
self.sys_docker.images.remove(image.id, force=True)

# Cleanup Old
if not old_image or self.image == old_image:
return

try:
images_list = self.sys_docker.images.list(name=old_image)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Corrupt docker overlayfs found: {err}", _LOGGER.warning
) from err

for image in images_list:
if origin.id == image.id:
continue

with suppress(docker.errors.DockerException, requests.RequestException):
_LOGGER.info("Cleanup images: %s", image.tags)
self.sys_docker.images.remove(image.id, force=True)

@process_lock
@Job(
name="docker_interface_restart",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
def restart(self) -> Awaitable[None]:
"""Restart docker container."""
return self.sys_loop.run_in_executor(None, self._restart)
return self.sys_run_in_executor(
self.sys_docker.restart_container, self.name, self.timeout
)

def _restart(self) -> None:
"""Restart docker container.

Need run inside executor.
"""
try:
container = self.sys_docker.containers.get(self.name)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err

_LOGGER.info("Restarting %s", self.image)
try:
container.restart(timeout=self.timeout)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't restart {self.image}: {err}", _LOGGER.warning
) from err

@process_lock
def execute_command(self, command: str) -> Awaitable[CommandReturn]:
@Job(
name="docker_interface_execute_command",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def execute_command(self, command: str) -> CommandReturn:
"""Create a temporary container and run command."""
return self.sys_run_in_executor(self._execute_command, command)

def _execute_command(self, command: str) -> CommandReturn:
"""Create a temporary container and run command.

Need run inside executor.
"""
raise NotImplementedError()

def stats(self) -> Awaitable[DockerStats]:
async def stats(self) -> DockerStats:
"""Read and return stats from container."""
return self.sys_run_in_executor(self._stats)
stats = await self.sys_run_in_executor(
self.sys_docker.container_stats, self.name
)
return DockerStats(stats)

def _stats(self) -> DockerStats:
"""Create a temporary container and run command.

Need run inside executor.
"""
async def is_failed(self) -> bool:
"""Return True if Docker is failing state."""
try:
docker_container = self.sys_docker.containers.get(self.name)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err

# container is not running
if docker_container.status != "running":
raise DockerError(f"Container {self.name} is not running", _LOGGER.error)

try:
stats = docker_container.stats(stream=False)
return DockerStats(stats)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't read stats from {self.name}: {err}", _LOGGER.error
) from err

def is_failed(self) -> Awaitable[bool]:
"""Return True if Docker is failing state.

Return a Future.
"""
return self.sys_run_in_executor(self._is_failed)

def _is_failed(self) -> bool:
"""Return True if Docker is failing state.

Need run inside executor.
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
return False
except (docker.errors.DockerException, requests.RequestException) as err:
@@ -677,18 +529,13 @@ class DockerInterface(CoreSysAttributes):
# Check return value
return int(docker_container.attrs["State"]["ExitCode"]) != 0

def get_latest_version(self) -> Awaitable[AwesomeVersion]:
async def get_latest_version(self) -> AwesomeVersion:
"""Return latest version of local image."""
return self.sys_run_in_executor(self._get_latest_version)

def _get_latest_version(self) -> AwesomeVersion:
"""Return latest version of local image.

Need run inside executor.
"""
available_version: list[AwesomeVersion] = []
try:
for image in self.sys_docker.images.list(self.image):
for image in await self.sys_run_in_executor(
self.sys_docker.images.list, self.image
):
for tag in image.tags:
version = AwesomeVersion(tag.partition(":")[2])
if version.strategy == AwesomeVersionStrategy.UNKNOWN:
@@ -713,51 +560,36 @@ class DockerInterface(CoreSysAttributes):
available_version.sort(reverse=True)
return available_version[0]

@process_lock
@Job(
name="docker_interface_run_inside",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
def run_inside(self, command: str) -> Awaitable[CommandReturn]:
"""Execute a command inside Docker container."""
return self.sys_run_in_executor(self._run_inside, command)
return self.sys_run_in_executor(
self.sys_docker.container_run_inside, self.name, command
)

def _run_inside(self, command: str) -> CommandReturn:
"""Execute a command inside Docker container.

Need run inside executor.
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
except docker.errors.NotFound:
raise DockerNotFound() from None
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err

# Execute
try:
code, output = docker_container.exec_run(command)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err

return CommandReturn(code, output)

def _validate_trust(
async def _validate_trust(
self, image_id: str, image: str, version: AwesomeVersion
) -> None:
"""Validate trust of content."""
checksum = image_id.partition(":")[2]
job = asyncio.run_coroutine_threadsafe(
self.sys_security.verify_own_content(checksum), self.sys_loop
)
job.result()
return await self.sys_security.verify_own_content(checksum)

@process_lock
def check_trust(self) -> Awaitable[None]:
@Job(
name="docker_interface_check_trust",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def check_trust(self) -> None:
"""Check trust of existing Docker image."""
return self.sys_run_in_executor(self._check_trust)

def _check_trust(self) -> None:
"""Check trust of current image."""
try:
image = self.sys_docker.images.get(f"{self.image}:{self.version!s}")
image = await self.sys_run_in_executor(
self.sys_docker.images.get, f"{self.image}:{self.version!s}"
)
except (docker.errors.DockerException, requests.RequestException):
return

self._validate_trust(image.id, self.image, self.version)
await self._validate_trust(image.id, self.image, self.version)

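`_validate_trust` illustrates why the async conversion simplifies things: the old sync method had to bridge back into the event loop with `run_coroutine_threadsafe` and block on the future, while the new coroutine just awaits. A small sketch, with `verify` standing in for `sys_security.verify_own_content`:

```python
"""Sketch of the simplification above."""
import asyncio

async def verify(checksum: str) -> None:
    await asyncio.sleep(0)  # stands in for the CodeNotary lookup

# Before: sync code running in a worker thread had to schedule the coroutine
# onto the loop and block until it finished.
def validate_sync(checksum: str, loop: asyncio.AbstractEventLoop) -> None:
    asyncio.run_coroutine_threadsafe(verify(checksum), loop).result()

# After: async code simply awaits it.
async def validate_async(checksum: str) -> None:
    await verify(checksum)

asyncio.run(validate_async("deadbeef"))
```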
@@ -11,8 +11,9 @@ from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
from docker import errors as docker_errors
from docker.api.client import APIClient
from docker.client import DockerClient
from docker.errors import DockerException, ImageNotFound, NotFound
from docker.models.containers import Container, ContainerCollection
from docker.models.images import ImageCollection
from docker.models.images import Image, ImageCollection
from docker.models.networks import Network
from docker.types.daemon import CancellableStream
import requests
@@ -351,3 +352,224 @@ class DockerAPI:

with suppress(docker_errors.DockerException, requests.RequestException):
network.disconnect(data.get("Name", cid), force=True)

def container_is_initialized(
self, name: str, image: str, version: AwesomeVersion
) -> bool:
"""Return True if docker container exists in good state and is built from expected image."""
try:
docker_container = self.containers.get(name)
docker_image = self.images.get(f"{image}:{version}")
except NotFound:
return False
except (DockerException, requests.RequestException) as err:
raise DockerError() from err

# Check the image is correct and state is good
return (
docker_container.image.id == docker_image.id
and docker_container.status in ("exited", "running", "created")
)

def stop_container(
self, name: str, timeout: int, remove_container: bool = True
) -> None:
"""Stop/remove Docker container."""
try:
docker_container: Container = self.containers.get(name)
except NotFound:
raise DockerNotFound() from None
except (DockerException, requests.RequestException) as err:
raise DockerError() from err

if docker_container.status == "running":
_LOGGER.info("Stopping %s application", name)
with suppress(DockerException, requests.RequestException):
docker_container.stop(timeout=timeout)

if remove_container:
with suppress(DockerException, requests.RequestException):
_LOGGER.info("Cleaning %s application", name)
docker_container.remove(force=True)

def start_container(self, name: str) -> None:
"""Start Docker container."""
try:
docker_container: Container = self.containers.get(name)
except NotFound:
raise DockerNotFound(
f"{name} not found for starting up", _LOGGER.error
) from None
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Could not get {name} for starting up", _LOGGER.error
) from err

_LOGGER.info("Starting %s", name)
try:
docker_container.start()
except (DockerException, requests.RequestException) as err:
raise DockerError(f"Can't start {name}: {err}", _LOGGER.error) from err

def restart_container(self, name: str, timeout: int) -> None:
"""Restart docker container."""
try:
container: Container = self.containers.get(name)
except NotFound:
raise DockerNotFound() from None
except (DockerException, requests.RequestException) as err:
raise DockerError() from err

_LOGGER.info("Restarting %s", name)
try:
container.restart(timeout=timeout)
except (DockerException, requests.RequestException) as err:
raise DockerError(f"Can't restart {name}: {err}", _LOGGER.warning) from err

def container_logs(self, name: str, tail: int = 100) -> bytes:
"""Return Docker logs of container."""
try:
docker_container: Container = self.containers.get(name)
except NotFound:
raise DockerNotFound() from None
except (DockerException, requests.RequestException) as err:
raise DockerError() from err

try:
return docker_container.logs(tail=tail, stdout=True, stderr=True)
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't grep logs from {name}: {err}", _LOGGER.warning
) from err

def container_stats(self, name: str) -> dict[str, Any]:
"""Read and return stats from container."""
try:
docker_container: Container = self.containers.get(name)
except NotFound:
raise DockerNotFound() from None
except (DockerException, requests.RequestException) as err:
raise DockerError() from err

# container is not running
if docker_container.status != "running":
raise DockerError(f"Container {name} is not running", _LOGGER.error)

try:
return docker_container.stats(stream=False)
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't read stats from {name}: {err}", _LOGGER.error
) from err

def container_run_inside(self, name: str, command: str) -> CommandReturn:
"""Execute a command inside Docker container."""
try:
docker_container: Container = self.containers.get(name)
except NotFound:
raise DockerNotFound() from None
except (DockerException, requests.RequestException) as err:
raise DockerError() from err

# Execute
try:
code, output = docker_container.exec_run(command)
except (DockerException, requests.RequestException) as err:
raise DockerError() from err

return CommandReturn(code, output)

def remove_image(
self, image: str, version: AwesomeVersion, latest: bool = True
) -> None:
"""Remove a Docker image by version and latest."""
try:
if latest:
_LOGGER.info("Removing image %s with latest", image)
with suppress(ImageNotFound):
self.images.remove(image=f"{image}:latest", force=True)

_LOGGER.info("Removing image %s with %s", image, version)
with suppress(ImageNotFound):
self.images.remove(image=f"{image}:{version!s}", force=True)

except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't remove image {image}: {err}", _LOGGER.warning
) from err

def import_image(self, tar_file: Path) -> Image | None:
"""Import a tar file as image."""
try:
with tar_file.open("rb") as read_tar:
docker_image_list: list[Image] = self.images.load(read_tar)

if len(docker_image_list) != 1:
_LOGGER.warning(
"Unexpected image count %d while importing image from tar",
len(docker_image_list),
)
return None
return docker_image_list[0]
except (DockerException, OSError) as err:
raise DockerError(
f"Can't import image from tar: {err}", _LOGGER.error
) from err

def export_image(self, image: str, version: AwesomeVersion, tar_file: Path) -> None:
"""Export current images into a tar file."""
try:
image = self.api.get_image(f"{image}:{version}")
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't fetch image {image}: {err}", _LOGGER.error
) from err

_LOGGER.info("Export image %s to %s", image, tar_file)
try:
with tar_file.open("wb") as write_tar:
for chunk in image:
write_tar.write(chunk)
except (OSError, requests.RequestException) as err:
raise DockerError(
f"Can't write tar file {tar_file}: {err}", _LOGGER.error
) from err

_LOGGER.info("Export image %s done", image)

def cleanup_old_images(
self,
current_image: str,
current_version: AwesomeVersion,
old_images: set[str] | None = None,
) -> None:
"""Clean up old versions of an image."""
try:
current: Image = self.images.get(f"{current_image}:{current_version!s}")
except ImageNotFound:
raise DockerNotFound(
f"{current_image} not found for cleanup", _LOGGER.warning
) from None
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Can't get {current_image} for cleanup", _LOGGER.warning
) from err

# Cleanup old and current
image_names = list(
old_images | {current_image} if old_images else {current_image}
)
try:
images_list = self.images.list(name=image_names)
except (DockerException, requests.RequestException) as err:
raise DockerError(
f"Corrupt docker overlayfs found: {err}", _LOGGER.warning
) from err

for image in images_list:
if current.id == image.id:
continue

with suppress(DockerException, requests.RequestException):
_LOGGER.info("Cleanup images: %s", image.tags)
self.images.remove(image.id, force=True)

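`cleanup_old_images` selects candidates by taking the set union of the current image name and any old names, then deletes every matching tag whose image id differs from the current one. A standalone sketch of that selection logic, with made-up data for illustration:

```python
"""Sketch of the cleanup selection added above."""
current_image = "ghcr.io/home-assistant/home-assistant"
old_images = {"homeassistant/home-assistant"}
current_id = "sha256:aaa"

# (tag, id) pairs standing in for docker.images.list(name=...)
local_images = [
    ("ghcr.io/home-assistant/home-assistant:2023.8.0", "sha256:aaa"),
    ("ghcr.io/home-assistant/home-assistant:2023.7.1", "sha256:bbb"),
    ("homeassistant/home-assistant:2023.6.0", "sha256:ccc"),
]

image_names = old_images | {current_image}
to_remove = [
    tag
    for tag, image_id in local_images
    if tag.rsplit(":", 1)[0] in image_names and image_id != current_id
]
print(to_remove)  # the two stale tags; the image backing the current version survives
```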
@@ -2,6 +2,9 @@
import logging

from ..coresys import CoreSysAttributes
from ..exceptions import DockerJobError
from ..jobs.const import JobExecutionLimit
from ..jobs.decorator import Job
from .const import ENV_TIME, Capabilities
from .interface import DockerInterface

@@ -24,24 +27,18 @@ class DockerMulticast(DockerInterface, CoreSysAttributes):
return MULTICAST_DOCKER_NAME

@property
def capabilities(self) -> list[str]:
def capabilities(self) -> list[Capabilities]:
"""Generate needed capabilities."""
return [Capabilities.NET_ADMIN.value]
return [Capabilities.NET_ADMIN]

def _run(self) -> None:
"""Run Docker image.

Need run inside executor.
"""
if self._is_running():
return

# Cleanup
self._stop()

# Create & Run container
docker_container = self.sys_docker.run(
self.image,
@Job(
name="docker_multicast_run",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def run(self) -> None:
"""Run Docker image."""
await self._run(
tag=str(self.sys_plugins.multicast.version),
init=False,
name=self.name,
@@ -53,8 +50,6 @@ class DockerMulticast(DockerInterface, CoreSysAttributes):
extra_hosts={"supervisor": self.sys_docker.network.supervisor},
environment={ENV_TIME: self.sys_timezone},
)

self._meta = docker_container.attrs
_LOGGER.info(
"Starting Multicast %s with version %s - Host", self.image, self.version
)

@@ -21,7 +21,7 @@ class DockerNetwork:
def __init__(self, docker_client: docker.DockerClient):
"""Initialize internal Supervisor network."""
self.docker: docker.DockerClient = docker_client
self.network: docker.models.networks.Network = self._get_network()
self._network: docker.models.networks.Network = self._get_network()

@property
def name(self) -> str:
@@ -29,18 +29,14 @@ class DockerNetwork:
return DOCKER_NETWORK

@property
def containers(self) -> list[docker.models.containers.Container]:
"""Return connected containers from network."""
containers: list[docker.models.containers.Container] = []
for cid, _ in self.network.attrs.get("Containers", {}).items():
try:
containers.append(self.docker.containers.get(cid))
except docker.errors.NotFound:
_LOGGER.warning("Docker network is corrupt! %s", cid)
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Unknown error with container lookup %s", err)
def network(self) -> docker.models.networks.Network:
"""Return docker network."""
return self._network

return containers
@property
def containers(self) -> list[str]:
"""Return connected containers from network."""
return list(self.network.attrs.get("Containers", {}).keys())

@property
def gateway(self) -> IPv4Address:

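The reworked `containers` property returns the connected container ids straight out of the network attrs instead of resolving each id to a `Container` object with a separate API call. A tiny sketch of the new shape; the attrs dict mirrors what Docker network attrs look like:

```python
"""Sketch of the network.containers change above."""
attrs = {
    "Containers": {
        "abc123": {"Name": "homeassistant"},
        "def456": {"Name": "hassio_dns"},
    }
}

def connected_container_ids(attrs: dict) -> list[str]:
    # No per-id containers.get() round trips needed anymore.
    return list(attrs.get("Containers", {}).keys())

print(connected_container_ids(attrs))  # ['abc123', 'def456']
```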
@@ -3,6 +3,9 @@ import logging

from ..const import DOCKER_NETWORK_MASK
from ..coresys import CoreSysAttributes
from ..exceptions import DockerJobError
from ..jobs.const import JobExecutionLimit
from ..jobs.decorator import Job
from .const import ENV_TIME, ENV_TOKEN, MOUNT_DOCKER, RestartPolicy
from .interface import DockerInterface

@@ -25,20 +28,14 @@ class DockerObserver(DockerInterface, CoreSysAttributes):
"""Return name of Docker container."""
return OBSERVER_DOCKER_NAME

def _run(self) -> None:
"""Run Docker image.

Need run inside executor.
"""
if self._is_running():
return

# Cleanup
self._stop()

# Create & Run container
docker_container = self.sys_docker.run(
self.image,
@Job(
name="docker_observer_run",
limit=JobExecutionLimit.GROUP_ONCE,
on_condition=DockerJobError,
)
async def run(self) -> None:
"""Run Docker image."""
await self._run(
tag=str(self.sys_plugins.observer.version),
init=False,
ipv4=self.sys_docker.network.observer,
@@ -46,7 +43,7 @@ class DockerObserver(DockerInterface, CoreSysAttributes):
hostname=self.name.replace("_", "-"),
detach=True,
security_opt=self.security_opt,
restart_policy={"Name": RestartPolicy.ALWAYS.value},
restart_policy={"Name": RestartPolicy.ALWAYS},
extra_hosts={"supervisor": self.sys_docker.network.supervisor},
environment={
ENV_TIME: self.sys_timezone,
@@ -57,8 +54,6 @@ class DockerObserver(DockerInterface, CoreSysAttributes):
ports={"80/tcp": 4357},
oom_score_adj=-300,
)

self._meta = docker_container.attrs
_LOGGER.info(
"Starting Observer %s with version %s - %s",
self.image,

@@ -8,15 +8,16 @@ from awesomeversion.awesomeversion import AwesomeVersion
import docker
import requests

from ..coresys import CoreSysAttributes
from ..exceptions import DockerError
from ..jobs.const import JobExecutionLimit
from ..jobs.decorator import Job
from .const import PropagationMode
from .interface import DockerInterface

_LOGGER: logging.Logger = logging.getLogger(__name__)


class DockerSupervisor(DockerInterface, CoreSysAttributes):
class DockerSupervisor(DockerInterface):
"""Docker Supervisor wrapper for Supervisor."""

@property
@@ -38,20 +39,20 @@ class DockerSupervisor(DockerInterface, CoreSysAttributes):
def host_mounts_available(self) -> bool:
"""Return True if container can see mounts on host within its data directory."""
return self._meta and any(
mount.get("Propagation") == PropagationMode.SLAVE.value
mount.get("Propagation") == PropagationMode.SLAVE
for mount in self.meta_mounts
if mount.get("Destination") == "/data"
)

def _attach(
self, version: AwesomeVersion, skip_state_event_if_down: bool = False
@Job(name="docker_supervisor_attach", limit=JobExecutionLimit.GROUP_WAIT)
async def attach(
self, version: AwesomeVersion, *, skip_state_event_if_down: bool = False
) -> None:
"""Attach to running docker container.

Need run inside executor.
"""
"""Attach to running docker container."""
try:
docker_container = self.sys_docker.containers.get(self.name)
docker_container = await self.sys_run_in_executor(
self.sys_docker.containers.get, self.name
)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerError() from err

@@ -63,17 +64,19 @@ class DockerSupervisor(DockerInterface, CoreSysAttributes):
)

# If already attached
if docker_container in self.sys_docker.network.containers:
if docker_container.id in self.sys_docker.network.containers:
return

# Attach to network
_LOGGER.info("Connecting Supervisor to hassio-network")
self.sys_docker.network.attach_container(
await self.sys_run_in_executor(
self.sys_docker.network.attach_container,
docker_container,
alias=["supervisor"],
ipv4=self.sys_docker.network.supervisor,
)

@Job(name="docker_supervisor_retag", limit=JobExecutionLimit.GROUP_WAIT)
def retag(self) -> Awaitable[None]:
"""Retag latest image to version."""
return self.sys_run_in_executor(self._retag)
@@ -93,6 +96,7 @@ class DockerSupervisor(DockerInterface, CoreSysAttributes):
f"Can't retag Supervisor version: {err}", _LOGGER.error
) from err

@Job(name="docker_supervisor_update_start_tag", limit=JobExecutionLimit.GROUP_WAIT)
def update_start_tag(self, image: str, version: AwesomeVersion) -> Awaitable[None]:
"""Update start tag to new version."""
return self.sys_run_in_executor(self._update_start_tag, image, version)

@@ -36,6 +36,22 @@ class JobConditionException(JobException):
"""Exception happening for job conditions."""


class JobStartException(JobException):
"""Exception occurred starting a job in the current asyncio task."""


class JobNotFound(JobException):
"""Exception for job not found."""


class JobInvalidUpdate(JobException):
"""Exception for invalid update to a job."""


class JobGroupExecutionLimitExceeded(JobException):
"""Exception when job group execution limit exceeded."""


# HomeAssistant


@@ -51,6 +67,10 @@ class HomeAssistantCrashError(HomeAssistantError):
"""Error on crash of a Home Assistant startup."""


class HomeAssistantStartupTimeout(HomeAssistantCrashError):
"""Timeout waiting for Home Assistant successful startup."""


class HomeAssistantAPIError(HomeAssistantError):
"""Home Assistant API exception."""

@@ -315,6 +335,10 @@ class DBusNotConnectedError(HostNotSupportedError):
"""D-Bus is not connected and call a method."""


class DBusServiceUnkownError(HassioNotSupportedError):
"""D-Bus service was not available."""


class DBusInterfaceError(HassioNotSupportedError):
"""D-Bus interface not connected."""

@@ -343,6 +367,10 @@ class DBusTimeoutError(DBusError):
"""D-Bus call timed out."""


class DBusNoReplyError(DBusError):
"""D-Bus remote didn't reply/disconnected."""


class DBusFatalError(DBusError):
"""D-Bus call going wrong.

@@ -478,6 +506,10 @@ class DockerNotFound(DockerError):
"""Docker object doesn't exist."""


class DockerJobError(DockerError, JobException):
"""Error executing docker job."""


# Hardware


@@ -561,6 +593,14 @@ class HomeAssistantBackupError(BackupError, HomeAssistantError):
"""Raise if an error during Home Assistant Core backup is happening."""


class BackupInvalidError(BackupError):
"""Raise if backup or password provided is invalid."""


class BackupJobError(BackupError, JobException):
"""Raise on Backup job error."""


# Security


@@ -593,3 +633,10 @@ class MountNotFound(MountError):

class MountJobError(MountError, JobException):
"""Raise on Mount job error."""


# Network


class NetworkInterfaceNotFound(HassioError):
"""Raise on network interface not found."""

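The new exception types such as `DockerJobError(DockerError, JobException)` use multiple inheritance so one raise is catchable both by the Docker error handling and by the job machinery. A trivial sketch of that property:

```python
"""Sketch of the dual-inheritance pattern behind DockerJobError above."""
class DockerError(Exception):
    """Docker errors."""

class JobException(Exception):
    """Job errors."""

class DockerJobError(DockerError, JobException):
    """Error executing docker job."""

err = DockerJobError("boom")
print(isinstance(err, DockerError), isinstance(err, JobException))  # True True
```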
@@ -1,8 +1,8 @@
"""Constants for hardware."""
from enum import Enum
from enum import StrEnum


class UdevSubsystem(str, Enum):
class UdevSubsystem(StrEnum):
"""Udev subsystem class."""

SERIAL = "tty"
@@ -24,7 +24,7 @@ class UdevSubsystem(str, Enum):
RPI_H264MEM = "rpivid-h264mem"


class PolicyGroup(str, Enum):
class PolicyGroup(StrEnum):
"""Policy groups backend."""

UART = "uart"
@@ -35,14 +35,14 @@ class PolicyGroup(str, Enum):
BLUETOOTH = "bluetooth"


class HardwareAction(str, Enum):
class HardwareAction(StrEnum):
"""Hardware device action."""

ADD = "add"
REMOVE = "remove"


class UdevKernelAction(str, Enum):
class UdevKernelAction(StrEnum):
"""Udev kernel device action."""

ADD = "add"

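The `(str, Enum)` to `StrEnum` migration matters on Python 3.11+, where mixed-in enums started formatting with the class name while `StrEnum` members format as their plain values, which is why the scattered `.value` accesses elsewhere in this diff can go away. A quick sketch (requires Python 3.11+):

```python
"""Sketch of the StrEnum migration above."""
from enum import Enum, StrEnum

class OldAction(str, Enum):
    ADD = "add"

class NewAction(StrEnum):
    ADD = "add"

print(f"{OldAction.ADD}")  # OldAction.ADD on 3.11+, hence the .value calls
print(f"{NewAction.ADD}")  # add
```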
@@ -1,5 +1,5 @@
"""Read hardware info from system."""
from datetime import datetime
from datetime import UTC, datetime
import logging
from pathlib import Path
import re
@@ -55,7 +55,7 @@ class HwHelper(CoreSysAttributes):
_LOGGER.error("Can't find last boot time!")
return None

return datetime.utcfromtimestamp(int(found.group(1)))
return datetime.fromtimestamp(int(found.group(1)), UTC)

def hide_virtual_device(self, udev_device: pyudev.Device) -> bool:
"""Small helper to hide devices that are not needed."""

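The datetime change swaps the naive `utcfromtimestamp`, deprecated in Python 3.12, for `fromtimestamp` with an explicit UTC timezone, yielding an aware datetime. A sketch (requires Python 3.11+ for `datetime.UTC`):

```python
"""Sketch of the datetime fix above."""
from datetime import UTC, datetime

ts = 1_690_000_000
naive = datetime.utcfromtimestamp(ts)    # tzinfo=None, deprecated in 3.12
aware = datetime.fromtimestamp(ts, UTC)  # tzinfo=UTC

print(naive.tzinfo, aware.tzinfo)  # None UTC
```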
@@ -94,7 +94,7 @@ class HardwareManager(CoreSysAttributes):
udev_device: pyudev.Device = pyudev.Devices.from_sys_path(
self._udev, str(device.sysfs)
)
return udev_device.find_parent(subsystem.value) is not None
return udev_device.find_parent(subsystem) is not None

def _import_devices(self) -> None:
"""Import fresh from udev database."""

@@ -7,16 +7,19 @@ from typing import Any, AsyncContextManager

import aiohttp
from aiohttp import hdrs
from awesomeversion import AwesomeVersion

from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import HomeAssistantAPIError, HomeAssistantAuthError
from ..jobs.const import JobExecutionLimit
from ..jobs.decorator import Job
from ..utils import check_port
from ..utils import check_port, version_is_new_enough
from .const import LANDINGPAGE

_LOGGER: logging.Logger = logging.getLogger(__name__)

GET_CORE_STATE_MIN_VERSION: AwesomeVersion = AwesomeVersion("2023.8.0.dev20230720")


class HomeAssistantAPI(CoreSysAttributes):
"""Home Assistant core object for handling it."""
@@ -29,7 +32,11 @@ class HomeAssistantAPI(CoreSysAttributes):
self.access_token: str | None = None
self._access_token_expires: datetime | None = None

@Job(limit=JobExecutionLimit.SINGLE_WAIT)
@Job(
name="home_assistant_api_ensure_access_token",
limit=JobExecutionLimit.SINGLE_WAIT,
internal=True,
)
async def ensure_access_token(self) -> None:
"""Ensure there is an access token."""
if (
@@ -100,42 +107,64 @@ class HomeAssistantAPI(CoreSysAttributes):
|
||||
continue
|
||||
yield resp
|
||||
return
|
||||
except (asyncio.TimeoutError, aiohttp.ClientError) as err:
|
||||
except (TimeoutError, aiohttp.ClientError) as err:
|
||||
_LOGGER.error("Error on call %s: %s", url, err)
|
||||
break
|
||||
|
||||
raise HomeAssistantAPIError()
 
-    async def get_config(self) -> dict[str, Any]:
-        """Return Home Assistant config."""
-        async with self.make_request("get", "api/config") as resp:
+    async def _get_json(self, path: str) -> dict[str, Any]:
+        """Return JSON from a GET request to the Home Assistant API."""
+        async with self.make_request("get", path) as resp:
             if resp.status in (200, 201):
                 return await resp.json()
             else:
                 _LOGGER.debug("Home Assistant API return: %d", resp.status)
         raise HomeAssistantAPIError()
 
-    async def check_api_state(self) -> bool:
-        """Return True if Home Assistant up and running."""
+    async def get_config(self) -> dict[str, Any]:
+        """Return Home Assistant config."""
+        return await self._get_json("api/config")
+
+    async def get_core_state(self) -> dict[str, Any]:
+        """Return Home Assistant core state."""
+        return await self._get_json("api/core/state")
+
+    async def get_api_state(self) -> str | None:
+        """Return state of Home Assistant Core or None."""
         # Skip check on landingpage
         if (
             self.sys_homeassistant.version is None
             or self.sys_homeassistant.version == LANDINGPAGE
         ):
-            return False
+            return None
 
         # Check if port is up
-        if not await self.sys_run_in_executor(
-            check_port,
+        if not await check_port(
             self.sys_homeassistant.ip_address,
             self.sys_homeassistant.api_port,
         ):
-            return False
+            return None
 
         # Check if API is up
         with suppress(HomeAssistantAPIError):
-            data = await self.get_config()
-            if data and data.get("state", "RUNNING") == "RUNNING":
-                return True
+            # get_core_state is available since 2023.8.0 and preferred
+            # since it is significantly faster than get_config because
+            # it does not require serializing the entire config
+            if version_is_new_enough(
+                self.sys_homeassistant.version, GET_CORE_STATE_MIN_VERSION
+            ):
+                data = await self.get_core_state()
+            else:
+                data = await self.get_config()
+            # Older versions of Home Assistant do not expose the state
+            if data:
+                return data.get("state", "RUNNING")
+
+        return None
+
+    async def check_api_state(self) -> bool:
+        """Return Home Assistant Core state if up."""
+        if state := await self.get_api_state():
+            return state == "RUNNING"
         return False
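
The comment in the hunk above is the heart of this branch: polling state via `api/config` forces Core to serialize its entire configuration on every check, while `api/core/state` returns only the state string. A standalone sketch of the version gate (the `pick_state_endpoint` helper is illustrative; the Supervisor uses its own `version_is_new_enough` util):

```python
from awesomeversion import AwesomeVersion

GET_CORE_STATE_MIN_VERSION = AwesomeVersion("2023.8.0.dev20230720")

def pick_state_endpoint(version: AwesomeVersion | None) -> str:
    """Choose the cheapest endpoint that can answer "what state is Core in?".

    api/core/state returns only the state string; api/config serializes the
    whole configuration just to read its single "state" field.
    """
    if version is not None and version >= GET_CORE_STATE_MIN_VERSION:
        return "api/core/state"
    return "api/config"

assert pick_state_endpoint(AwesomeVersion("2023.9.0")) == "api/core/state"
assert pick_state_endpoint(AwesomeVersion("2023.7.1")) == "api/config"
assert pick_state_endpoint(None) == "api/config"  # version unknown: stay safe
```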
@@ -1,6 +1,6 @@
 """Constants for homeassistant."""
 from datetime import timedelta
-from enum import Enum
+from enum import StrEnum
 
 from awesomeversion import AwesomeVersion
 
@@ -19,7 +19,7 @@ CLOSING_STATES = [
 ]
 
 
-class WSType(str, Enum):
+class WSType(StrEnum):
     """Websocket types."""
 
     AUTH = "auth"
@@ -28,12 +28,13 @@ class WSType(str, Enum):
     BACKUP_END = "backup/end"
 
 
-class WSEvent(str, Enum):
+class WSEvent(StrEnum):
     """Websocket events."""
 
     ADDON = "addon"
     HEALTH_CHANGED = "health_changed"
     ISSUE_CHANGED = "issue_changed"
     ISSUE_REMOVED = "issue_removed"
+    JOB = "job"
     SUPERVISOR_UPDATE = "supervisor_update"
     SUPPORTED_CHANGED = "supported_changed"
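
`WSType` and `WSEvent` move from the `str, Enum` mixin to Python 3.11's `StrEnum`, which changes how members stringify; this pairs with the watchdog hunk further down that logs `state` instead of `state.value`. A toy illustration (not the Supervisor's classes):

```python
from enum import Enum, StrEnum  # StrEnum requires Python 3.11+

class OldStyle(str, Enum):
    AUTH = "auth"

class NewStyle(StrEnum):
    AUTH = "auth"

print(str(OldStyle.AUTH))  # "OldStyle.AUTH" - class name leaks into logs
print(str(NewStyle.AUTH))  # "auth"          - plain value, as intended
print(f"{OldStyle.AUTH}")  # "OldStyle.AUTH" on 3.11+ (format() changed there)
print(f"{NewStyle.AUTH}")  # "auth"
```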
@@ -2,16 +2,18 @@
 import asyncio
 from collections.abc import Awaitable
 from contextlib import suppress
+from dataclasses import dataclass
 from datetime import datetime, timedelta
 import logging
 import re
 import secrets
 import shutil
+from typing import Final
 
-import attr
 from awesomeversion import AwesomeVersion
 
 from ..const import ATTR_HOMEASSISTANT, BusEvent
-from ..coresys import CoreSys, CoreSysAttributes
+from ..coresys import CoreSys
 from ..docker.const import ContainerState
 from ..docker.homeassistant import DockerHomeAssistant
 from ..docker.monitor import DockerContainerStateEvent
@@ -21,12 +23,15 @@ from ..exceptions import (
     HomeAssistantCrashError,
     HomeAssistantError,
     HomeAssistantJobError,
+    HomeAssistantStartupTimeout,
     HomeAssistantUpdateError,
+    JobException,
 )
-from ..jobs.const import JobExecutionLimit
+from ..jobs.const import JOB_GROUP_HOME_ASSISTANT_CORE, JobExecutionLimit
 from ..jobs.decorator import Job, JobCondition
+from ..jobs.job_group import JobGroup
 from ..resolution.const import ContextType, IssueType
-from ..utils import convert_to_ascii, process_lock
+from ..utils import convert_to_ascii
 from ..utils.sentry import capture_exception
 from .const import (
     LANDINGPAGE,
@@ -38,25 +43,29 @@ from .const import (
 
 _LOGGER: logging.Logger = logging.getLogger(__name__)
 
+SECONDS_BETWEEN_API_CHECKS: Final[int] = 5
+# Core Stage 1 and some wiggle room
+STARTUP_API_RESPONSE_TIMEOUT: Final[timedelta] = timedelta(minutes=3)
+# All stages plus event start timeout and some wiggle room
+STARTUP_API_CHECK_RUNNING_TIMEOUT: Final[timedelta] = timedelta(minutes=15)
 RE_YAML_ERROR = re.compile(r"homeassistant\.util\.yaml")
 
 
-@attr.s(frozen=True)
+@dataclass
 class ConfigResult:
     """Return object from config check."""
 
-    valid = attr.ib()
-    log = attr.ib()
+    valid: bool
+    log: str
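
The `ConfigResult` port from `attrs` to a stdlib dataclass adds type annotations but quietly drops `frozen=True`: the plain `@dataclass` is mutable. A side-by-side toy comparison (requires the `attrs` package for the old form):

```python
from dataclasses import dataclass

import attr  # the attrs package, used by the old implementation

@attr.s(frozen=True)
class OldResult:
    valid = attr.ib()  # untyped; instances are immutable
    log = attr.ib()

@dataclass
class NewResult:
    valid: bool  # typed; note: no frozen=True, so instances are mutable
    log: str

old = OldResult(valid=True, log="")
new = NewResult(valid=True, log="")
# old.valid = False   # attrs would raise FrozenInstanceError here
new.valid = False      # allowed on the dataclass version
```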
 
 
-class HomeAssistantCore(CoreSysAttributes):
+class HomeAssistantCore(JobGroup):
     """Home Assistant core object to handle it."""
 
     def __init__(self, coresys: CoreSys):
         """Initialize Home Assistant object."""
-        self.coresys: CoreSys = coresys
+        super().__init__(coresys, JOB_GROUP_HOME_ASSISTANT_CORE)
         self.instance: DockerHomeAssistant = DockerHomeAssistant(coresys)
-        self.lock: asyncio.Lock = asyncio.Lock()
         self._error_state: bool = False
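
`HomeAssistantCore` now derives from `JobGroup`, and the `GROUP_ONCE` limit used throughout the new `@Job` decorators means all jobs of the group share a single slot: a second concurrent start/stop/update call fails fast (raising the `on_condition` error) instead of queuing behind the first. A rough plain-asyncio approximation of that behavior (not the Supervisor's jobs framework):

```python
import asyncio

class GroupOnceError(RuntimeError):
    """Stand-in for the job's on_condition error (HomeAssistantJobError)."""

class JobGroupSketch:
    """One running job per group; concurrent callers fail fast."""

    def __init__(self) -> None:
        self._lock = asyncio.Lock()

    async def run(self, coro):
        if self._lock.locked():
            coro.close()  # avoid a 'never awaited' warning on rejection
            raise GroupOnceError("another job of this group is running")
        async with self._lock:
            return await coro

async def main() -> None:
    group = JobGroupSketch()

    async def slow_job() -> None:
        await asyncio.sleep(0.1)

    first = asyncio.create_task(group.run(slow_job()))
    await asyncio.sleep(0)  # let the first job grab the group slot
    try:
        await group.run(slow_job())
    except GroupOnceError as err:
        print("rejected:", err)
    await first

asyncio.run(main())
```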
 
     @property
@@ -95,9 +104,13 @@ class HomeAssistantCore(CoreSysAttributes):
         _LOGGER.info("Starting HomeAssistant landingpage")
         if not await self.instance.is_running():
             with suppress(HomeAssistantError):
-                await self._start()
+                await self.start()
 
-    @process_lock
+    @Job(
+        name="home_assistant_core_install_landing_page",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=HomeAssistantJobError,
+    )
     async def install_landingpage(self) -> None:
         """Install a landing page."""
         # Try to use a preinstalled landingpage
@@ -116,7 +129,7 @@ class HomeAssistantCore(CoreSysAttributes):
         while True:
             if not self.sys_updater.image_homeassistant:
                 _LOGGER.warning(
-                    "Found no information about Home Assistant. Retry in 30sec"
+                    "Found no information about Home Assistant. Retrying in 30sec"
                 )
                 await asyncio.sleep(30)
                 await self.sys_updater.reload()
@@ -127,19 +140,23 @@ class HomeAssistantCore(CoreSysAttributes):
                     LANDINGPAGE, image=self.sys_updater.image_homeassistant
                 )
                 break
-            except DockerError:
+            except (DockerError, JobException):
                 pass
             except Exception as err:  # pylint: disable=broad-except
                 capture_exception(err)
 
-            _LOGGER.warning("Fails install landingpage, retry after 30sec")
+            _LOGGER.warning("Failed to install landingpage, retrying after 30sec")
             await asyncio.sleep(30)
 
         self.sys_homeassistant.version = LANDINGPAGE
         self.sys_homeassistant.image = self.sys_updater.image_homeassistant
         self.sys_homeassistant.save_data()
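
The landingpage loop above and the `install` loop below share one retry shape: swallow expected transient errors (`DockerError`, and now `JobException`), report unexpected ones, sleep 30 seconds, try again. The pattern in isolation (generic names standing in for the Supervisor internals):

```python
import asyncio
import logging

_LOGGER = logging.getLogger(__name__)

async def install_with_retry(attempt_install, *, delay: float = 30.0):
    """Retry an install step until it succeeds (sketch of the loops here).

    Expected transient failures (DockerError/JobException upstream; a generic
    ConnectionError stands in here) just wait and retry; unexpected errors
    are reported but still retried.
    """
    while True:
        try:
            return await attempt_install()
        except ConnectionError:
            pass
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unexpected install failure")
        _LOGGER.warning("Install failed, retrying in %s seconds", delay)
        await asyncio.sleep(delay)
```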
 
-    @process_lock
+    @Job(
+        name="home_assistant_core_install",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=HomeAssistantJobError,
+    )
     async def install(self) -> None:
         """Install Home Assistant."""
         _LOGGER.info("Home Assistant setup")
@@ -155,12 +172,12 @@ class HomeAssistantCore(CoreSysAttributes):
                     image=self.sys_updater.image_homeassistant,
                 )
                 break
-            except DockerError:
+            except (DockerError, JobException):
                 pass
             except Exception as err:  # pylint: disable=broad-except
                 capture_exception(err)
 
-            _LOGGER.warning("Error on Home Assistant installation. Retry in 30sec")
+            _LOGGER.warning("Error on Home Assistant installation. Retrying in 30sec")
             await asyncio.sleep(30)
 
         _LOGGER.info("Home Assistant docker now installed")
@@ -171,7 +188,7 @@ class HomeAssistantCore(CoreSysAttributes):
         # finishing
         try:
             _LOGGER.info("Starting Home Assistant")
-            await self._start()
+            await self.start()
         except HomeAssistantError:
             _LOGGER.error("Can't start Home Assistant!")
 
@@ -179,8 +196,8 @@ class HomeAssistantCore(CoreSysAttributes):
         with suppress(DockerError):
             await self.instance.cleanup()
 
-    @process_lock
     @Job(
+        name="home_assistant_core_update",
         conditions=[
             JobCondition.FREE_SPACE,
             JobCondition.HEALTHY,
@@ -188,6 +205,7 @@ class HomeAssistantCore(CoreSysAttributes):
             JobCondition.PLUGINS_UPDATED,
             JobCondition.SUPERVISOR_UPDATED,
         ],
+        limit=JobExecutionLimit.GROUP_ONCE,
         on_condition=HomeAssistantJobError,
     )
     async def update(
@@ -231,7 +249,7 @@ class HomeAssistantCore(CoreSysAttributes):
         self.sys_homeassistant.image = self.sys_updater.image_homeassistant
 
         if running:
-            await self._start()
+            await self.start()
             _LOGGER.info("Successfully started Home Assistant %s", to_version)
 
             # Successful - last step
@@ -281,23 +299,11 @@ class HomeAssistantCore(CoreSysAttributes):
         self.sys_resolution.create_issue(IssueType.UPDATE_FAILED, ContextType.CORE)
         raise HomeAssistantUpdateError()
 
-    async def _start(self) -> None:
-        """Start Home Assistant Docker & wait."""
-        # Create new API token
-        self.sys_homeassistant.supervisor_token = secrets.token_hex(56)
-        self.sys_homeassistant.save_data()
-
-        # Write audio settings
-        self.sys_homeassistant.write_pulse()
-
-        try:
-            await self.instance.run()
-        except DockerError as err:
-            raise HomeAssistantError() from err
-
-        await self._block_till_run(self.sys_homeassistant.version)
-
-    @process_lock
+    @Job(
+        name="home_assistant_core_start",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=HomeAssistantJobError,
+    )
     async def start(self) -> None:
         """Run Home Assistant docker."""
         if await self.instance.is_running():
@@ -314,20 +320,37 @@ class HomeAssistantCore(CoreSysAttributes):
             await self._block_till_run(self.sys_homeassistant.version)
         # No Instance/Container found, extended start
         else:
-            await self._start()
+            # Create new API token
+            self.sys_homeassistant.supervisor_token = secrets.token_hex(56)
+            self.sys_homeassistant.save_data()
+
+            # Write audio settings
+            self.sys_homeassistant.write_pulse()
+
+            try:
+                await self.instance.run()
+            except DockerError as err:
+                raise HomeAssistantError() from err
+
+            await self._block_till_run(self.sys_homeassistant.version)
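
With `_start()` inlined, the token and audio setup now run only on this cold-start path. The token itself is plain stdlib `secrets`:

```python
import secrets

# 56 random bytes rendered as hex -> a 112-character token string,
# regenerated on every cold start of the Core container.
token = secrets.token_hex(56)
assert len(token) == 112
```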
 
-    @process_lock
+    @Job(
+        name="home_assistant_core_stop",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=HomeAssistantJobError,
+    )
     async def stop(self) -> None:
-        """Stop Home Assistant Docker.
-
-        Return a coroutine.
-        """
+        """Stop Home Assistant Docker."""
         try:
             return await self.instance.stop(remove_container=False)
         except DockerError as err:
             raise HomeAssistantError() from err
 
-    @process_lock
+    @Job(
+        name="home_assistant_core_restart",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=HomeAssistantJobError,
+    )
     async def restart(self) -> None:
         """Restart Home Assistant Docker."""
         try:
@@ -337,12 +360,16 @@ class HomeAssistantCore(CoreSysAttributes):
 
         await self._block_till_run(self.sys_homeassistant.version)
 
-    @process_lock
+    @Job(
+        name="home_assistant_core_rebuild",
+        limit=JobExecutionLimit.GROUP_ONCE,
+        on_condition=HomeAssistantJobError,
+    )
     async def rebuild(self) -> None:
         """Rebuild Home Assistant Docker container."""
         with suppress(DockerError):
             await self.instance.stop()
-        await self._start()
+        await self.start()
 
     def logs(self) -> Awaitable[bytes]:
         """Get HomeAssistant docker logs.
@@ -359,10 +386,7 @@ class HomeAssistantCore(CoreSysAttributes):
         return self.instance.check_trust()
 
     async def stats(self) -> DockerStats:
-        """Return stats of Home Assistant.
-
-        Return a coroutine.
-        """
+        """Return stats of Home Assistant."""
         try:
             return await self.instance.stats()
         except DockerError as err:
@@ -385,13 +409,16 @@ class HomeAssistantCore(CoreSysAttributes):
     @property
     def in_progress(self) -> bool:
         """Return True if a task is in progress."""
-        return self.instance.in_progress or self.lock.locked()
+        return self.instance.in_progress or self.active_job
 
     async def check_config(self) -> ConfigResult:
         """Run Home Assistant config check."""
-        result = await self.instance.execute_command(
-            "python3 -m homeassistant -c /config --script check_config"
-        )
+        try:
+            result = await self.instance.execute_command(
+                "python3 -m homeassistant -c /config --script check_config"
+            )
+        except DockerError as err:
+            raise HomeAssistantError() from err
 
         # If not valid
         if result.exit_code is None:
@@ -416,28 +443,46 @@ class HomeAssistantCore(CoreSysAttributes):
             return
         _LOGGER.info("Wait until Home Assistant is ready")
 
-        while True:
-            await asyncio.sleep(5)
+        deadline = datetime.now() + STARTUP_API_RESPONSE_TIMEOUT
+        last_state = None
+        while not (timeout := datetime.now() >= deadline):
+            await asyncio.sleep(SECONDS_BETWEEN_API_CHECKS)
 
             # 1: Check if container is running
             if not await self.instance.is_running():
                 _LOGGER.error("Home Assistant has crashed!")
                 break
 
-            # 2: Check if API response
-            if await self.sys_homeassistant.api.check_api_state():
-                _LOGGER.info("Detected a running Home Assistant instance")
-                self._error_state = False
-                return
+            # 2: Check API response
+            if state := await self.sys_homeassistant.api.get_api_state():
+                if last_state is None:
+                    # API initially available, move deadline up and check API
+                    # state to be running now
+                    deadline = datetime.now() + STARTUP_API_CHECK_RUNNING_TIMEOUT
+
+                if last_state != state:
+                    _LOGGER.info("Home Assistant Core state changed to %s", state)
+                    last_state = state
+
+                if state == "RUNNING":
+                    _LOGGER.info("Detected a running Home Assistant instance")
+                    self._error_state = False
+                    return
 
         self._error_state = True
+        if timeout:
+            raise HomeAssistantStartupTimeout(
+                "No Home Assistant Core response, assuming a fatal startup error",
+                _LOGGER.error,
+            )
         raise HomeAssistantCrashError()
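
The rewritten `_block_till_run` is effectively a two-phase timeout: up to 3 minutes for the API to respond at all, then, once it has, up to 15 minutes for the reported state to reach `RUNNING`. Distilled into a standalone sketch (`get_state` stands in for `api.get_api_state`):

```python
import asyncio
from datetime import datetime, timedelta

SECONDS_BETWEEN_API_CHECKS = 5
STARTUP_API_RESPONSE_TIMEOUT = timedelta(minutes=3)
STARTUP_API_CHECK_RUNNING_TIMEOUT = timedelta(minutes=15)

async def block_till_run(get_state) -> bool:
    """Return True once Core reports RUNNING, False on timeout."""
    deadline = datetime.now() + STARTUP_API_RESPONSE_TIMEOUT
    last_state = None
    while datetime.now() < deadline:
        await asyncio.sleep(SECONDS_BETWEEN_API_CHECKS)
        state = await get_state()  # None while the API is unreachable
        if state is None:
            continue
        if last_state is None:
            # First API response: switch to the longer RUNNING deadline
            deadline = datetime.now() + STARTUP_API_CHECK_RUNNING_TIMEOUT
        last_state = state
        if state == "RUNNING":
            return True
    return False
```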
 
     @Job(
+        name="home_assistant_core_repair",
         conditions=[
             JobCondition.FREE_SPACE,
             JobCondition.INTERNET_HOST,
-        ]
+        ],
     )
     async def repair(self):
         """Repair local Home Assistant data."""
@@ -459,6 +504,7 @@ class HomeAssistantCore(CoreSysAttributes):
             await self._restart_after_problem(event.state)
 
     @Job(
+        name="home_assistant_core_restart_after_problem",
         limit=JobExecutionLimit.THROTTLE_RATE_LIMIT,
         throttle_period=WATCHDOG_THROTTLE_PERIOD,
         throttle_max_calls=WATCHDOG_THROTTLE_MAX_CALLS,
@@ -470,7 +516,7 @@ class HomeAssistantCore(CoreSysAttributes):
         # Don't interrupt a task in progress or if rollback is handling it
         if not (self.in_progress or self.error_state):
             _LOGGER.warning(
-                "Watchdog found Home Assistant %s, restarting...", state.value
+                "Watchdog found Home Assistant %s, restarting...", state
            )
             if state == ContainerState.FAILED and attempts == 0:
                 try:
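
The watchdog hunk above throttles restarts via `THROTTLE_RATE_LIMIT` with `WATCHDOG_THROTTLE_PERIOD`/`WATCHDOG_THROTTLE_MAX_CALLS`, capping how many restart attempts may happen within a rolling window. Approximately (a standalone sketch with illustrative numbers, not the Supervisor's implementation):

```python
from collections import deque
from datetime import datetime, timedelta

class RateLimitSketch:
    """Allow at most max_calls within a rolling period; reject the rest."""

    def __init__(self, max_calls: int, period: timedelta) -> None:
        self.max_calls = max_calls
        self.period = period
        self._calls: deque[datetime] = deque()

    def try_acquire(self) -> bool:
        now = datetime.now()
        # Drop attempts that have aged out of the window
        while self._calls and now - self._calls[0] > self.period:
            self._calls.popleft()
        if len(self._calls) >= self.max_calls:
            return False  # throttled: the watchdog would skip this restart
        self._calls.append(now)
        return True

limiter = RateLimitSketch(max_calls=10, period=timedelta(minutes=30))
assert limiter.try_acquire()
```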
Some files were not shown because too many files have changed in this diff.