Mirror of https://github.com/home-assistant/supervisor.git, synced 2025-08-13 19:19:21 +00:00.
Compare commits

38 commits: `2025.02.3` ... `fix-error-`
Commits (SHA1; author and date columns were lost in extraction):

0786e06eb9
5b18fb6b12
d42ec12ae8
86133f8ecd
12c951f62d
fcb3e2eb55
176e511180
696dcf6149
8030b346e0
53d97ce0c6
77523f7bec
f4d69f1811
cf5a0dc548
a8cc3ae6ef
362bd8fd21
2274de969f
dfed251c7a
151d4bdd73
c5d4ebcd48
0ad559adcd
39f5b91f12
ddee79d209
ff111253d5
31193abb7b
ae266e1692
c315a15816
3bd732147c
ddbde93a6d
6db11a8ade
42e78408a7
15e8940c7f
644ec45ded
a8d2743f56
0acef4a6e6
5733db94aa
da8c6cf111
802ee25a8b
ce8b107f1e
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 11 lines changed)

```diff
@@ -26,7 +26,7 @@ body:
     attributes:
       label: What type of installation are you running?
       description: >
-        If you don't know, can be found in [Settings -> System -> Repairs -> System Information](https://my.home-assistant.io/redirect/system_health/).
+        If you don't know, can be found in [Settings -> System -> Repairs -> (three dot menu) -> System Information](https://my.home-assistant.io/redirect/system_health/).
        It is listed as the `Installation Type` value.
      options:
        - Home Assistant OS
@@ -72,9 +72,9 @@ body:
    validations:
      required: true
    attributes:
-      label: System Health information
+      label: System information
      description: >
-        System Health information can be found in the top right menu in [Settings -> System -> Repairs](https://my.home-assistant.io/redirect/repairs/).
+        The System information can be found in [Settings -> System -> Repairs -> (three dot menu) -> System Information](https://my.home-assistant.io/redirect/system_health/).
        Click the copy button at the bottom of the pop-up and paste it here.

        [](https://my.home-assistant.io/redirect/system_health/)
@@ -83,8 +83,9 @@ body:
      label: Supervisor diagnostics
      placeholder: "drag-and-drop the diagnostics data file here (do not copy-and-paste the content)"
      description: >-
-        Supervisor diagnostics can be found in [Settings -> Integrations](https://my.home-assistant.io/redirect/integrations/).
-        Find the card that says `Home Assistant Supervisor`, open its menu and select 'Download diagnostics'.
+        Supervisor diagnostics can be found in [Settings -> Devices & services](https://my.home-assistant.io/redirect/integrations/).
+        Find the card that says `Home Assistant Supervisor`, open it, and select the three dot menu of the Supervisor integration entry
+        and select 'Download diagnostics'.

        **Please drag-and-drop the downloaded file into the textbox below. Do not copy and paste its contents.**
  - type: textarea
```
.github/workflows/builder.yml (vendored, 6 lines changed)

```diff
@@ -108,7 +108,7 @@ jobs:
        if: needs.init.outputs.requirements == 'true'
        uses: home-assistant/wheels@2024.11.0
        with:
-          abi: cp312
+          abi: cp313
          tag: musllinux_1_2
          arch: ${{ matrix.arch }}
          wheels-key: ${{ secrets.WHEELS_KEY }}
@@ -160,7 +160,7 @@ jobs:
        run: echo "BUILD_ARGS=--test" >> $GITHUB_ENV

      - name: Build supervisor
-        uses: home-assistant/builder@2024.08.2
+        uses: home-assistant/builder@2025.02.0
        with:
          args: |
            $BUILD_ARGS \
@@ -207,7 +207,7 @@ jobs:

      - name: Build the Supervisor
        if: needs.init.outputs.publish != 'true'
-        uses: home-assistant/builder@2024.08.2
+        uses: home-assistant/builder@2025.02.0
        with:
          args: |
            --test \
```
.github/workflows/ci.yaml (vendored, 30 lines changed)

```diff
@@ -33,7 +33,7 @@ jobs:
          python-version: ${{ env.DEFAULT_PYTHON }}
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v4.2.1
+        uses: actions/cache@v4.2.2
        with:
          path: venv
          key: |
@@ -47,7 +47,7 @@ jobs:
          pip install -r requirements.txt -r requirements_tests.txt
      - name: Restore pre-commit environment from cache
        id: cache-precommit
-        uses: actions/cache@v4.2.1
+        uses: actions/cache@v4.2.2
        with:
          path: ${{ env.PRE_COMMIT_CACHE }}
          lookup-only: true
@@ -75,7 +75,7 @@ jobs:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v4.2.1
+        uses: actions/cache@v4.2.2
        with:
          path: venv
          key: |
@@ -87,7 +87,7 @@ jobs:
          exit 1
      - name: Restore pre-commit environment from cache
        id: cache-precommit
-        uses: actions/cache@v4.2.1
+        uses: actions/cache@v4.2.2
        with:
          path: ${{ env.PRE_COMMIT_CACHE }}
          key: |
@@ -118,7 +118,7 @@ jobs:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v4.2.1
+        uses: actions/cache@v4.2.2
        with:
          path: venv
          key: |
@@ -130,7 +130,7 @@ jobs:
          exit 1
      - name: Restore pre-commit environment from cache
        id: cache-precommit
-        uses: actions/cache@v4.2.1
+        uses: actions/cache@v4.2.2
        with:
          path: ${{ env.PRE_COMMIT_CACHE }}
          key: |
@@ -176,7 +176,7 @@ jobs:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v4.2.1
+        uses: actions/cache@v4.2.2
        with:
          path: venv
          key: |
@@ -188,7 +188,7 @@ jobs:
          exit 1
      - name: Restore pre-commit environment from cache
        id: cache-precommit
-        uses: actions/cache@v4.2.1
+        uses: actions/cache@v4.2.2
        with:
          path: ${{ env.PRE_COMMIT_CACHE }}
          key: |
@@ -220,7 +220,7 @@ jobs:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v4.2.1
+        uses: actions/cache@v4.2.2
        with:
          path: venv
          key: |
@@ -232,7 +232,7 @@ jobs:
          exit 1
      - name: Restore pre-commit environment from cache
        id: cache-precommit
-        uses: actions/cache@v4.2.1
+        uses: actions/cache@v4.2.2
        with:
          path: ${{ env.PRE_COMMIT_CACHE }}
          key: |
@@ -264,7 +264,7 @@ jobs:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v4.2.1
+        uses: actions/cache@v4.2.2
        with:
          path: venv
          key: |
@@ -304,7 +304,7 @@ jobs:
          cosign-release: "v2.4.0"
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v4.2.1
+        uses: actions/cache@v4.2.2
        with:
          path: venv
          key: |
@@ -359,7 +359,7 @@ jobs:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Restore Python virtual environment
        id: cache-venv
-        uses: actions/cache@v4.2.1
+        uses: actions/cache@v4.2.2
        with:
          path: venv
          key: |
@@ -370,7 +370,7 @@ jobs:
          echo "Failed to restore Python virtual environment from cache"
          exit 1
      - name: Download all coverage artifacts
-        uses: actions/download-artifact@v4.1.8
+        uses: actions/download-artifact@v4.1.9
      - name: Combine coverage results
        run: |
          . venv/bin/activate
@@ -378,4 +378,4 @@ jobs:
          coverage report
          coverage xml
      - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v5.3.1
+        uses: codecov/codecov-action@v5.4.0
```
.github/workflows/update_frontend.yml (vendored, 29 lines changed)

```diff
@@ -10,7 +10,8 @@ jobs:
    runs-on: ubuntu-latest
    outputs:
      skip: ${{ steps.check_version.outputs.skip || steps.check_existing_pr.outputs.skip }}
-      latest_tag: ${{ steps.latest_frontend_version.outputs.latest_tag }}
+      current_version: ${{ steps.check_version.outputs.current_version }}
+      latest_version: ${{ steps.latest_frontend_version.outputs.latest_tag }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
@@ -23,11 +24,11 @@ jobs:
      - name: Check if version is up to date
        id: check_version
        run: |
-          SUPERVISOR_VERSION=$(cat .ha-frontend-version)
-          LATEST_VERSION=${{ steps.latest_frontend_version.outputs.latest_tag }}
-          echo "SUPERVISOR_VERSION=$SUPERVISOR_VERSION" >> $GITHUB_ENV
-          echo "LATEST_VERSION=$LATEST_VERSION" >> $GITHUB_ENV
-          if [[ ! "$SUPERVISOR_VERSION" < "$LATEST_VERSION" ]]; then
+          current_version="$(cat .ha-frontend-version)"
+          latest_version="${{ steps.latest_frontend_version.outputs.latest_tag }}"
+          echo "current_version=${current_version}" >> $GITHUB_OUTPUT
+          echo "LATEST_VERSION=${latest_version}" >> $GITHUB_ENV
+          if [[ ! "$current_version" < "$latest_version" ]]; then
            echo "Frontend version is up to date"
            echo "skip=true" >> $GITHUB_OUTPUT
          fi
@@ -37,7 +38,7 @@ jobs:
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
-          PR=$(gh pr list --state open --base main --json title --search "Autoupdate frontend to version $LATEST_VERSION")
+          PR=$(gh pr list --state open --base main --json title --search "Update frontend to version $LATEST_VERSION")
          if [[ "$PR" != "[]" ]]; then
            echo "Skipping - There is already a PR open for version $LATEST_VERSION"
            echo "skip=true" >> $GITHUB_OUTPUT
@@ -54,21 +55,25 @@ jobs:
          rm -rf supervisor/api/panel/*
      - name: Update version file
        run: |
-          echo "${{ needs.check-version.outputs.latest_tag }}" > .ha-frontend-version
+          echo "${{ needs.check-version.outputs.latest_version }}" > .ha-frontend-version
      - name: Download release assets
        uses: robinraju/release-downloader@v1
        with:
          repository: 'home-assistant/frontend'
-          tag: ${{ needs.check-version.outputs.latest_tag }}
-          fileName: home_assistant_frontend_supervisor-${{ needs.check-version.outputs.latest_tag }}.tar.gz
+          tag: ${{ needs.check-version.outputs.latest_version }}
+          fileName: home_assistant_frontend_supervisor-${{ needs.check-version.outputs.latest_version }}.tar.gz
          extract: true
          out-file-path: supervisor/api/panel/
      - name: Create PR
        uses: peter-evans/create-pull-request@v7
        with:
-          commit-message: "Autoupdate frontend to version ${{ needs.check-version.outputs.latest_tag }}"
+          commit-message: "Update frontend to version ${{ needs.check-version.outputs.latest_version }}"
          branch: autoupdate-frontend
          base: main
          draft: true
          sign-commits: true
-          title: "Autoupdate frontend to version ${{ needs.check-version.outputs.latest_tag }}"
+          title: "Update frontend to version ${{ needs.check-version.outputs.latest_version }}"
+          body: >
+            Update frontend from ${{ needs.check-version.outputs.current_version }} to
+            [${{ needs.check-version.outputs.latest_version }}](https://github.com/home-assistant/frontend/releases/tag/${{ needs.check-version.outputs.latest_version }})
```
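The up-to-date check in this workflow leans on bash's lexicographic string `<` inside `[[ ]]`; that ordering is only safe because frontend tags are fixed-width, date-based versions. A minimal Python sketch of the same guard (the tag values are hypothetical examples, not taken from this diff):

```python
# Lexicographic comparison mirrors bash `[[ "$a" < "$b" ]]`.
# It only orders correctly while both tags keep the same
# fixed-width YYYYMMDD.N shape; semantic versions would need
# real version parsing instead.
def is_up_to_date(current: str, latest: str) -> bool:
    """Return True when `current` is not older than `latest`."""
    return not (current < latest)


# Hypothetical tags for illustration:
assert is_up_to_date("20250221.0", "20250221.0")
assert not is_up_to_date("20250214.0", "20250221.0")
```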
Dockerfile:

```diff
@@ -9,7 +9,8 @@ ENV \

 ARG \
     COSIGN_VERSION \
-    BUILD_ARCH
+    BUILD_ARCH \
+    QEMU_CPU

 # Install base
 WORKDIR /usr/src
```
requirements.txt:

```diff
@@ -1,5 +1,5 @@
 aiodns==3.2.0
-aiohttp==3.11.12
+aiohttp==3.11.13
 atomicwrites-homeassistant==1.4.1
 attrs==25.1.0
 awesomeversion==24.6.0
@@ -20,10 +20,10 @@ pulsectl==24.12.0
 pyudev==0.24.3
 PyYAML==6.0.2
 requests==2.32.3
-securetar==2025.2.0
+securetar==2025.2.1
 sentry-sdk==2.22.0
-setuptools==75.8.0
+setuptools==75.8.2
 voluptuous==0.15.2
-dbus-fast==2.33.0
+dbus-fast==2.34.0
 typing_extensions==4.12.2
 zlib-fast==0.2.1
```
requirements_tests.txt:

```diff
@@ -7,7 +7,7 @@ pytest-asyncio==0.25.2
 pytest-cov==6.0.0
 pytest-timeout==2.3.1
 pytest==8.3.4
-ruff==0.9.7
+ruff==0.9.8
 time-machine==2.16.0
 typing_extensions==4.12.2
 urllib3==2.3.0
```
supervisor/__main__.py:

```diff
@@ -54,8 +54,7 @@ if __name__ == "__main__":
    loop.set_debug(coresys.config.debug)
    loop.run_until_complete(coresys.core.connect())

-    bootstrap.supervisor_debugger(coresys)
-    bootstrap.migrate_system_env(coresys)
+    loop.run_until_complete(bootstrap.supervisor_debugger(coresys))

    # Signal health startup for container
    run_os_startup_check_cleanup()
```
supervisor/addons/addon.py:

```diff
@@ -20,7 +20,7 @@ from typing import Any, Final
 import aiohttp
 from awesomeversion import AwesomeVersionCompareException
 from deepmerge import Merger
-from securetar import atomic_contents_add, secure_path
+from securetar import AddFileError, atomic_contents_add, secure_path
 import voluptuous as vol
 from voluptuous.humanize import humanize_error

@@ -88,7 +88,7 @@ from ..store.addon import AddonStore
 from ..utils import check_port
 from ..utils.apparmor import adjust_profile
 from ..utils.json import read_json_file, write_json_file
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .const import (
     WATCHDOG_MAX_ATTEMPTS,
     WATCHDOG_RETRY_SECONDS,
@@ -243,7 +243,7 @@ class Addon(AddonModel):
            await self.instance.install(self.version, default_image, arch=self.arch)

        self.persist[ATTR_IMAGE] = default_image
-        self.save_persist()
+        await self.save_persist()

    @property
    def ip_address(self) -> IPv4Address:
@@ -667,9 +667,9 @@ class Addon(AddonModel):
        """Is add-on loaded."""
        return bool(self._listeners)

-    def save_persist(self) -> None:
+    async def save_persist(self) -> None:
        """Save data of add-on."""
-        self.sys_addons.data.save_data()
+        await self.sys_addons.data.save_data()

    async def watchdog_application(self) -> bool:
        """Return True if application is running."""
@@ -772,7 +772,7 @@ class Addon(AddonModel):
    )
    async def install(self) -> None:
        """Install and setup this addon."""
-        self.sys_addons.data.install(self.addon_store)
+        await self.sys_addons.data.install(self.addon_store)
        await self.load()

        if not self.path_data.is_dir():
@@ -790,7 +790,7 @@ class Addon(AddonModel):
                self.latest_version, self.addon_store.image, arch=self.arch
            )
        except DockerError as err:
-            self.sys_addons.data.uninstall(self)
+            await self.sys_addons.data.uninstall(self)
            raise AddonsError() from err

        # Add to addon manager
@@ -839,23 +839,23 @@ class Addon(AddonModel):

        # Cleanup Ingress dynamic port assignment
        if self.with_ingress:
+            await self.sys_ingress.del_dynamic_port(self.slug)
            self.sys_create_task(self.sys_ingress.reload())
-            self.sys_ingress.del_dynamic_port(self.slug)

        # Cleanup discovery data
        for message in self.sys_discovery.list_messages:
            if message.addon != self.slug:
                continue
-            self.sys_discovery.remove(message)
+            await self.sys_discovery.remove(message)

        # Cleanup services data
        for service in self.sys_services.list_services:
            if self.slug not in service.active:
                continue
-            service.del_service_data(self)
+            await service.del_service_data(self)

        # Remove from addon manager
-        self.sys_addons.data.uninstall(self)
+        await self.sys_addons.data.uninstall(self)
        self.sys_addons.local.pop(self.slug)

    @Job(
@@ -884,7 +884,7 @@ class Addon(AddonModel):

        try:
            _LOGGER.info("Add-on '%s' successfully updated", self.slug)
-            self.sys_addons.data.update(store)
+            await self.sys_addons.data.update(store)
            await self._check_ingress_port()

            # Cleanup
@@ -925,7 +925,7 @@ class Addon(AddonModel):
        except DockerError as err:
            raise AddonsError() from err

-        self.sys_addons.data.update(self.addon_store)
+        await self.sys_addons.data.update(self.addon_store)
        await self._check_ingress_port()
        _LOGGER.info("Add-on '%s' successfully rebuilt", self.slug)

@@ -977,11 +977,21 @@ class Addon(AddonModel):
            return

        # Need install/update
-        with TemporaryDirectory(dir=self.sys_config.path_tmp) as tmp_folder:
-            profile_file = Path(tmp_folder, "apparmor.txt")
+        tmp_folder: TemporaryDirectory | None = None
+
+        def install_update_profile() -> Path:
+            nonlocal tmp_folder
+            tmp_folder = TemporaryDirectory(dir=self.sys_config.path_tmp)
+            profile_file = Path(tmp_folder.name, "apparmor.txt")
            adjust_profile(self.slug, self.path_apparmor, profile_file)
+            return profile_file
+
+        try:
+            profile_file = await self.sys_run_in_executor(install_update_profile)
            await self.sys_host.apparmor.load_profile(self.slug, profile_file)
+        finally:
+            if tmp_folder:
+                await self.sys_run_in_executor(tmp_folder.cleanup)

    async def uninstall_apparmor(self) -> None:
        """Remove AppArmor profile for Add-on."""
@@ -1053,7 +1063,7 @@ class Addon(AddonModel):

        # Access Token
        self.persist[ATTR_ACCESS_TOKEN] = secrets.token_hex(56)
-        self.save_persist()
+        await self.save_persist()

        # Options
        await self.write_options()
@@ -1327,7 +1337,7 @@ class Addon(AddonModel):
                )
            )
            _LOGGER.info("Finish backup for addon %s", self.slug)
-        except (tarfile.TarError, OSError) as err:
+        except (tarfile.TarError, OSError, AddFileError) as err:
            raise AddonsError(
                f"Can't write tarfile {tar_file}: {err}", _LOGGER.error
            ) from err
@@ -1398,7 +1408,7 @@ class Addon(AddonModel):
        # Restore local add-on information
        _LOGGER.info("Restore config for addon %s", self.slug)
        restore_image = self._image(data[ATTR_SYSTEM])
-        self.sys_addons.data.restore(
+        await self.sys_addons.data.restore(
            self.slug, data[ATTR_USER], data[ATTR_SYSTEM], restore_image
        )

@@ -1520,7 +1530,7 @@ class Addon(AddonModel):
            except AddonsError as err:
                attempts = attempts + 1
                _LOGGER.error("Watchdog restart of addon %s failed!", self.name)
-                capture_exception(err)
+                await async_capture_exception(err)
            else:
                break
```
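The AppArmor hunk above shows the pattern this branch applies throughout: blocking filesystem work moves into a named inner function that runs in the executor, with the temporary directory tracked via `nonlocal` so the coroutine can clean it up in `finally`. A standalone sketch of that shape (names here are illustrative, not the Supervisor API):

```python
import asyncio
from pathlib import Path
from tempfile import TemporaryDirectory


async def load_profile(source: Path) -> None:
    """Prepare a file in a temp dir off the event loop, then clean up."""
    loop = asyncio.get_running_loop()
    tmp_folder: TemporaryDirectory | None = None

    def prepare() -> Path:
        # Blocking I/O: create the temp dir and write the adjusted file.
        nonlocal tmp_folder
        tmp_folder = TemporaryDirectory()
        target = Path(tmp_folder.name, "profile.txt")
        target.write_text(source.read_text())
        return target

    try:
        profile = await loop.run_in_executor(None, prepare)
        print(f"would load {profile}")  # stand-in for the async consumer
    finally:
        if tmp_folder:
            # cleanup() also touches the filesystem, so it runs in the executor too.
            await loop.run_in_executor(None, tmp_folder.cleanup)
```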
supervisor/addons/build.py:

```diff
@@ -34,16 +34,29 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
        self.coresys: CoreSys = coresys
        self.addon = addon

+        # Search for build file later in executor
+        super().__init__(None, SCHEMA_BUILD_CONFIG)
+
+    def _get_build_file(self) -> Path:
+        """Get build file.
+
+        Must be run in executor.
+        """
        try:
-            build_file = find_one_filetype(
+            return find_one_filetype(
                self.addon.path_location, "build", FILE_SUFFIX_CONFIGURATION
            )
        except ConfigurationFileError:
-            build_file = self.addon.path_location / "build.json"
+            return self.addon.path_location / "build.json"

-        super().__init__(build_file, SCHEMA_BUILD_CONFIG)
+    async def read_data(self) -> None:
+        """Load data from file."""
+        if not self._file:
+            self._file = await self.sys_run_in_executor(self._get_build_file)

-    def save_data(self):
+        await super().read_data()
+
+    async def save_data(self):
        """Ignore save function."""
        raise RuntimeError()
```
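`AddonBuild` now defers both the build-file lookup and the file read until `read_data()` runs, rather than doing blocking I/O in `__init__`. A generic sketch of that two-step shape (the class and file names here are stand-ins, not the Supervisor `FileConfiguration` base):

```python
import asyncio
import json
from pathlib import Path


class LazyConfig:
    """Resolve and read a config file only on first use."""

    def __init__(self, folder: Path) -> None:
        self._folder = folder
        self._file: Path | None = None  # resolved later, off the loop
        self._data: dict | None = None

    def _find_file(self) -> Path:
        # Blocking lookup; must run in an executor.
        for candidate in ("build.json", "build.yaml"):
            if (self._folder / candidate).exists():
                return self._folder / candidate
        return self._folder / "build.json"

    async def read_data(self) -> dict:
        loop = asyncio.get_running_loop()
        if self._file is None:
            self._file = await loop.run_in_executor(None, self._find_file)
        self._data = await loop.run_in_executor(
            None, lambda: json.loads(self._file.read_text())
        )
        return self._data
```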
supervisor/addons/data.py:

```diff
@@ -38,7 +38,7 @@ class AddonsData(FileConfiguration, CoreSysAttributes):
        """Return local add-on data."""
        return self._data[ATTR_SYSTEM]

-    def install(self, addon: AddonStore) -> None:
+    async def install(self, addon: AddonStore) -> None:
        """Set addon as installed."""
        self.system[addon.slug] = deepcopy(addon.data)
        self.user[addon.slug] = {
@@ -46,26 +46,28 @@ class AddonsData(FileConfiguration, CoreSysAttributes):
            ATTR_VERSION: addon.version,
            ATTR_IMAGE: addon.image,
        }
-        self.save_data()
+        await self.save_data()

-    def uninstall(self, addon: Addon) -> None:
+    async def uninstall(self, addon: Addon) -> None:
        """Set add-on as uninstalled."""
        self.system.pop(addon.slug, None)
        self.user.pop(addon.slug, None)
-        self.save_data()
+        await self.save_data()

-    def update(self, addon: AddonStore) -> None:
+    async def update(self, addon: AddonStore) -> None:
        """Update version of add-on."""
        self.system[addon.slug] = deepcopy(addon.data)
        self.user[addon.slug].update(
            {ATTR_VERSION: addon.version, ATTR_IMAGE: addon.image}
        )
-        self.save_data()
+        await self.save_data()

-    def restore(self, slug: str, user: Config, system: Config, image: str) -> None:
+    async def restore(
+        self, slug: str, user: Config, system: Config, image: str
+    ) -> None:
        """Restore data to add-on."""
        self.user[slug] = deepcopy(user)
        self.system[slug] = deepcopy(system)

        self.user[slug][ATTR_IMAGE] = image
-        self.save_data()
+        await self.save_data()
```
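Every mutation in `AddonsData` now awaits `save_data()`, which is the branch-wide change: persistence that used to write JSON synchronously on the event loop becomes a coroutine that hands the write to a thread. A minimal sketch of such an awaitable store, assuming a plain dict-backed JSON file (not the actual `FileConfiguration` implementation):

```python
import asyncio
import json
from pathlib import Path


class JsonStore:
    """Dict-backed store whose saves run in a worker thread."""

    def __init__(self, path: Path) -> None:
        self._path = path
        self.data: dict = {}

    async def save_data(self) -> None:
        # Serialize on the loop (cheap), write on a thread (blocking).
        payload = json.dumps(self.data)
        await asyncio.get_running_loop().run_in_executor(
            None, self._path.write_text, payload
        )

    async def install(self, slug: str, version: str) -> None:
        self.data[slug] = {"version": version}
        await self.save_data()  # callers must now await mutations
```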
supervisor/addons/manager.py:

```diff
@@ -5,7 +5,7 @@ from collections.abc import Awaitable
 from contextlib import suppress
 import logging
 import tarfile
-from typing import Union
+from typing import Self, Union

 from attr import evolve

@@ -23,7 +23,7 @@ from ..exceptions import (
 from ..jobs.decorator import Job, JobCondition
 from ..resolution.const import ContextType, IssueType, SuggestionType
 from ..store.addon import AddonStore
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .addon import Addon
 from .const import ADDON_UPDATE_CONDITIONS
 from .data import AddonsData
@@ -74,6 +74,11 @@ class AddonManager(CoreSysAttributes):
                return addon
        return None

+    async def load_config(self) -> Self:
+        """Load config in executor."""
+        await self.data.read_data()
+        return self
+
    async def load(self) -> None:
        """Start up add-on management."""
        # Refresh cache for all store addons
@@ -165,7 +170,7 @@ class AddonManager(CoreSysAttributes):
                await addon.stop()
            except Exception as err:  # pylint: disable=broad-except
                _LOGGER.warning("Can't stop Add-on %s: %s", addon.slug, err)
-                capture_exception(err)
+                await async_capture_exception(err)

    @Job(
        name="addon_manager_install",
@@ -383,7 +388,7 @@ class AddonManager(CoreSysAttributes):
                    reference=addon.slug,
                    suggestions=[SuggestionType.EXECUTE_REPAIR],
                )
-                capture_exception(err)
+                await async_capture_exception(err)
            else:
                add_host_coros.append(
                    self.sys_plugins.dns.add_host(
```
supervisor/addons/model.py:

```diff
@@ -210,18 +210,6 @@ class AddonModel(JobGroup, ABC):
        """Return description of add-on."""
        return self.data[ATTR_DESCRIPTON]

-    @property
-    def long_description(self) -> str | None:
-        """Return README.md as long_description."""
-        readme = Path(self.path_location, "README.md")
-
-        # If readme not exists
-        if not readme.exists():
-            return None
-
-        # Return data
-        return readme.read_text(encoding="utf-8")
-
    @property
    def repository(self) -> str:
        """Return repository of add-on."""
@@ -646,6 +634,21 @@ class AddonModel(JobGroup, ABC):
        """Return breaking versions of addon."""
        return self.data[ATTR_BREAKING_VERSIONS]

+    async def long_description(self) -> str | None:
+        """Return README.md as long_description."""
+
+        def read_readme() -> str | None:
+            readme = Path(self.path_location, "README.md")
+
+            # If readme not exists
+            if not readme.exists():
+                return None
+
+            # Return data
+            return readme.read_text(encoding="utf-8")
+
+        return await self.sys_run_in_executor(read_readme)
+
    def refresh_path_cache(self) -> Awaitable[None]:
        """Refresh cache of existing paths."""
```
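`long_description` moves from a blocking `@property` that read `README.md` on the event loop to an async method that wraps the read in the executor; callers accordingly change from `addon.long_description` to `await addon.long_description()`. The same conversion in isolation (illustrative class, not `AddonModel` itself):

```python
import asyncio
from pathlib import Path


class Addon:
    def __init__(self, path_location: Path) -> None:
        self.path_location = path_location

    # Before: a property doing readme.read_text() directly would block
    # the event loop on slow storage. After: an awaitable method.
    async def long_description(self) -> str | None:
        def read_readme() -> str | None:
            readme = self.path_location / "README.md"
            if not readme.exists():
                return None
            return readme.read_text(encoding="utf-8")

        return await asyncio.get_running_loop().run_in_executor(None, read_readme)
```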
supervisor/api/__init__.py:

```diff
@@ -10,7 +10,7 @@ from aiohttp import web
 from ..const import AddonState
 from ..coresys import CoreSys, CoreSysAttributes
 from ..exceptions import APIAddonNotInstalled, HostNotSupportedError
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .addons import APIAddons
 from .audio import APIAudio
 from .auth import APIAuth
@@ -412,7 +412,7 @@ class RestAPI(CoreSysAttributes):
            if not isinstance(err, HostNotSupportedError):
                # No need to capture HostNotSupportedError to Sentry, the cause
                # is known and reported to the user using the resolution center.
-                capture_exception(err)
+                await async_capture_exception(err)
            kwargs.pop("follow", None)  # Follow is not supported for Docker logs
            return await api_supervisor.logs(*args, **kwargs)
```
supervisor/api/addons.py:

```diff
@@ -212,7 +212,7 @@ class APIAddons(CoreSysAttributes):
            ATTR_HOSTNAME: addon.hostname,
            ATTR_DNS: addon.dns,
            ATTR_DESCRIPTON: addon.description,
-            ATTR_LONG_DESCRIPTION: addon.long_description,
+            ATTR_LONG_DESCRIPTION: await addon.long_description(),
            ATTR_ADVANCED: addon.advanced,
            ATTR_STAGE: addon.stage,
            ATTR_REPOSITORY: addon.repository,
@@ -322,7 +322,7 @@ class APIAddons(CoreSysAttributes):
        if ATTR_WATCHDOG in body:
            addon.watchdog = body[ATTR_WATCHDOG]

-        addon.save_persist()
+        await addon.save_persist()

    @api_process
    async def sys_options(self, request: web.Request) -> None:
@@ -336,7 +336,7 @@ class APIAddons(CoreSysAttributes):
        if ATTR_SYSTEM_MANAGED_CONFIG_ENTRY in body:
            addon.system_managed_config_entry = body[ATTR_SYSTEM_MANAGED_CONFIG_ENTRY]

-        addon.save_persist()
+        await addon.save_persist()

    @api_process
    async def options_validate(self, request: web.Request) -> None:
@@ -402,7 +402,7 @@ class APIAddons(CoreSysAttributes):
            _LOGGER.warning("Changing protected flag for %s!", addon.slug)
        addon.protected = body[ATTR_PROTECTED]

-        addon.save_persist()
+        await addon.save_persist()

    @api_process
    async def stats(self, request: web.Request) -> dict[str, Any]:
```
supervisor/api/auth.py:

```diff
@@ -99,7 +99,7 @@ class APIAuth(CoreSysAttributes):
    @api_process
    async def cache(self, request: web.Request) -> None:
        """Process cache reset request."""
-        self.sys_auth.reset_data()
+        await self.sys_auth.reset_data()

    @api_process
    async def list_users(self, request: web.Request) -> dict[str, list[dict[str, Any]]]:
```
supervisor/api/backups.py:

```diff
@@ -5,6 +5,7 @@ from __future__ import annotations
 import asyncio
 from collections.abc import Callable
 import errno
+from io import IOBase
 import logging
 from pathlib import Path
 import re
@@ -212,7 +213,7 @@ class APIBackups(CoreSysAttributes):
        if ATTR_DAYS_UNTIL_STALE in body:
            self.sys_backups.days_until_stale = body[ATTR_DAYS_UNTIL_STALE]

-        self.sys_backups.save_data()
+        await self.sys_backups.save_data()

    @api_process
    async def reload(self, _):
@@ -518,29 +519,28 @@ class APIBackups(CoreSysAttributes):
        except vol.Invalid as ex:
            raise APIError(humanize_error(filename, ex)) from None

-        with TemporaryDirectory(dir=tmp_path.as_posix()) as temp_dir:
-            tar_file = Path(temp_dir, "backup.tar")
+        temp_dir: TemporaryDirectory | None = None
+        backup_file_stream: IOBase | None = None
+
+        def open_backup_file() -> Path:
+            nonlocal temp_dir, backup_file_stream
+            temp_dir = TemporaryDirectory(dir=tmp_path.as_posix())
+            tar_file = Path(temp_dir.name, "backup.tar")
+            backup_file_stream = tar_file.open("wb")
+            return tar_file
+
+        def close_backup_file() -> None:
+            if backup_file_stream:
+                backup_file_stream.close()
+            if temp_dir:
+                temp_dir.cleanup()

        try:
            reader = await request.multipart()
            contents = await reader.next()
-            try:
-                with tar_file.open("wb") as backup:
-                    while True:
-                        chunk = await contents.read_chunk()
-                        if not chunk:
-                            break
-                        backup.write(chunk)
-
-            except OSError as err:
-                if err.errno == errno.EBADMSG and location in {
-                    LOCATION_CLOUD_BACKUP,
-                    None,
-                }:
-                    self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE
-                _LOGGER.error("Can't write new backup file: %s", err)
-                return False
-
-            except asyncio.CancelledError:
-                return False
+            tar_file = await self.sys_run_in_executor(open_backup_file)
+            while chunk := await contents.read_chunk(size=2**16):
+                await self.sys_run_in_executor(backup_file_stream.write, chunk)

            backup = await asyncio.shield(
                self.sys_backups.import_backup(
@@ -550,6 +550,21 @@ class APIBackups(CoreSysAttributes):
                    additional_locations=locations,
                )
            )
+        except OSError as err:
+            if err.errno == errno.EBADMSG and location in {
+                LOCATION_CLOUD_BACKUP,
+                None,
+            }:
+                self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE
+            _LOGGER.error("Can't write new backup file: %s", err)
+            return False
+
+        except asyncio.CancelledError:
+            return False
+
+        finally:
+            if temp_dir or backup:
+                await self.sys_run_in_executor(close_backup_file)

        if backup:
            return {ATTR_SLUG: backup.slug}
```
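The upload handler now opens the target file once in the executor, streams 64 KiB multipart chunks through executor writes, and closes and cleans up in `finally`. A trimmed sketch of that flow, with the aiohttp request plumbing elided (`contents` stands in for the multipart part object):

```python
import asyncio
from io import IOBase
from pathlib import Path
from tempfile import TemporaryDirectory


async def receive_backup(contents, tmp_path: Path) -> Path:
    """Stream an upload to disk without blocking the event loop."""
    loop = asyncio.get_running_loop()
    temp_dir: TemporaryDirectory | None = None
    stream: IOBase | None = None

    def open_file() -> Path:
        nonlocal temp_dir, stream
        temp_dir = TemporaryDirectory(dir=tmp_path.as_posix())
        tar_file = Path(temp_dir.name, "backup.tar")
        stream = tar_file.open("wb")
        return tar_file

    def close_file() -> None:
        if stream:
            stream.close()
        # NOTE: the real handler keeps temp_dir alive until the backup
        # has been imported, then cleans it up as well.

    tar_file = await loop.run_in_executor(None, open_file)
    try:
        # read_chunk(size=...) is the aiohttp multipart reader API.
        while chunk := await contents.read_chunk(size=2**16):
            await loop.run_in_executor(None, stream.write, chunk)
    finally:
        await loop.run_in_executor(None, close_file)
    return tar_file
```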
supervisor/api/discovery.py:

```diff
@@ -83,7 +83,7 @@ class APIDiscovery(CoreSysAttributes):
        )

        # Process discovery message
-        message = self.sys_discovery.send(addon, **body)
+        message = await self.sys_discovery.send(addon, **body)

        return {ATTR_UUID: message.uuid}

@@ -110,5 +110,5 @@ class APIDiscovery(CoreSysAttributes):
        if message.addon != addon.slug:
            raise APIForbidden("Can't remove discovery message")

-        self.sys_discovery.remove(message)
+        await self.sys_discovery.remove(message)
        return True
```
supervisor/api/dns.py:

```diff
@@ -78,7 +78,7 @@ class APICoreDNS(CoreSysAttributes):
        if restart_required:
            self.sys_create_task(self.sys_plugins.dns.restart())

-        self.sys_plugins.dns.save_data()
+        await self.sys_plugins.dns.save_data()

    @api_process
    async def stats(self, request: web.Request) -> dict[str, Any]:
```
supervisor/api/docker.py:

```diff
@@ -53,7 +53,7 @@ class APIDocker(CoreSysAttributes):
        for hostname, registry in body.items():
            self.sys_docker.config.registries[hostname] = registry

-        self.sys_docker.config.save_data()
+        await self.sys_docker.config.save_data()

    @api_process
    async def remove_registry(self, request: web.Request):
@@ -63,7 +63,7 @@ class APIDocker(CoreSysAttributes):
            raise APINotFound(f"Hostname {hostname} does not exist in registries")

        del self.sys_docker.config.registries[hostname]
-        self.sys_docker.config.save_data()
+        await self.sys_docker.config.save_data()

    @api_process
    async def info(self, request: web.Request):
```
supervisor/api/homeassistant.py:

```diff
@@ -149,7 +149,7 @@ class APIHomeAssistant(CoreSysAttributes):
                ATTR_BACKUPS_EXCLUDE_DATABASE
            ]

-        self.sys_homeassistant.save_data()
+        await self.sys_homeassistant.save_data()

    @api_process
    async def stats(self, request: web.Request) -> dict[Any, str]:
```
supervisor/api/host.py:

```diff
@@ -98,10 +98,10 @@ class APIHost(CoreSysAttributes):
            ATTR_VIRTUALIZATION: self.sys_host.info.virtualization,
            ATTR_CPE: self.sys_host.info.cpe,
            ATTR_DEPLOYMENT: self.sys_host.info.deployment,
-            ATTR_DISK_FREE: self.sys_host.info.free_space,
-            ATTR_DISK_TOTAL: self.sys_host.info.total_space,
-            ATTR_DISK_USED: self.sys_host.info.used_space,
-            ATTR_DISK_LIFE_TIME: self.sys_host.info.disk_life_time,
+            ATTR_DISK_FREE: await self.sys_host.info.free_space(),
+            ATTR_DISK_TOTAL: await self.sys_host.info.total_space(),
+            ATTR_DISK_USED: await self.sys_host.info.used_space(),
+            ATTR_DISK_LIFE_TIME: await self.sys_host.info.disk_life_time(),
            ATTR_FEATURES: self.sys_host.features,
            ATTR_HOSTNAME: self.sys_host.info.hostname,
            ATTR_LLMNR_HOSTNAME: self.sys_host.info.llmnr_hostname,
```
supervisor/api/jobs.py:

```diff
@@ -92,14 +92,14 @@ class APIJobs(CoreSysAttributes):
        if ATTR_IGNORE_CONDITIONS in body:
            self.sys_jobs.ignore_conditions = body[ATTR_IGNORE_CONDITIONS]

-        self.sys_jobs.save_data()
+        await self.sys_jobs.save_data()

        await self.sys_resolution.evaluate.evaluate_system()

    @api_process
    async def reset(self, request: web.Request) -> None:
        """Reset options for JobManager."""
-        self.sys_jobs.reset_data()
+        await self.sys_jobs.reset_data()

    @api_process
    async def job_info(self, request: web.Request) -> dict[str, Any]:
```
supervisor/api/mounts.py:

```diff
@@ -66,7 +66,7 @@ class APIMounts(CoreSysAttributes):
        else:
            self.sys_mounts.default_backup_mount = mount

-        self.sys_mounts.save_data()
+        await self.sys_mounts.save_data()

    @api_process
    async def create_mount(self, request: web.Request) -> None:
@@ -87,7 +87,7 @@ class APIMounts(CoreSysAttributes):
        if not self.sys_mounts.default_backup_mount:
            self.sys_mounts.default_backup_mount = mount

-        self.sys_mounts.save_data()
+        await self.sys_mounts.save_data()

    @api_process
    async def update_mount(self, request: web.Request) -> None:
@@ -110,7 +110,7 @@ class APIMounts(CoreSysAttributes):
        elif self.sys_mounts.default_backup_mount == mount:
            self.sys_mounts.default_backup_mount = None

-        self.sys_mounts.save_data()
+        await self.sys_mounts.save_data()

    @api_process
    async def delete_mount(self, request: web.Request) -> None:
@@ -122,7 +122,7 @@ class APIMounts(CoreSysAttributes):
        if mount.usage == MountUsage.BACKUP:
            self.sys_create_task(self.sys_backups.reload())

-        self.sys_mounts.save_data()
+        await self.sys_mounts.save_data()

    @api_process
    async def reload_mount(self, request: web.Request) -> None:
```
supervisor/api/os.py:

```diff
@@ -169,7 +169,7 @@ class APIOS(CoreSysAttributes):
                body[ATTR_SYSTEM_HEALTH_LED]
            )

-        self.sys_dbus.agent.board.green.save_data()
+        await self.sys_dbus.agent.board.green.save_data()

    @api_process
    async def boards_yellow_info(self, request: web.Request) -> dict[str, Any]:
@@ -196,7 +196,7 @@ class APIOS(CoreSysAttributes):
        if ATTR_POWER_LED in body:
            await self.sys_dbus.agent.board.yellow.set_power_led(body[ATTR_POWER_LED])

-        self.sys_dbus.agent.board.yellow.save_data()
+        await self.sys_dbus.agent.board.yellow.save_data()
        self.sys_resolution.create_issue(
            IssueType.REBOOT_REQUIRED,
            ContextType.SYSTEM,
```
supervisor/api/resolution.py:

```diff
@@ -126,7 +126,7 @@ class APIResoulution(CoreSysAttributes):
        if ATTR_ENABLED in body:
            check.enabled = body[ATTR_ENABLED]

-        self.sys_resolution.save_data()
+        await self.sys_resolution.save_data()

    @api_process
    async def run_check(self, request: web.Request) -> None:
```
supervisor/api/security.py:

```diff
@@ -48,7 +48,7 @@ class APISecurity(CoreSysAttributes):
        if ATTR_FORCE_SECURITY in body:
            self.sys_security.force = body[ATTR_FORCE_SECURITY]

-        self.sys_security.save_data()
+        await self.sys_security.save_data()

        await self.sys_resolution.evaluate.evaluate_system()
```
supervisor/api/services.py:

```diff
@@ -47,7 +47,7 @@ class APIServices(CoreSysAttributes):
        addon = request[REQUEST_FROM]

        _check_access(request, service.slug)
-        service.set_service_data(addon, body)
+        await service.set_service_data(addon, body)

    @api_process
    async def get_service(self, request):
@@ -69,7 +69,7 @@ class APIServices(CoreSysAttributes):

        # Access
        _check_access(request, service.slug, True)
-        service.del_service_data(addon)
+        await service.del_service_data(addon)


def _check_access(request, service, provide=False):
```
supervisor/api/store.py:

```diff
@@ -2,6 +2,7 @@

 import asyncio
 from collections.abc import Awaitable
+from pathlib import Path
 from typing import Any

 from aiohttp import web
@@ -68,6 +69,15 @@ SCHEMA_ADD_REPOSITORY = vol.Schema(
 )


+def _read_static_file(path: Path, binary: bool = False) -> Any:
+    """Read in a static file asset for API output.
+
+    Must be run in executor.
+    """
+    with path.open("rb" if binary else "r") as asset:
+        return asset.read()
+
+
 class APIStore(CoreSysAttributes):
     """Handle RESTful API for store functions."""

@@ -99,7 +109,7 @@ class APIStore(CoreSysAttributes):

        return self.sys_store.get(repository_slug)

-    def _generate_addon_information(
+    async def _generate_addon_information(
        self, addon: AddonStore, extended: bool = False
    ) -> dict[str, Any]:
        """Generate addon information."""
@@ -146,7 +156,7 @@ class APIStore(CoreSysAttributes):
            ATTR_HOST_NETWORK: addon.host_network,
            ATTR_HOST_PID: addon.host_pid,
            ATTR_INGRESS: addon.with_ingress,
-            ATTR_LONG_DESCRIPTION: addon.long_description,
+            ATTR_LONG_DESCRIPTION: await addon.long_description(),
            ATTR_RATING: rating_security(addon),
            ATTR_SIGNED: addon.signed,
        }
@@ -175,10 +185,12 @@ class APIStore(CoreSysAttributes):
    async def store_info(self, request: web.Request) -> dict[str, Any]:
        """Return store information."""
        return {
-            ATTR_ADDONS: [
-                self._generate_addon_information(self.sys_addons.store[addon])
-                for addon in self.sys_addons.store
-            ],
+            ATTR_ADDONS: await asyncio.gather(
+                *[
+                    self._generate_addon_information(self.sys_addons.store[addon])
+                    for addon in self.sys_addons.store
+                ]
+            ),
            ATTR_REPOSITORIES: [
                self._generate_repository_information(repository)
                for repository in self.sys_store.all
@@ -189,10 +201,12 @@ class APIStore(CoreSysAttributes):
    async def addons_list(self, request: web.Request) -> dict[str, Any]:
        """Return all store add-ons."""
        return {
-            ATTR_ADDONS: [
-                self._generate_addon_information(self.sys_addons.store[addon])
-                for addon in self.sys_addons.store
-            ]
+            ATTR_ADDONS: await asyncio.gather(
+                *[
+                    self._generate_addon_information(self.sys_addons.store[addon])
+                    for addon in self.sys_addons.store
+                ]
+            )
        }

    @api_process
@@ -224,7 +238,7 @@ class APIStore(CoreSysAttributes):
    async def addons_addon_info_wrapped(self, request: web.Request) -> dict[str, Any]:
        """Return add-on information directly (not api)."""
        addon: AddonStore = self._extract_addon(request)
-        return self._generate_addon_information(addon, True)
+        return await self._generate_addon_information(addon, True)

    @api_process_raw(CONTENT_TYPE_PNG)
    async def addons_addon_icon(self, request: web.Request) -> bytes:
@@ -233,8 +247,7 @@ class APIStore(CoreSysAttributes):
        if not addon.with_icon:
            raise APIError(f"No icon found for add-on {addon.slug}!")

-        with addon.path_icon.open("rb") as png:
-            return png.read()
+        return await self.sys_run_in_executor(_read_static_file, addon.path_icon, True)

    @api_process_raw(CONTENT_TYPE_PNG)
    async def addons_addon_logo(self, request: web.Request) -> bytes:
@@ -243,8 +256,7 @@ class APIStore(CoreSysAttributes):
        if not addon.with_logo:
            raise APIError(f"No logo found for add-on {addon.slug}!")

-        with addon.path_logo.open("rb") as png:
-            return png.read()
+        return await self.sys_run_in_executor(_read_static_file, addon.path_logo, True)

    @api_process_raw(CONTENT_TYPE_TEXT)
    async def addons_addon_changelog(self, request: web.Request) -> str:
@@ -258,8 +270,7 @@ class APIStore(CoreSysAttributes):
        if not addon.with_changelog:
            return f"No changelog found for add-on {addon.slug}!"

-        with addon.path_changelog.open("r") as changelog:
-            return changelog.read()
+        return await self.sys_run_in_executor(_read_static_file, addon.path_changelog)

    @api_process_raw(CONTENT_TYPE_TEXT)
    async def addons_addon_documentation(self, request: web.Request) -> str:
@@ -273,8 +284,9 @@ class APIStore(CoreSysAttributes):
        if not addon.with_documentation:
            return f"No documentation found for add-on {addon.slug}!"

-        with addon.path_documentation.open("r") as documentation:
-            return documentation.read()
+        return await self.sys_run_in_executor(
+            _read_static_file, addon.path_documentation
+        )

    @api_process
    async def repositories_list(self, request: web.Request) -> list[dict[str, Any]]:
```
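Because `_generate_addon_information` is now a coroutine (it awaits `long_description()`), the list endpoints build their payloads with `asyncio.gather`, and static assets go through one executor-backed helper. A compact sketch of that fan-out, assuming hypothetical `addons` objects with an async `info()` method:

```python
import asyncio
from pathlib import Path
from typing import Any


def _read_static_file(path: Path, binary: bool = False) -> Any:
    """Read a static asset; must run in an executor."""
    with path.open("rb" if binary else "r") as asset:
        return asset.read()


async def store_info(addons) -> dict:
    # One coroutine per add-on, resolved concurrently instead of
    # sequentially awaiting each long_description read.
    return {"addons": await asyncio.gather(*[addon.info() for addon in addons])}


async def addon_icon(path_icon: Path) -> bytes:
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, _read_static_file, path_icon, True)
```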
supervisor/api/supervisor.py:

```diff
@@ -159,8 +159,8 @@ class APISupervisor(CoreSysAttributes):
            self.sys_config.wait_boot = body[ATTR_WAIT_BOOT]

        # Save changes before processing addons in case of errors
-        self.sys_updater.save_data()
-        self.sys_config.save_data()
+        await self.sys_updater.save_data()
+        await self.sys_config.save_data()

        # Remove: 2022.9
        if ATTR_ADDONS_REPOSITORIES in body:
```
supervisor/api/utils.py:

```diff
@@ -25,7 +25,7 @@ from ..coresys import CoreSys
 from ..exceptions import APIError, BackupFileNotFoundError, DockerAPIError, HassioError
 from ..utils import check_exception_chain, get_message_from_exception_chain
 from ..utils.json import json_dumps, json_loads as json_loads_util
-from ..utils.log_format import format_message
+from ..utils.log_format import async_format_message
 from . import const


@@ -139,7 +139,7 @@ def api_return_error(
    if error and not message:
        message = get_message_from_exception_chain(error)
        if check_exception_chain(error, DockerAPIError):
-            message = format_message(message)
+            message = async_format_message(message)
    if not message:
        message = "Unknown error, see supervisor"

```
supervisor/auth.py:

```diff
@@ -46,7 +46,7 @@ class Auth(FileConfiguration, CoreSysAttributes):
                return True
        return False

-    def _update_cache(self, username: str, password: str) -> None:
+    async def _update_cache(self, username: str, password: str) -> None:
        """Cache a username, password."""
        username_h = self._rehash(username)
        password_h = self._rehash(password, username)
@@ -55,9 +55,9 @@ class Auth(FileConfiguration, CoreSysAttributes):
            return

        self._data[username_h] = password_h
-        self.save_data()
+        await self.save_data()

-    def _dismatch_cache(self, username: str, password: str) -> None:
+    async def _dismatch_cache(self, username: str, password: str) -> None:
        """Remove user from cache."""
        username_h = self._rehash(username)
        password_h = self._rehash(password, username)
@@ -66,7 +66,7 @@ class Auth(FileConfiguration, CoreSysAttributes):
            return

        self._data.pop(username_h, None)
-        self.save_data()
+        await self.save_data()

    async def check_login(self, addon: Addon, username: str, password: str) -> bool:
        """Check username login."""
@@ -109,11 +109,11 @@ class Auth(FileConfiguration, CoreSysAttributes):
            ) as req:
                if req.status == 200:
                    _LOGGER.info("Successful login for '%s'", username)
-                    self._update_cache(username, password)
+                    await self._update_cache(username, password)
                    return True

                _LOGGER.warning("Unauthorized login for '%s'", username)
-                self._dismatch_cache(username, password)
+                await self._dismatch_cache(username, password)
                return False
        except HomeAssistantAPIError:
            _LOGGER.error("Can't request auth on Home Assistant!")
```
supervisor/backups/backup.py:

```diff
@@ -19,7 +19,7 @@ from typing import Any, Self
 from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
 from cryptography.hazmat.backends import default_backend
 from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
-from securetar import SecureTarFile, atomic_contents_add, secure_path
+from securetar import AddFileError, SecureTarFile, atomic_contents_add, secure_path
 import voluptuous as vol
 from voluptuous.humanize import humanize_error

@@ -50,8 +50,10 @@ from ..coresys import CoreSys
 from ..exceptions import (
     AddonsError,
     BackupError,
+    BackupFileExistError,
     BackupFileNotFoundError,
     BackupInvalidError,
+    BackupPermissionError,
 )
 from ..jobs.const import JOB_GROUP_BACKUP
 from ..jobs.decorator import Job
@@ -457,18 +459,31 @@ class Backup(JobGroup):
        def _open_outer_tarfile():
            """Create and open outer tarfile."""
            if self.tarfile.is_file():
-                raise BackupError(
+                raise BackupFileExistError(
                    f"Cannot make new backup at {self.tarfile.as_posix()}, file already exists!",
                    _LOGGER.error,
                )

-            outer_secure_tarfile = SecureTarFile(
+            _outer_secure_tarfile = SecureTarFile(
                self.tarfile,
                "w",
                gzip=False,
                bufsize=BUF_SIZE,
            )
-            return outer_secure_tarfile, outer_secure_tarfile.open()
+            try:
+                _outer_tarfile = _outer_secure_tarfile.open()
+            except PermissionError as ex:
+                raise BackupPermissionError(
+                    f"Cannot open backup file {self.tarfile.as_posix()}, permission error!",
+                    _LOGGER.error,
+                ) from ex
+            except OSError as ex:
+                raise BackupError(
+                    f"Cannot open backup file {self.tarfile.as_posix()} for writing",
+                    _LOGGER.error,
+                ) from ex
+
+            return _outer_secure_tarfile, _outer_tarfile

        def _close_outer_tarfile() -> int:
            """Close outer tarfile."""
@@ -726,7 +741,7 @@ class Backup(JobGroup):
        try:
            if await self.sys_run_in_executor(_save):
                self._data[ATTR_FOLDERS].append(name)
-        except (tarfile.TarError, OSError) as err:
+        except (tarfile.TarError, OSError, AddFileError) as err:
            raise BackupError(
                f"Can't backup folder {name}: {str(err)}", _LOGGER.error
            ) from err
```
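Opening the outer tarfile now distinguishes `PermissionError` from other `OSError`s so callers receive a specific, actionable exception. The shape of that mapping in isolation, with stand-in exception classes rather than the Supervisor ones:

```python
import tarfile
from pathlib import Path


class BackupError(Exception):
    """Generic backup failure (stand-in for the Supervisor class)."""


class BackupPermissionError(BackupError):
    """Backup failed due to filesystem permissions."""


def open_backup(path: Path) -> tarfile.TarFile:
    try:
        return tarfile.open(path, "w")
    except PermissionError as ex:
        # Surface the actionable cause instead of a generic failure.
        raise BackupPermissionError(
            f"Cannot open backup file {path.as_posix()}, permission error!"
        ) from ex
    except OSError as ex:
        raise BackupError(
            f"Cannot open backup file {path.as_posix()} for writing"
        ) from ex
```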
supervisor/backups/manager.py:

```diff
@@ -36,7 +36,7 @@ from ..resolution.const import UnhealthyReason
 from ..utils.common import FileConfiguration
 from ..utils.dt import utcnow
 from ..utils.sentinel import DEFAULT
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .backup import Backup
 from .const import (
     DEFAULT_FREEZE_TIMEOUT,
@@ -525,7 +525,7 @@ class BackupManager(FileConfiguration, JobGroup):
            return None
        except Exception as err:  # pylint: disable=broad-except
            _LOGGER.exception("Backup %s error", backup.slug)
-            capture_exception(err)
+            await async_capture_exception(err)
            self.sys_jobs.current.capture_error(
                BackupError(f"Backup {backup.slug} error, see supervisor logs")
            )
@@ -718,7 +718,7 @@ class BackupManager(FileConfiguration, JobGroup):
            raise
        except Exception as err:  # pylint: disable=broad-except
            _LOGGER.exception("Restore %s error", backup.slug)
-            capture_exception(err)
+            await async_capture_exception(err)
            raise BackupError(
                f"Restore {backup.slug} error, see supervisor logs"
            ) from err
```
supervisor/bootstrap.py:

```diff
@@ -1,12 +1,14 @@
 """Bootstrap Supervisor."""

 # ruff: noqa: T100
+from importlib import import_module
 import logging
 import os
-from pathlib import Path
 import signal
+import warnings

 from colorlog import ColoredFormatter
+from sentry_sdk import capture_exception

 from .addons.manager import AddonManager
 from .api import RestAPI
@@ -15,13 +17,10 @@ from .auth import Auth
 from .backups.manager import BackupManager
 from .bus import Bus
 from .const import (
-    ATTR_ADDONS_CUSTOM_LIST,
-    ATTR_REPOSITORIES,
     ENV_HOMEASSISTANT_REPOSITORY,
     ENV_SUPERVISOR_MACHINE,
     ENV_SUPERVISOR_NAME,
     ENV_SUPERVISOR_SHARE,
-    MACHINE_ID,
     SOCKET_DOCKER,
     LogLevel,
     UpdateChannel,
@@ -45,7 +44,6 @@ from .resolution.module import ResolutionManager
 from .security.module import Security
 from .services import ServiceManager
 from .store import StoreManager
-from .store.validate import ensure_builtin_repositories
 from .supervisor import Supervisor
 from .updater import Updater
 from .utils.sentry import init_sentry
@@ -55,35 +53,39 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)

 async def initialize_coresys() -> CoreSys:
     """Initialize supervisor coresys/objects."""
-    coresys = CoreSys()
+    coresys = await CoreSys().load_config()

     # Initialize core objects
-    coresys.docker = DockerAPI(coresys)
-    coresys.resolution = ResolutionManager(coresys)
-    coresys.jobs = JobManager(coresys)
+    coresys.docker = await DockerAPI(coresys).load_config()
+    coresys.resolution = await ResolutionManager(coresys).load_config()
+    await coresys.resolution.load_modules()
+    coresys.jobs = await JobManager(coresys).load_config()
     coresys.core = Core(coresys)
-    coresys.plugins = PluginManager(coresys)
+    coresys.plugins = await PluginManager(coresys).load_config()
     coresys.arch = CpuArch(coresys)
-    coresys.auth = Auth(coresys)
-    coresys.updater = Updater(coresys)
+    coresys.auth = await Auth(coresys).load_config()
+    coresys.updater = await Updater(coresys).load_config()
     coresys.api = RestAPI(coresys)
     coresys.supervisor = Supervisor(coresys)
-    coresys.homeassistant = HomeAssistant(coresys)
-    coresys.addons = AddonManager(coresys)
-    coresys.backups = BackupManager(coresys)
+    coresys.homeassistant = await HomeAssistant(coresys).load_config()
+    coresys.addons = await AddonManager(coresys).load_config()
+    coresys.backups = await BackupManager(coresys).load_config()
     coresys.host = HostManager(coresys)
     coresys.hardware = HardwareManager(coresys)
-    coresys.ingress = Ingress(coresys)
+    coresys.ingress = await Ingress(coresys).load_config()
     coresys.tasks = Tasks(coresys)
-    coresys.services = ServiceManager(coresys)
-    coresys.store = StoreManager(coresys)
-    coresys.discovery = Discovery(coresys)
+    coresys.services = await ServiceManager(coresys).load_config()
+    coresys.store = await StoreManager(coresys).load_config()
+    coresys.discovery = await Discovery(coresys).load_config()
     coresys.dbus = DBusManager(coresys)
     coresys.os = OSManager(coresys)
     coresys.scheduler = Scheduler(coresys)
-    coresys.security = Security(coresys)
+    coresys.security = await Security(coresys).load_config()
     coresys.bus = Bus(coresys)
-    coresys.mounts = MountManager(coresys)
+    coresys.mounts = await MountManager(coresys).load_config()
+
+    # Set Machine/Host ID
+    await coresys.init_machine()

     # diagnostics
     if coresys.config.diagnostics:
@@ -92,10 +94,6 @@ async def initialize_coresys() -> CoreSys:
     # bootstrap config
     initialize_system(coresys)

-    # Set Machine/Host ID
-    if MACHINE_ID.exists():
-        coresys.machine_id = MACHINE_ID.read_text(encoding="utf-8").strip()
-
     # Check if ENV is in development mode
     if coresys.dev:
         _LOGGER.warning("Environment variable 'SUPERVISOR_DEV' is set")
@@ -109,16 +107,6 @@ async def initialize_coresys() -> CoreSys:
     # Convert datetime
     logging.Formatter.converter = lambda *args: coresys.now().timetuple()

-    # Set machine type
-    if os.environ.get(ENV_SUPERVISOR_MACHINE):
-        coresys.machine = os.environ[ENV_SUPERVISOR_MACHINE]
-    elif os.environ.get(ENV_HOMEASSISTANT_REPOSITORY):
-        coresys.machine = os.environ[ENV_HOMEASSISTANT_REPOSITORY][14:-14]
-        _LOGGER.warning(
-            "Missing SUPERVISOR_MACHINE environment variable. Fallback to deprecated extraction!"
-        )
-    _LOGGER.info("Setting up coresys for machine: %s", coresys.machine)
-
     return coresys


@@ -237,27 +225,11 @@ def initialize_system(coresys: CoreSys) -> None:
         config.path_addon_configs.mkdir()


-def migrate_system_env(coresys: CoreSys) -> None:
-    """Cleanup some stuff after update."""
-    config = coresys.config
-
-    # hass.io 0.37 -> 0.38
-    old_build = Path(config.path_supervisor, "addons/build")
-    if old_build.is_dir():
-        try:
-            old_build.rmdir()
-        except OSError:
-            _LOGGER.error("Can't cleanup old Add-on build directory at '%s'", old_build)
-
-    # Supervisor 2022.5 -> 2022.6. Can be removed after 2022.9
-    # pylint: disable=protected-access
-    if len(coresys.config.addons_repositories) > 0:
-        coresys.store._data[ATTR_REPOSITORIES] = ensure_builtin_repositories(
-            coresys.config.addons_repositories
-        )
-        coresys.config._data[ATTR_ADDONS_CUSTOM_LIST] = []
-        coresys.store.save_data()
-        coresys.config.save_data()
+def warning_handler(message, category, filename, lineno, file=None, line=None):
+    """Warning handler which logs warnings using the logging module."""
+    _LOGGER.warning("%s:%s: %s: %s", filename, lineno, category.__name__, message)
+    if isinstance(message, Exception):
+        capture_exception(message)


 def initialize_logging() -> None:
@@ -286,6 +258,7 @@ def initialize_logging() -> None:
            },
        )
    )
+    warnings.showwarning = warning_handler


 def check_environment() -> None:
@@ -334,12 +307,12 @@ def reg_signal(loop, coresys: CoreSys) -> None:
        _LOGGER.warning("Could not bind to SIGINT")


-def supervisor_debugger(coresys: CoreSys) -> None:
+async def supervisor_debugger(coresys: CoreSys) -> None:
    """Start debugger if needed."""
    if not coresys.config.debug:
        return
-    # pylint: disable=import-outside-toplevel
-    import debugpy
+
+    debugpy = await coresys.run_in_executor(import_module, "debugpy")

    _LOGGER.info("Initializing Supervisor debugger")
```
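The bootstrap now builds every manager with a two-phase pattern: a cheap synchronous `__init__`, then `await obj.load_config()`, which reads the manager's config in the executor and returns `self` so construction stays a one-liner. A minimal sketch of that idiom with generic names, not the real CoreSys types:

```python
import asyncio
from typing import Self


class Manager:
    """Two-phase init: sync construction, async config load."""

    def __init__(self) -> None:
        self.config: dict = {}

    def _read_config(self) -> dict:
        # Blocking file I/O would go here.
        return {"debug": False}

    async def load_config(self) -> Self:
        loop = asyncio.get_running_loop()
        self.config = await loop.run_in_executor(None, self._read_config)
        return self  # returning self keeps call sites one-liners


async def initialize() -> Manager:
    # Mirrors `coresys.jobs = await JobManager(coresys).load_config()`.
    return await Manager().load_config()
```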
@@ -26,7 +26,7 @@ from .exceptions import (
 from .homeassistant.core import LANDINGPAGE
 from .resolution.const import ContextType, IssueType, SuggestionType, UnhealthyReason
 from .utils.dt import utcnow
-from .utils.sentry import capture_exception
+from .utils.sentry import async_capture_exception
 from .utils.whoami import WhoamiData, retrieve_whoami

 _LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -38,7 +38,8 @@ class Core(CoreSysAttributes):
     def __init__(self, coresys: CoreSys):
         """Initialize Supervisor object."""
         self.coresys: CoreSys = coresys
-        self._state: CoreState | None = None
+        self._state: CoreState = CoreState.INITIALIZE
+        self._write_run_state(self._state)
         self.exit_code: int = 0

     @property
@@ -56,34 +57,36 @@ class Core(CoreSysAttributes):
         """Return true if the installation is healthy."""
         return len(self.sys_resolution.unhealthy) == 0

+    def _write_run_state(self, new_state: CoreState):
+        """Write run state for s6 service supervisor."""
+        try:
+            RUN_SUPERVISOR_STATE.write_text(str(new_state), encoding="utf-8")
+        except OSError as err:
+            _LOGGER.warning(
+                "Can't update the Supervisor state to %s: %s", new_state, err
+            )
+
     @state.setter
     def state(self, new_state: CoreState) -> None:
         """Set core into new state."""
         if self._state == new_state:
             return
-        try:
-            RUN_SUPERVISOR_STATE.write_text(new_state, encoding="utf-8")
-        except OSError as err:
-            _LOGGER.warning(
-                "Can't update the Supervisor state to %s: %s", new_state, err
-            )
-        finally:
-            self._state = new_state
-
-            # Don't attempt to notify anyone on CLOSE as we're about to stop the event loop
-            if new_state != CoreState.CLOSE:
-                self.sys_bus.fire_event(BusEvent.SUPERVISOR_STATE_CHANGE, new_state)
+        self._write_run_state(new_state)
+        self._state = new_state

-                # These will be received by HA after startup has completed which won't make sense
-                if new_state not in STARTING_STATES:
-                    self.sys_homeassistant.websocket.supervisor_update_event(
-                        "info", {"state": new_state}
-                    )
+        # Don't attempt to notify anyone on CLOSE as we're about to stop the event loop
+        if new_state != CoreState.CLOSE:
+            self.sys_bus.fire_event(BusEvent.SUPERVISOR_STATE_CHANGE, new_state)
+
+            # These will be received by HA after startup has completed which won't make sense
+            if new_state not in STARTING_STATES:
+                self.sys_homeassistant.websocket.supervisor_update_event(
+                    "info", {"state": new_state}
+                )

     async def connect(self):
         """Connect Supervisor container."""
         self.state = CoreState.INITIALIZE

         # Load information from container
         await self.sys_supervisor.load()
@@ -109,7 +112,7 @@ class Core(CoreSysAttributes):

         # Fix wrong version in config / avoid boot loop on OS
         self.sys_config.version = self.sys_supervisor.version
-        self.sys_config.save_data()
+        await self.sys_config.save_data()

     async def setup(self):
         """Start setting up supervisor orchestration."""
@@ -169,7 +172,7 @@ class Core(CoreSysAttributes):
                     "Fatal error happening on load Task %s: %s", setup_task, err
                 )
                 self.sys_resolution.unhealthy = UnhealthyReason.SETUP
-                capture_exception(err)
+                await async_capture_exception(err)

         # Set OS Agent diagnostics if needed
         if (
@@ -186,7 +189,7 @@ class Core(CoreSysAttributes):
                 self.sys_config.diagnostics,
                 err,
             )
-            capture_exception(err)
+            await async_capture_exception(err)

         # Evaluate the system
         await self.sys_resolution.evaluate.evaluate_system()
@@ -219,13 +222,13 @@ class Core(CoreSysAttributes):
             await self.sys_addons.boot(AddonStartup.INITIALIZE)

             try:
-                # HomeAssistant is already running / supervisor have only reboot
+                # HomeAssistant is already running, only Supervisor restarted
                 if self.sys_hardware.helper.last_boot == self.sys_config.last_boot:
-                    _LOGGER.info("Supervisor reboot detected")
+                    _LOGGER.info("Detected Supervisor restart")
                     return

                 # reset register services / discovery
-                self.sys_services.reset()
+                await self.sys_services.reset()

                 # start addon mark as system
                 await self.sys_addons.boot(AddonStartup.SYSTEM)
@@ -243,12 +246,12 @@ class Core(CoreSysAttributes):
                     await self.sys_homeassistant.core.start()
                 except HomeAssistantCrashError as err:
                     _LOGGER.error("Can't start Home Assistant Core - rebuilding")
-                    capture_exception(err)
+                    await async_capture_exception(err)

                     with suppress(HomeAssistantError):
                         await self.sys_homeassistant.core.rebuild()
                 except HomeAssistantError as err:
-                    capture_exception(err)
+                    await async_capture_exception(err)
             else:
                 _LOGGER.info("Skipping start of Home Assistant")
@@ -264,7 +267,7 @@ class Core(CoreSysAttributes):
                 await self.sys_addons.boot(AddonStartup.APPLICATION)

             # store new last boot
-            self._update_last_boot()
+            await self._update_last_boot()

         finally:
             # Add core tasks into scheduler
@@ -289,7 +292,7 @@ class Core(CoreSysAttributes):
         """Stop a running orchestration."""
         # store new last boot / prevent time adjustments
         if self.state in (CoreState.RUNNING, CoreState.SHUTDOWN):
-            self._update_last_boot()
+            await self._update_last_boot()
         if self.state in (CoreState.STOPPING, CoreState.CLOSE):
             return
@@ -357,10 +360,10 @@ class Core(CoreSysAttributes):
         if self.state in (CoreState.STOPPING, CoreState.SHUTDOWN):
             await self.sys_plugins.shutdown()

-    def _update_last_boot(self):
+    async def _update_last_boot(self):
         """Update last boot time."""
         self.sys_config.last_boot = self.sys_hardware.helper.last_boot
-        self.sys_config.save_data()
+        await self.sys_config.save_data()

     async def _retrieve_whoami(self, with_ssl: bool) -> WhoamiData | None:
         try:
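The `save_data()` to `await save_data()` change above recurs through nearly every file in this compare: `FileConfiguration.save_data` becomes a coroutine that performs the blocking JSON write off the event loop. A minimal sketch of what such an awaitable save can look like (the class and file layout here are illustrative, not Supervisor's actual implementation):

    import asyncio
    import json
    from pathlib import Path

    class FileConfig:
        def __init__(self, path: Path) -> None:
            self._path = path
            self._data: dict = {}

        async def save_data(self) -> None:
            # Serialize up front, then write in a worker thread to keep the loop free.
            payload = json.dumps(self._data)
            loop = asyncio.get_running_loop()
            await loop.run_in_executor(None, self._path.write_text, payload)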
@@ -10,12 +10,18 @@ from functools import partial
 import logging
 import os
 from types import MappingProxyType
-from typing import TYPE_CHECKING, Any, TypeVar
+from typing import TYPE_CHECKING, Any, Self, TypeVar

 import aiohttp

 from .config import CoreConfig
-from .const import ENV_SUPERVISOR_DEV, SERVER_SOFTWARE
+from .const import (
+    ENV_HOMEASSISTANT_REPOSITORY,
+    ENV_SUPERVISOR_DEV,
+    ENV_SUPERVISOR_MACHINE,
+    MACHINE_ID,
+    SERVER_SOFTWARE,
+)
 from .utils.dt import UTC, get_time_zone

 if TYPE_CHECKING:
@@ -102,6 +108,31 @@ class CoreSys:
         # Task factory attributes
         self._set_task_context: list[Callable[[Context], Context]] = []

+    async def load_config(self) -> Self:
+        """Load config in executor."""
+        await self.config.read_data()
+        return self
+
+    async def init_machine(self):
+        """Initialize machine information."""
+
+        def _load_machine_id() -> str | None:
+            if MACHINE_ID.exists():
+                return MACHINE_ID.read_text(encoding="utf-8").strip()
+            return None
+
+        self.machine_id = await self.run_in_executor(_load_machine_id)
+
+        # Set machine type
+        if os.environ.get(ENV_SUPERVISOR_MACHINE):
+            self.machine = os.environ[ENV_SUPERVISOR_MACHINE]
+        elif os.environ.get(ENV_HOMEASSISTANT_REPOSITORY):
+            self.machine = os.environ[ENV_HOMEASSISTANT_REPOSITORY][14:-14]
+            _LOGGER.warning(
+                "Missing SUPERVISOR_MACHINE environment variable. Fallback to deprecated extraction!"
+            )
+        _LOGGER.info("Setting up coresys for machine: %s", self.machine)
+
     @property
     def dev(self) -> bool:
         """Return True if we run dev mode."""
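`CoreSys.load_config` adopts the awaitable-initializer idiom seen across this compare: `__init__` stays synchronous and cheap, and the blocking read happens in a coroutine that returns `Self`, so construction and loading chain into one expression. A sketch of the idiom with a stand-in class (only the shape follows the diff):

    from typing import Self

    class Component:
        def __init__(self) -> None:
            self._config: dict = {}

        async def load_config(self) -> Self:
            # The real code reads the config file in an executor here.
            self._config = {"loaded": True}
            return self

    # Usage, mirroring the BoardManager hunk below:
    #     component = await Component().load_config()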
@@ -70,11 +70,11 @@ class BoardManager(DBusInterfaceProxy):
         await super().connect(bus)

         if self.board == BOARD_NAME_YELLOW:
-            self._board_proxy = Yellow()
+            self._board_proxy = await Yellow().load_config()
         elif self.board == BOARD_NAME_GREEN:
-            self._board_proxy = Green()
+            self._board_proxy = await Green().load_config()
         elif self.board == BOARD_NAME_SUPERVISED:
-            self._board_proxy = Supervised()
+            self._board_proxy = await Supervised().load_config()
         else:
             return
@@ -15,7 +15,7 @@ from ...exceptions import (
     HostNotSupportedError,
     NetworkInterfaceNotFound,
 )
-from ...utils.sentry import capture_exception
+from ...utils.sentry import async_capture_exception
 from ..const import (
     DBUS_ATTR_CONNECTION_ENABLED,
     DBUS_ATTR_DEVICES,
@@ -223,13 +223,13 @@ class NetworkManager(DBusInterfaceProxy):
                         device,
                         err,
                     )
-                    capture_exception(err)
+                    await async_capture_exception(err)
                     return
                 except Exception as err:  # pylint: disable=broad-except
                     _LOGGER.exception(
                         "Unknown error while processing %s: %s", device, err
                     )
-                    capture_exception(err)
+                    await async_capture_exception(err)
                     continue

             # Skip interface
@@ -66,8 +66,8 @@ class UDisks2Manager(DBusInterfaceProxy):
         try:
             await super().connect(bus)
             await self.udisks2_object_manager.connect(bus)
-        except DBusError:
-            _LOGGER.warning("Can't connect to udisks2")
+        except DBusError as err:
+            _LOGGER.critical("Can't connect to udisks2: %s", err)
         except (DBusServiceUnkownError, DBusInterfaceError):
             _LOGGER.warning(
                 "No udisks2 support on the host. Host control has been disabled."
@@ -53,7 +53,7 @@ class Discovery(CoreSysAttributes, FileConfiguration):
         _LOGGER.info("Loaded %d messages", len(messages))
         self.message_obj = messages

-    def save(self) -> None:
+    async def save(self) -> None:
         """Write discovery message into data file."""
         messages: list[dict[str, Any]] = []
         for message in self.list_messages:
@@ -61,7 +61,7 @@ class Discovery(CoreSysAttributes, FileConfiguration):

         self._data[ATTR_DISCOVERY].clear()
         self._data[ATTR_DISCOVERY].extend(messages)
-        self.save_data()
+        await self.save_data()

     def get(self, uuid: str) -> Message | None:
         """Return discovery message."""
@@ -72,7 +72,7 @@ class Discovery(CoreSysAttributes, FileConfiguration):
         """Return list of available discovery messages."""
         return list(self.message_obj.values())

-    def send(self, addon: Addon, service: str, config: dict[str, Any]) -> Message:
+    async def send(self, addon: Addon, service: str, config: dict[str, Any]) -> Message:
         """Send a discovery message to Home Assistant."""
         # Create message
         message = Message(addon.slug, service, config)
@@ -93,15 +93,15 @@ class Discovery(CoreSysAttributes, FileConfiguration):
             "Sending discovery to Home Assistant %s from %s", service, addon.slug
         )
         self.message_obj[message.uuid] = message
-        self.save()
+        await self.save()

         self.sys_create_task(self._push_discovery(message, CMD_NEW))
         return message

-    def remove(self, message: Message) -> None:
+    async def remove(self, message: Message) -> None:
         """Remove a discovery message from Home Assistant."""
         self.message_obj.pop(message.uuid, None)
-        self.save()
+        await self.save()

         _LOGGER.info(
             "Delete discovery to Home Assistant %s from %s",
@@ -42,7 +42,7 @@ from ..hardware.data import Device
 from ..jobs.const import JobCondition, JobExecutionLimit
 from ..jobs.decorator import Job
 from ..resolution.const import CGROUP_V2_VERSION, ContextType, IssueType, SuggestionType
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .const import (
     ENV_TIME,
     ENV_TOKEN,
@@ -606,7 +606,7 @@ class DockerAddon(DockerInterface):
             )
         except CoreDNSError as err:
             _LOGGER.warning("Can't update DNS for %s", self.name)
-            capture_exception(err)
+            await async_capture_exception(err)

         # Hardware Access
         if self.addon.static_devices:
@@ -664,7 +664,7 @@ class DockerAddon(DockerInterface):

     async def _build(self, version: AwesomeVersion, image: str | None = None) -> None:
         """Build a Docker container."""
-        build_env = AddonBuild(self.coresys, self.addon)
+        build_env = await AddonBuild(self.coresys, self.addon).load_config()
         if not build_env.is_valid:
             _LOGGER.error("Invalid build environment, can't build this add-on!")
             raise DockerError()
@@ -787,7 +787,7 @@ class DockerAddon(DockerInterface):
             await self.sys_plugins.dns.delete_host(self.addon.hostname)
         except CoreDNSError as err:
             _LOGGER.warning("Can't update DNS for %s", self.name)
-            capture_exception(err)
+            await async_capture_exception(err)

         # Hardware
         if self._hw_listener:
@@ -42,7 +42,7 @@ from ..jobs.const import JOB_GROUP_DOCKER_INTERFACE, JobExecutionLimit
 from ..jobs.decorator import Job
 from ..jobs.job_group import JobGroup
 from ..resolution.const import ContextType, IssueType, SuggestionType
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .const import ContainerState, RestartPolicy
 from .manager import CommandReturn
 from .monitor import DockerContainerStateEvent
@@ -278,7 +278,7 @@ class DockerInterface(JobGroup):
                 f"Can't install {image}:{version!s}: {err}", _LOGGER.error
             ) from err
         except (docker.errors.DockerException, requests.RequestException) as err:
-            capture_exception(err)
+            await async_capture_exception(err)
             raise DockerError(
                 f"Unknown error with {image}:{version!s} -> {err!s}", _LOGGER.error
             ) from err
@@ -394,7 +394,7 @@ class DockerInterface(JobGroup):
             )
         except DockerNotFound as err:
             # If image is missing, capture the exception as this shouldn't happen
-            capture_exception(err)
+            await async_capture_exception(err)
             raise

         # Store metadata
@@ -5,7 +5,7 @@ from ipaddress import IPv4Address
 import logging
 import os
 from pathlib import Path
-from typing import Any, Final
+from typing import Any, Final, Self

 import attr
 from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
@@ -113,6 +113,11 @@ class DockerAPI:
         self.config: DockerConfig = DockerConfig()
         self._monitor: DockerMonitor = DockerMonitor(coresys)

+    async def load_config(self) -> Self:
+        """Load config in executor."""
+        await self.config.read_data()
+        return self
+
     @property
     def images(self) -> ImageCollection:
         """Return API images."""
@@ -403,7 +403,11 @@ class DBusParseError(DBusError):


 class DBusTimeoutError(DBusError):
-    """D-Bus call timed out."""
+    """D-Bus call timeout."""
+
+
+class DBusTimedOutError(DBusError):
+    """D-Bus call timed out (typically when systemd D-Bus service activation fails)."""


 class DBusNoReplyError(DBusError):
@@ -663,6 +667,14 @@ class BackupFileNotFoundError(BackupError):
     """Raise if the backup file hasn't been found."""


+class BackupPermissionError(BackupError):
+    """Raise if we could not write the backup due to a permission error."""
+
+
+class BackupFileExistError(BackupError):
+    """Raise if the backup file already exists."""
+
+
 # Security
@@ -49,17 +49,26 @@ class HwDisk(CoreSysAttributes):
         return False

     def get_disk_total_space(self, path: str | Path) -> float:
-        """Return total space (GiB) on disk for path."""
+        """Return total space (GiB) on disk for path.
+
+        Must be run in executor.
+        """
         total, _, _ = shutil.disk_usage(path)
         return round(total / (1024.0**3), 1)

     def get_disk_used_space(self, path: str | Path) -> float:
-        """Return used space (GiB) on disk for path."""
+        """Return used space (GiB) on disk for path.
+
+        Must be run in executor.
+        """
         _, used, _ = shutil.disk_usage(path)
         return round(used / (1024.0**3), 1)

     def get_disk_free_space(self, path: str | Path) -> float:
-        """Return free space (GiB) on disk for path."""
+        """Return free space (GiB) on disk for path.
+
+        Must be run in executor.
+        """
         _, _, free = shutil.disk_usage(path)
         return round(free / (1024.0**3), 1)
@@ -113,7 +122,10 @@ class HwDisk(CoreSysAttributes):
         return life_time_value * 10.0

     def get_disk_life_time(self, path: str | Path) -> float:
-        """Return life time estimate of the underlying SSD drive."""
+        """Return life time estimate of the underlying SSD drive.
+
+        Must be run in executor.
+        """
         mount_source = self._get_mount_source(str(path))
         if mount_source == "overlay":
             return None
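The new docstrings make the contract explicit: these `shutil.disk_usage` helpers block, so callers must dispatch them to a thread rather than call them on the event loop. A minimal, self-contained version of the pattern (the helper itself is illustrative):

    import asyncio
    import shutil

    def get_disk_free_gib(path: str) -> float:
        # Blocking syscall: fine in a worker thread, not on the event loop.
        _, _, free = shutil.disk_usage(path)
        return round(free / (1024.0**3), 1)

    async def free_space(path: str) -> float:
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, get_disk_free_gib, path)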
@@ -33,7 +33,7 @@ from ..jobs.decorator import Job, JobCondition
 from ..jobs.job_group import JobGroup
 from ..resolution.const import ContextType, IssueType
 from ..utils import convert_to_ascii
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .const import (
     LANDINGPAGE,
     SAFE_MODE_FILENAME,
@@ -110,7 +110,7 @@ class HomeAssistantCore(JobGroup):
         else:
             self.sys_homeassistant.version = self.instance.version
             self.sys_homeassistant.image = self.instance.image
-            self.sys_homeassistant.save_data()
+            await self.sys_homeassistant.save_data()

             # Start landingpage
             if self.instance.version != LANDINGPAGE:
@@ -139,7 +139,7 @@ class HomeAssistantCore(JobGroup):
             _LOGGER.info("Using preinstalled landingpage")
             self.sys_homeassistant.version = LANDINGPAGE
             self.sys_homeassistant.image = self.instance.image
-            self.sys_homeassistant.save_data()
+            await self.sys_homeassistant.save_data()
             return

         _LOGGER.info("Setting up Home Assistant landingpage")
@@ -160,14 +160,14 @@ class HomeAssistantCore(JobGroup):
             except (DockerError, JobException):
                 pass
             except Exception as err:  # pylint: disable=broad-except
-                capture_exception(err)
+                await async_capture_exception(err)

             _LOGGER.warning("Failed to install landingpage, retrying after 30sec")
             await asyncio.sleep(30)

         self.sys_homeassistant.version = LANDINGPAGE
         self.sys_homeassistant.image = self.sys_updater.image_homeassistant
-        self.sys_homeassistant.save_data()
+        await self.sys_homeassistant.save_data()

     @Job(
         name="home_assistant_core_install",
@@ -192,7 +192,7 @@ class HomeAssistantCore(JobGroup):
             except (DockerError, JobException):
                 pass
             except Exception as err:  # pylint: disable=broad-except
-                capture_exception(err)
+                await async_capture_exception(err)

             _LOGGER.warning("Error on Home Assistant installation. Retrying in 30sec")
             await asyncio.sleep(30)
@@ -200,7 +200,7 @@ class HomeAssistantCore(JobGroup):
         _LOGGER.info("Home Assistant docker now installed")
         self.sys_homeassistant.version = self.instance.version
         self.sys_homeassistant.image = self.sys_updater.image_homeassistant
-        self.sys_homeassistant.save_data()
+        await self.sys_homeassistant.save_data()

         # finishing
         try:
@@ -270,7 +270,7 @@ class HomeAssistantCore(JobGroup):
         _LOGGER.info("Successfully started Home Assistant %s", to_version)

         # Successful - last step
-        self.sys_homeassistant.save_data()
+        await self.sys_homeassistant.save_data()
         with suppress(DockerError):
             await self.instance.cleanup(old_image=old_image)
@@ -339,7 +339,7 @@ class HomeAssistantCore(JobGroup):
         else:
             # Create new API token
             self.sys_homeassistant.supervisor_token = secrets.token_hex(56)
-            self.sys_homeassistant.save_data()
+            await self.sys_homeassistant.save_data()

         # Write audio settings
         self.sys_homeassistant.write_pulse()
@@ -557,7 +557,7 @@ class HomeAssistantCore(JobGroup):
             try:
                 await self.start()
             except HomeAssistantError as err:
-                capture_exception(err)
+                await async_capture_exception(err)
             else:
                 break
@@ -569,7 +569,7 @@ class HomeAssistantCore(JobGroup):
             except HomeAssistantError as err:
                 attempts = attempts + 1
                 _LOGGER.error("Watchdog restart of Home Assistant failed!")
-                capture_exception(err)
+                await async_capture_exception(err)
             else:
                 break
@@ -13,7 +13,7 @@ from typing import Any
 from uuid import UUID

 from awesomeversion import AwesomeVersion, AwesomeVersionException
-from securetar import atomic_contents_add, secure_path
+from securetar import AddFileError, atomic_contents_add, secure_path
 import voluptuous as vol
 from voluptuous.humanize import humanize_error
@@ -435,7 +435,7 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
                     file_filter=_is_excluded_by_filter,
                     arcname="data",
                 )
-        except (tarfile.TarError, OSError) as err:
+        except (tarfile.TarError, OSError, AddFileError) as err:
             raise HomeAssistantBackupError(
                 f"Can't backup Home Assistant Core config folder: {str(err)}",
                 _LOGGER.error,
@@ -71,7 +71,9 @@ class AppArmorControl(CoreSysAttributes):

     async def load_profile(self, profile_name: str, profile_file: Path) -> None:
         """Load/update a new or existing profile into AppArmor."""
-        if not validate_profile(profile_name, profile_file):
+        if not await self.sys_run_in_executor(
+            validate_profile, profile_name, profile_file
+        ):
             raise HostAppArmorError(
                 f"AppArmor profile '{profile_name}' is not valid", _LOGGER.error
             )
@@ -102,39 +102,39 @@ class InfoCenter(CoreSysAttributes):
         """Return the boot timestamp."""
         return self.sys_dbus.systemd.boot_timestamp

-    @property
-    def total_space(self) -> float:
-        """Return total space (GiB) on disk for supervisor data directory."""
-        return self.sys_hardware.disk.get_disk_total_space(
-            self.coresys.config.path_supervisor
-        )
-
-    @property
-    def used_space(self) -> float:
-        """Return used space (GiB) on disk for supervisor data directory."""
-        return self.sys_hardware.disk.get_disk_used_space(
-            self.coresys.config.path_supervisor
-        )
-
-    @property
-    def free_space(self) -> float:
-        """Return available space (GiB) on disk for supervisor data directory."""
-        return self.sys_hardware.disk.get_disk_free_space(
-            self.coresys.config.path_supervisor
-        )
-
-    @property
-    def disk_life_time(self) -> float:
-        """Return the estimated life-time usage (in %) of the SSD storing the data directory."""
-        return self.sys_hardware.disk.get_disk_life_time(
-            self.coresys.config.path_supervisor
-        )
-
     @property
     def virtualization(self) -> str | None:
         """Return virtualization hypervisor being used."""
         return self.sys_dbus.systemd.virtualization

+    async def total_space(self) -> float:
+        """Return total space (GiB) on disk for supervisor data directory."""
+        return await self.sys_run_in_executor(
+            self.sys_hardware.disk.get_disk_total_space,
+            self.coresys.config.path_supervisor,
+        )
+
+    async def used_space(self) -> float:
+        """Return used space (GiB) on disk for supervisor data directory."""
+        return await self.sys_run_in_executor(
+            self.sys_hardware.disk.get_disk_used_space,
+            self.coresys.config.path_supervisor,
+        )
+
+    async def free_space(self) -> float:
+        """Return available space (GiB) on disk for supervisor data directory."""
+        return await self.sys_run_in_executor(
+            self.sys_hardware.disk.get_disk_free_space,
+            self.coresys.config.path_supervisor,
+        )
+
+    async def disk_life_time(self) -> float:
+        """Return the estimated life-time usage (in %) of the SSD storing the data directory."""
+        return await self.sys_run_in_executor(
+            self.sys_hardware.disk.get_disk_life_time,
+            self.coresys.config.path_supervisor,
+        )
+
     async def get_dmesg(self) -> bytes:
         """Return host dmesg output."""
         proc = await asyncio.create_subprocess_shell(
@@ -82,7 +82,7 @@ class Ingress(FileConfiguration, CoreSysAttributes):

     async def unload(self) -> None:
         """Shutdown sessions."""
-        self.save_data()
+        await self.save_data()

     def _cleanup_sessions(self) -> None:
         """Remove unused sessions."""
@@ -170,16 +170,16 @@ class Ingress(FileConfiguration, CoreSysAttributes):

         # Save port for next time
         self.ports[addon_slug] = port
-        self.save_data()
+        await self.save_data()
         return port

-    def del_dynamic_port(self, addon_slug: str) -> None:
+    async def del_dynamic_port(self, addon_slug: str) -> None:
         """Remove a previously assigned dynamic port."""
         if addon_slug not in self.ports:
             return

         del self.ports[addon_slug]
-        self.save_data()
+        await self.save_data()

     async def update_hass_panel(self, addon: Addon):
         """Return True if Home Assistant is up and running."""
@@ -20,7 +20,6 @@ from ..exceptions import HassioError, JobNotFound, JobStartException
 from ..homeassistant.const import WSEvent
 from ..utils.common import FileConfiguration
 from ..utils.dt import utcnow
-from ..utils.sentry import capture_exception
 from .const import ATTR_IGNORE_CONDITIONS, FILE_CONFIG_JOBS, JobCondition
 from .validate import SCHEMA_JOBS_CONFIG
@@ -191,9 +190,10 @@ class JobManager(FileConfiguration, CoreSysAttributes):
         """
         try:
             return self.get_job(_CURRENT_JOB.get())
-        except (LookupError, JobNotFound) as err:
-            capture_exception(err)
-            raise RuntimeError("No job for the current asyncio task!") from None
+        except (LookupError, JobNotFound):
+            raise RuntimeError(
+                "No job for the current asyncio task!", _LOGGER.critical
+            ) from None

     @property
     def is_job(self) -> bool:
@@ -18,7 +18,7 @@ from ..exceptions import (
 )
 from ..host.const import HostFeature
 from ..resolution.const import MINIMUM_FREE_SPACE_THRESHOLD, ContextType, IssueType
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from . import SupervisorJob
 from .const import JobCondition, JobExecutionLimit
 from .job_group import JobGroup
@@ -313,7 +313,7 @@ class Job(CoreSysAttributes):
                     except Exception as err:
                         _LOGGER.exception("Unhandled exception: %s", err)
                         job.capture_error()
-                        capture_exception(err)
+                        await async_capture_exception(err)
                         raise JobException() from err
                     finally:
                         self._release_exception_limits()
@@ -373,13 +373,14 @@ class Job(CoreSysAttributes):

         if (
             JobCondition.FREE_SPACE in used_conditions
-            and coresys.sys_host.info.free_space < MINIMUM_FREE_SPACE_THRESHOLD
+            and (free_space := await coresys.sys_host.info.free_space())
+            < MINIMUM_FREE_SPACE_THRESHOLD
         ):
             coresys.sys_resolution.create_issue(
                 IssueType.FREE_SPACE, ContextType.SYSTEM
             )
             raise JobConditionException(
-                f"'{method_name}' blocked from execution, not enough free space ({coresys.sys_host.info.free_space}GB) left on the device"
+                f"'{method_name}' blocked from execution, not enough free space ({free_space}GB) left on the device"
             )

         if JobCondition.INTERNET_SYSTEM in used_conditions:
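Since `free_space` is now a coroutine, the job condition above awaits it once and captures the result with an assignment expression, so the error message can reuse the value without a second await. The same shape in isolation (threshold and fetcher are illustrative stand-ins):

    MINIMUM_FREE_SPACE_GB = 1.0  # illustrative threshold

    async def fetch_free_space() -> float:
        return 0.5  # stand-in for the awaited disk query

    async def guard() -> None:
        # Await once, bind the value, compare, and reuse it in the message.
        if (free_space := await fetch_free_space()) < MINIMUM_FREE_SPACE_GB:
            raise RuntimeError(f"not enough free space ({free_space}GB) left")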
@@ -1,25 +1,42 @@
 """Filter tools."""

+import ipaddress
 import os
 import re

 from aiohttp import hdrs
 import attr

-from ..const import HEADER_TOKEN, HEADER_TOKEN_OLD, CoreState
+from ..const import DOCKER_NETWORK_MASK, HEADER_TOKEN, HEADER_TOKEN_OLD, CoreState
 from ..coresys import CoreSys
 from ..exceptions import AddonConfigurationError

 RE_URL: re.Pattern = re.compile(r"(\w+:\/\/)(.*\.\w+)(.*)")


+def sanitize_host(host: str) -> str:
+    """Return a sanitized host."""
+    try:
+        # Allow internal URLs
+        ip = ipaddress.ip_address(host)
+        if ip in ipaddress.ip_network(DOCKER_NETWORK_MASK):
+            return host
+    except ValueError:
+        pass
+
+    return "sanitized-host.invalid"
+
+
 def sanitize_url(url: str) -> str:
     """Return a sanitized url."""
-    if not re.match(RE_URL, url):
+    match = re.match(RE_URL, url)
+    if not match:
         # Not a URL, just return it back
         return url

-    return re.sub(RE_URL, r"\1example.com\3", url)
+    host = sanitize_host(match.group(2))
+
+    return f"{match.group(1)}{host}{match.group(3)}"


 def filter_data(coresys: CoreSys, event: dict, hint: dict) -> dict:
@@ -35,6 +52,12 @@ def filter_data(coresys: CoreSys, event: dict, hint: dict) -> dict:
         return None

     event.setdefault("extra", {}).update({"os.environ": dict(os.environ)})
+    event.setdefault("user", {}).update({"id": coresys.machine_id})
+    event.setdefault("tags", {}).update(
+        {
+            "machine": coresys.machine,
+        }
+    )

     # Not full startup - missing information
     if coresys.core.state in (CoreState.INITIALIZE, CoreState.SETUP):
@@ -47,7 +70,6 @@ def filter_data(coresys: CoreSys, event: dict, hint: dict) -> dict:
     ]

     # Update information
-    event.setdefault("user", {}).update({"id": coresys.machine_id})
     event.setdefault("contexts", {}).update(
         {
             "supervisor": {
@@ -58,7 +80,9 @@ def filter_data(coresys: CoreSys, event: dict, hint: dict) -> dict:
                 "arch": coresys.arch.default,
                 "board": coresys.os.board,
                 "deployment": coresys.host.info.deployment,
-                "disk_free_space": coresys.host.info.free_space,
+                "disk_free_space": coresys.hardware.disk.get_disk_free_space(
+                    coresys.config.path_supervisor
+                ),
                 "host": coresys.host.info.operating_system,
                 "kernel": coresys.host.info.kernel,
                 "machine": coresys.machine,
@@ -92,35 +116,28 @@ def filter_data(coresys: CoreSys, event: dict, hint: dict) -> dict:
             {plugin.slug: plugin.version for plugin in coresys.plugins.all_plugins}
         )

-    event.setdefault("tags", []).extend(
-        [
-            ["installation_type", "os" if coresys.os.available else "supervised"],
-            ["machine", coresys.machine],
-        ],
+    event["tags"].update(
+        {
+            "installation_type": "os" if coresys.os.available else "supervised",
+        }
     )

     # Sanitize event
-    for i, tag in enumerate(event.get("tags", [])):
-        key, value = tag
-        if key == "url":
-            event["tags"][i] = [key, sanitize_url(value)]
-
     if event.get("request"):
         if event["request"].get("url"):
             event["request"]["url"] = sanitize_url(event["request"]["url"])

-        for i, header in enumerate(event["request"].get("headers", [])):
-            key, value = header
-            if key == hdrs.REFERER:
-                event["request"]["headers"][i] = [key, sanitize_url(value)]
-
-            if key == HEADER_TOKEN:
-                event["request"]["headers"][i] = [key, "XXXXXXXXXXXXXXXXXXX"]
-
-            if key == HEADER_TOKEN_OLD:
-                event["request"]["headers"][i] = [key, "XXXXXXXXXXXXXXXXXXX"]
-
-            if key in [hdrs.HOST, hdrs.X_FORWARDED_HOST]:
-                event["request"]["headers"][i] = [key, "example.com"]
+        headers = event["request"].get("headers", {})
+        if hdrs.REFERER in headers:
+            headers[hdrs.REFERER] = sanitize_url(headers[hdrs.REFERER])
+        if HEADER_TOKEN in headers:
+            headers[HEADER_TOKEN] = "XXXXXXXXXXXXXXXXXXX"
+        if HEADER_TOKEN_OLD in headers:
+            headers[HEADER_TOKEN_OLD] = "XXXXXXXXXXXXXXXXXXX"
+        if hdrs.HOST in headers:
+            headers[hdrs.HOST] = sanitize_host(headers[hdrs.HOST])
+        if hdrs.X_FORWARDED_HOST in headers:
+            headers[hdrs.X_FORWARDED_HOST] = sanitize_host(
+                headers[hdrs.X_FORWARDED_HOST]
+            )

     return event
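With `sanitize_host`, URLs and Host headers are scrubbed consistently: addresses inside the Docker network pass through, everything else collapses to a fixed placeholder instead of the old `example.com` substitution. Expected behaviour of the rewritten helpers, assuming the regex and `DOCKER_NETWORK_MASK` from the diff (the concrete addresses are illustrative):

    # sanitize_url("https://my-instance.duckdns.org/api")  -> "https://sanitized-host.invalid/api"
    # sanitize_url("not a url")                             -> "not a url"   (no scheme, returned unchanged)
    # sanitize_host("172.30.32.1")                          -> "172.30.32.1" (if inside DOCKER_NETWORK_MASK)
    # sanitize_host("my-instance.duckdns.org")              -> "sanitized-host.invalid"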
@@ -19,7 +19,7 @@ from ..homeassistant.const import LANDINGPAGE
 from ..jobs.decorator import Job, JobCondition, JobExecutionLimit
 from ..plugins.const import PLUGIN_UPDATE_CONDITIONS
 from ..utils.dt import utcnow
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception

 _LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -224,7 +224,7 @@ class Tasks(CoreSysAttributes):
                 await self.sys_homeassistant.core.restart()
             except HomeAssistantError as err:
                 if reanimate_fails == 0 or safe_mode:
-                    capture_exception(err)
+                    await async_capture_exception(err)

                 if safe_mode:
                     _LOGGER.critical(
@@ -341,7 +341,7 @@ class Tasks(CoreSysAttributes):
             await (await addon.restart())
         except AddonsError as err:
             _LOGGER.error("%s watchdog reanimation failed with %s", addon.slug, err)
-            capture_exception(err)
+            await async_capture_exception(err)
         finally:
             self._cache[addon.slug] = 0
@@ -5,6 +5,7 @@ from collections.abc import Awaitable
 from dataclasses import dataclass
 import logging
 from pathlib import PurePath
+from typing import Self

 from attr import evolve
@@ -17,7 +18,7 @@ from ..jobs.const import JobCondition
 from ..jobs.decorator import Job
 from ..resolution.const import SuggestionType
 from ..utils.common import FileConfiguration
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .const import (
     ATTR_DEFAULT_BACKUP_MOUNT,
     ATTR_MOUNTS,
@@ -49,11 +50,17 @@ class MountManager(FileConfiguration, CoreSysAttributes):
         )

         self.coresys: CoreSys = coresys
+        self._mounts: dict[str, Mount] = {}
+        self._bound_mounts: dict[str, BoundMount] = {}
+
+    async def load_config(self) -> Self:
+        """Load config in executor."""
+        await super().load_config()
         self._mounts: dict[str, Mount] = {
-            mount[ATTR_NAME]: Mount.from_dict(coresys, mount)
+            mount[ATTR_NAME]: Mount.from_dict(self.coresys, mount)
             for mount in self._data[ATTR_MOUNTS]
         }
-        self._bound_mounts: dict[str, BoundMount] = {}
+        return self

     @property
     def mounts(self) -> list[Mount]:
@@ -170,7 +177,7 @@ class MountManager(FileConfiguration, CoreSysAttributes):
             if mounts[i].failed_issue in self.sys_resolution.issues:
                 continue
             if not isinstance(errors[i], MountError):
-                capture_exception(errors[i])
+                await async_capture_exception(errors[i])

             self.sys_resolution.add_issue(
                 evolve(mounts[i].failed_issue),
@@ -303,9 +310,9 @@ class MountManager(FileConfiguration, CoreSysAttributes):
             )
             await bound_mount.bind_mount.load()

-    def save_data(self) -> None:
+    async def save_data(self) -> None:
         """Store data to configuration file."""
         self._data[ATTR_MOUNTS] = [
             mount.to_dict(skip_secrets=False) for mount in self.mounts
         ]
-        super().save_data()
+        await super().save_data()
@@ -40,7 +40,7 @@ from ..exceptions import (
 )
 from ..resolution.const import ContextType, IssueType
 from ..resolution.data import Issue
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .const import (
     ATTR_PATH,
     ATTR_READ_ONLY,
@@ -208,7 +208,7 @@ class Mount(CoreSysAttributes, ABC):
         try:
             self._state = await self.unit.get_active_state()
         except DBusError as err:
-            capture_exception(err)
+            await async_capture_exception(err)
             raise MountError(
                 f"Could not get active state of mount due to: {err!s}"
             ) from err
@@ -221,7 +221,7 @@ class Mount(CoreSysAttributes, ABC):
             self._unit = None
             self._state = None
         except DBusError as err:
-            capture_exception(err)
+            await async_capture_exception(err)
             raise MountError(f"Could not get mount unit due to: {err!s}") from err
         return self.unit
@@ -278,21 +278,25 @@ class Mount(CoreSysAttributes, ABC):
         """Mount using systemd."""
         # If supervisor can see where it will mount, ensure there's an empty folder there
         if self.local_where:
-            if not self.local_where.exists():
-                _LOGGER.info(
-                    "Creating folder for mount: %s", self.local_where.as_posix()
-                )
-                self.local_where.mkdir(parents=True)
-            elif not self.local_where.is_dir():
-                raise MountInvalidError(
-                    f"Cannot mount {self.name} at {self.local_where.as_posix()} as it is not a directory",
-                    _LOGGER.error,
-                )
-            elif any(self.local_where.iterdir()):
-                raise MountInvalidError(
-                    f"Cannot mount {self.name} at {self.local_where.as_posix()} because it is not empty",
-                    _LOGGER.error,
-                )
+
+            def ensure_empty_folder() -> None:
+                if not self.local_where.exists():
+                    _LOGGER.info(
+                        "Creating folder for mount: %s", self.local_where.as_posix()
+                    )
+                    self.local_where.mkdir(parents=True)
+                elif not self.local_where.is_dir():
+                    raise MountInvalidError(
+                        f"Cannot mount {self.name} at {self.local_where.as_posix()} as it is not a directory",
+                        _LOGGER.error,
+                    )
+                elif any(self.local_where.iterdir()):
+                    raise MountInvalidError(
+                        f"Cannot mount {self.name} at {self.local_where.as_posix()} because it is not empty",
+                        _LOGGER.error,
+                    )
+
+            await self.sys_run_in_executor(ensure_empty_folder)

         try:
             options = (
@@ -488,17 +492,23 @@ class CIFSMount(NetworkMount):
     async def mount(self) -> None:
         """Mount using systemd."""
         if self.username and self.password:
-            if not self.path_credentials.exists():
-                self.path_credentials.touch(mode=0o600)
-
-            with self.path_credentials.open(mode="w") as cred_file:
-                cred_file.write(f"username={self.username}\npassword={self.password}")
+
+            def write_credentials() -> None:
+                if not self.path_credentials.exists():
+                    self.path_credentials.touch(mode=0o600)
+
+                with self.path_credentials.open(mode="w") as cred_file:
+                    cred_file.write(
+                        f"username={self.username}\npassword={self.password}"
+                    )
+
+            await self.sys_run_in_executor(write_credentials)

         await super().mount()

     async def unmount(self) -> None:
         """Unmount using systemd."""
-        self.path_credentials.unlink(missing_ok=True)
+        await self.sys_run_in_executor(self.path_credentials.unlink, missing_ok=True)
         await super().unmount()
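Both mount refactors above wrap a sequence of blocking filesystem calls in a local function and hand the whole closure to the executor, so the checks run atomically from the event loop's point of view and exceptions still propagate through the await. The shape, reduced to its essentials (path and error type are illustrative):

    import asyncio
    from pathlib import Path

    async def ensure_empty_dir(where: Path) -> None:
        def _check() -> None:
            # Runs in a worker thread; anything raised here propagates to the caller.
            if not where.exists():
                where.mkdir(parents=True)
            elif any(where.iterdir()):
                raise RuntimeError(f"{where} is not empty")

        loop = asyncio.get_running_loop()
        await loop.run_in_executor(None, _check)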
@@ -26,7 +26,7 @@ from ..jobs.const import JobCondition, JobExecutionLimit
 from ..jobs.decorator import Job
 from ..resolution.checks.disabled_data_disk import CheckDisabledDataDisk
 from ..resolution.checks.multiple_data_disks import CheckMultipleDataDisks
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .const import (
     FILESYSTEM_LABEL_DATA_DISK,
     FILESYSTEM_LABEL_DISABLED_DATA_DISK,
@@ -189,12 +189,13 @@ class DataDisk(CoreSysAttributes):
             await self.sys_dbus.agent.datadisk.reload_device()

         # Register for signals on devices added/removed
-        self.sys_dbus.udisks2.udisks2_object_manager.dbus.object_manager.on_interfaces_added(
-            self._udisks2_interface_added
-        )
-        self.sys_dbus.udisks2.udisks2_object_manager.dbus.object_manager.on_interfaces_removed(
-            self._udisks2_interface_removed
-        )
+        if self.sys_dbus.udisks2.is_connected:
+            self.sys_dbus.udisks2.udisks2_object_manager.dbus.object_manager.on_interfaces_added(
+                self._udisks2_interface_added
+            )
+            self.sys_dbus.udisks2.udisks2_object_manager.dbus.object_manager.on_interfaces_removed(
+                self._udisks2_interface_removed
+            )

     @Job(
         name="data_disk_migrate",
@@ -336,7 +337,7 @@ class DataDisk(CoreSysAttributes):
         try:
             await block_device.format(FormatType.GPT)
         except DBusError as err:
-            capture_exception(err)
+            await async_capture_exception(err)
             raise HassOSDataDiskError(
                 f"Could not format {new_disk.id}: {err!s}", _LOGGER.error
             ) from err
@@ -353,7 +354,7 @@ class DataDisk(CoreSysAttributes):
                 0, 0, LINUX_DATA_PARTITION_GUID, PARTITION_NAME_EXTERNAL_DATA_DISK
             )
         except DBusError as err:
-            capture_exception(err)
+            await async_capture_exception(err)
             raise HassOSDataDiskError(
                 f"Could not create new data partition: {err!s}", _LOGGER.error
             ) from err
@@ -24,7 +24,7 @@ from ..exceptions import (
 from ..jobs.const import JobCondition, JobExecutionLimit
 from ..jobs.decorator import Job
 from ..resolution.const import UnhealthyReason
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .data_disk import DataDisk

 _LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -217,12 +217,15 @@ class OSManager(CoreSysAttributes):
         )

         # Download RAUCB file
-        with raucb.open("wb") as ota_file:
+        ota_file = await self.sys_run_in_executor(raucb.open, "wb")
+        try:
             while True:
                 chunk = await request.content.read(1_048_576)
                 if not chunk:
                     break
-                ota_file.write(chunk)
+                await self.sys_run_in_executor(ota_file.write, chunk)
+        finally:
+            await self.sys_run_in_executor(ota_file.close)

         _LOGGER.info("Completed download of OTA update file %s", raucb)
@@ -382,7 +385,7 @@ class OSManager(CoreSysAttributes):
                 RaucState.ACTIVE, self.get_slot_name(boot_name)
             )
         except DBusError as err:
-            capture_exception(err)
+            await async_capture_exception(err)
             raise HassOSSlotUpdateError(
                 f"Can't mark {boot_name} as active!", _LOGGER.error
             ) from err
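The OTA download now opens, writes, and closes the target file through the executor while chunks are still read asynchronously from the HTTP response, and the try/finally guarantees the handle is closed even if a read fails mid-stream. A generic sketch of the same chunked pattern with aiohttp (URL and destination are illustrative):

    import asyncio
    from pathlib import Path

    import aiohttp

    async def download(url: str, dest: Path) -> None:
        loop = asyncio.get_running_loop()
        async with aiohttp.ClientSession() as session, session.get(url) as resp:
            # Open/write/close are blocking, so each call goes to a worker thread.
            fh = await loop.run_in_executor(None, dest.open, "wb")
            try:
                while chunk := await resp.content.read(1_048_576):
                    await loop.run_in_executor(None, fh.write, chunk)
            finally:
                await loop.run_in_executor(None, fh.close)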
@@ -27,7 +27,7 @@ from ..jobs.const import JobExecutionLimit
 from ..jobs.decorator import Job
 from ..resolution.const import UnhealthyReason
 from ..utils.json import write_json_file
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .base import PluginBase
 from .const import (
     FILE_HASSIO_AUDIO,
@@ -163,7 +163,7 @@ class PluginAudio(PluginBase):
             await self.instance.install(self.version)
         except DockerError as err:
             _LOGGER.error("Repair of Audio failed")
-            capture_exception(err)
+            await async_capture_exception(err)

     def pulse_client(self, input_profile=None, output_profile=None) -> str:
         """Generate an /etc/pulse/client.conf data."""
@@ -15,7 +15,7 @@ from ..docker.interface import DockerInterface
 from ..docker.monitor import DockerContainerStateEvent
 from ..exceptions import DockerError, PluginError
 from ..utils.common import FileConfiguration
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .const import WATCHDOG_MAX_ATTEMPTS, WATCHDOG_RETRY_SECONDS

 _LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -129,7 +129,7 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
             except PluginError as err:
                 attempts = attempts + 1
                 _LOGGER.error("Watchdog restart of %s plugin failed!", self.slug)
-                capture_exception(err)
+                await async_capture_exception(err)
             else:
                 break
@@ -179,7 +179,7 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
         else:
             self.version = self.instance.version
             self.image = self.default_image
-            self.save_data()
+            await self.save_data()

         # Run plugin
         with suppress(PluginError):
@@ -208,7 +208,7 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
         _LOGGER.info("%s plugin now installed", self.slug)
         self.version = self.instance.version
         self.image = self.default_image
-        self.save_data()
+        await self.save_data()

     async def update(self, version: str | None = None) -> None:
         """Update system plugin."""
@@ -224,7 +224,7 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
         await self.instance.update(version, image=self.default_image)
         self.version = self.instance.version
         self.image = self.default_image
-        self.save_data()
+        await self.save_data()

         # Cleanup
         with suppress(DockerError):
@@ -17,7 +17,7 @@ from ..docker.stats import DockerStats
 from ..exceptions import CliError, CliJobError, CliUpdateError, DockerError
 from ..jobs.const import JobExecutionLimit
 from ..jobs.decorator import Job
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .base import PluginBase
 from .const import (
     FILE_HASSIO_CLI,
@@ -73,7 +73,7 @@ class PluginCli(PluginBase):
         """Run cli."""
         # Create new API token
         self._data[ATTR_ACCESS_TOKEN] = secrets.token_hex(56)
-        self.save_data()
+        await self.save_data()

         # Start Instance
         _LOGGER.info("Starting CLI plugin")
@@ -114,7 +114,7 @@ class PluginCli(PluginBase):
             await self.instance.install(self.version)
         except DockerError as err:
             _LOGGER.error("Repair of HA cli failed")
-            capture_exception(err)
+            await async_capture_exception(err)

     @Job(
         name="plugin_cli_restart_after_problem",
@@ -33,7 +33,7 @@ from ..jobs.const import JobExecutionLimit
 from ..jobs.decorator import Job
 from ..resolution.const import ContextType, IssueType, SuggestionType, UnhealthyReason
 from ..utils.json import write_json_file
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from ..validate import dns_url
 from .base import PluginBase
 from .const import (
@@ -226,7 +226,7 @@ class PluginDns(PluginBase):
         # Reset manually defined DNS
         self.servers.clear()
         self.fallback = True
-        self.save_data()
+        await self.save_data()

         # Resets hosts
         with suppress(OSError):
@@ -410,7 +410,7 @@ class PluginDns(PluginBase):
             await self.instance.install(self.version)
         except DockerError as err:
             _LOGGER.error("Repair of CoreDNS failed")
-            capture_exception(err)
+            await async_capture_exception(err)

     def _write_resolv(self, resolv_conf: Path) -> None:
         """Update/Write resolv.conf file."""
@@ -2,11 +2,12 @@

 import asyncio
 import logging
+from typing import Self

 from ..coresys import CoreSys, CoreSysAttributes
 from ..exceptions import HassioError
 from ..resolution.const import ContextType, IssueType, SuggestionType
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .audio import PluginAudio
 from .base import PluginBase
 from .cli import PluginCli
@@ -30,6 +31,11 @@ class PluginManager(CoreSysAttributes):
         self._observer: PluginObserver = PluginObserver(coresys)
         self._multicast: PluginMulticast = PluginMulticast(coresys)

+    async def load_config(self) -> Self:
+        """Load config in executor."""
+        await asyncio.gather(*[plugin.read_data() for plugin in self.all_plugins])
+        return self
+
     @property
     def all_plugins(self) -> list[PluginBase]:
         """Return list of all plugins."""
@@ -74,7 +80,7 @@ class PluginManager(CoreSysAttributes):
                     reference=plugin.slug,
                     suggestions=[SuggestionType.EXECUTE_REPAIR],
                 )
-                capture_exception(err)
+                await async_capture_exception(err)

         # Exit if supervisor out of date. Plugins can't update until then
         if self.sys_supervisor.need_update:
@@ -87,17 +93,18 @@ class PluginManager(CoreSysAttributes):
                 continue

             _LOGGER.info(
-                "%s does not have the latest version %s, updating",
+                "Plugin %s is not up-to-date, latest version %s, updating",
                 plugin.slug,
                 plugin.latest_version,
             )
             try:
                 await plugin.update()
-            except HassioError:
+            except HassioError as ex:
                 _LOGGER.error(
-                    "Can't update %s to %s, the Supervisor healthy could be compromised!",
+                    "Can't update %s to %s: %s",
                     plugin.slug,
                     plugin.latest_version,
+                    str(ex),
                 )
                 self.sys_resolution.create_issue(
                     IssueType.UPDATE_FAILED,
@@ -107,7 +114,7 @@ class PluginManager(CoreSysAttributes):
                 )
             except Exception as err:  # pylint: disable=broad-except
                 _LOGGER.warning("Can't update plugin %s: %s", plugin.slug, err)
-                capture_exception(err)
+                await async_capture_exception(err)

     async def repair(self) -> None:
         """Repair Supervisor plugins."""
@@ -125,4 +132,4 @@ class PluginManager(CoreSysAttributes):
                 await plugin.stop()
             except Exception as err:  # pylint: disable=broad-except
                 _LOGGER.warning("Can't stop plugin %s: %s", plugin.slug, err)
-                capture_exception(err)
+                await async_capture_exception(err)
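`PluginManager.load_config` fans the per-plugin `read_data` coroutines out through `asyncio.gather`, so the plugin configs load concurrently instead of one after another. A minimal illustration of the fan-out (the plugin class is a stand-in):

    import asyncio

    class Plugin:
        async def read_data(self) -> None:
            await asyncio.sleep(0)  # stand-in for reading a config file

    async def load_all(plugins: list[Plugin]) -> None:
        # gather runs all reads concurrently and re-raises the first error.
        await asyncio.gather(*[plugin.read_data() for plugin in plugins])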
@@ -19,7 +19,7 @@ from ..exceptions import (
 )
 from ..jobs.const import JobExecutionLimit
 from ..jobs.decorator import Job
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .base import PluginBase
 from .const import (
     FILE_HASSIO_MULTICAST,
@@ -109,7 +109,7 @@ class PluginMulticast(PluginBase):
             await self.instance.install(self.version)
         except DockerError as err:
             _LOGGER.error("Repair of Multicast failed")
-            capture_exception(err)
+            await async_capture_exception(err)

     @Job(
         name="plugin_multicast_restart_after_problem",
@@ -22,7 +22,7 @@ from ..exceptions import (
 )
 from ..jobs.const import JobExecutionLimit
 from ..jobs.decorator import Job
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .base import PluginBase
 from .const import (
     FILE_HASSIO_OBSERVER,
@@ -80,7 +80,7 @@ class PluginObserver(PluginBase):
         """Run observer."""
         # Create new API token
         self._data[ATTR_ACCESS_TOKEN] = secrets.token_hex(56)
-        self.save_data()
+        await self.save_data()

         # Start Instance
         _LOGGER.info("Starting observer plugin")
@@ -121,7 +121,7 @@ class PluginObserver(PluginBase):
             await self.instance.install(self.version)
         except DockerError as err:
             _LOGGER.error("Repair of HA observer failed")
-            capture_exception(err)
+            await async_capture_exception(err)

     @Job(
         name="plugin_observer_restart_after_problem",
@@ -7,7 +7,7 @@ from typing import Any
 from ..const import ATTR_CHECKS
 from ..coresys import CoreSys, CoreSysAttributes
 from ..exceptions import ResolutionNotFound
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .checks.base import CheckBase
 from .validate import get_valid_modules
@@ -22,8 +22,6 @@ class ResolutionCheck(CoreSysAttributes):
         self.coresys = coresys
         self._checks: dict[str, CheckBase] = {}

-        self._load()
-
     @property
     def data(self) -> dict[str, Any]:
         """Return data."""
@@ -34,13 +32,18 @@ class ResolutionCheck(CoreSysAttributes):
         """Return a list of all checks."""
         return list(self._checks.values())

-    def _load(self):
-        """Load all checks."""
+    def load_modules(self) -> None:
+        """Load and setup all checks.
+
+        Must be run in executor.
+        """
         package = f"{__package__}.checks"
+        checks: dict[str, CheckBase] = {}
         for module in get_valid_modules("checks"):
             check_module = import_module(f"{package}.{module}")
             check = check_module.setup(self.coresys)
-            self._checks[check.slug] = check
+            checks[check.slug] = check
+        self._checks = checks

     def get(self, slug: str) -> CheckBase:
         """Return check based on slug."""
@@ -61,6 +64,6 @@ class ResolutionCheck(CoreSysAttributes):
                 await check()
             except Exception as err:  # pylint: disable=broad-except
                 _LOGGER.error("Error during processing %s: %s", check.issue, err)
-                capture_exception(err)
+                await async_capture_exception(err)

         _LOGGER.info("System checks complete")
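All three resolution registries (checks here, evaluations and fixups below) now expose a synchronous `load_modules` that builds a fresh dict and swaps it in only at the end, which keeps the registry consistent if an import raises halfway through; callers run it in an executor because `import_module` touches the filesystem. A stripped-down version of the loader (the package layout and the `setup(coresys)` convention follow the diff; everything else is illustrative):

    from importlib import import_module

    def load_modules(package: str, module_names: list[str], coresys) -> dict[str, object]:
        loaded: dict[str, object] = {}
        for name in module_names:
            module = import_module(f"{package}.{name}")
            instance = module.setup(coresys)  # each module exposes setup(coresys)
            loaded[instance.slug] = instance
        # Swap into the registry only after every import succeeded.
        return loaded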
@@ -10,7 +10,7 @@ from ...const import CoreState
 from ...coresys import CoreSys
 from ...jobs.const import JobCondition, JobExecutionLimit
 from ...jobs.decorator import Job
-from ...utils.sentry import capture_exception
+from ...utils.sentry import async_capture_exception
 from ..const import DNS_CHECK_HOST, ContextType, IssueType
 from .base import CheckBase
@@ -42,7 +42,7 @@ class CheckDNSServer(CheckBase):
                     ContextType.DNS_SERVER,
                     reference=dns_servers[i],
                 )
-                capture_exception(results[i])
+                await async_capture_exception(results[i])

     @Job(name="check_dns_server_approve", conditions=[JobCondition.INTERNET_SYSTEM])
     async def approve_check(self, reference: str | None = None) -> bool:
@@ -10,7 +10,7 @@ from ...const import CoreState
 from ...coresys import CoreSys
 from ...jobs.const import JobCondition, JobExecutionLimit
 from ...jobs.decorator import Job
-from ...utils.sentry import capture_exception
+from ...utils.sentry import async_capture_exception
 from ..const import DNS_CHECK_HOST, DNS_ERROR_NO_DATA, ContextType, IssueType
 from .base import CheckBase
@@ -47,7 +47,7 @@ class CheckDNSServerIPv6(CheckBase):
                     ContextType.DNS_SERVER,
                     reference=dns_servers[i],
                 )
-                capture_exception(results[i])
+                await async_capture_exception(results[i])

     @Job(
         name="check_dns_server_ipv6_approve", conditions=[JobCondition.INTERNET_SYSTEM]
@@ -23,7 +23,7 @@ class CheckFreeSpace(CheckBase):
 
     async def run_check(self) -> None:
         """Run check if not affected by issue."""
-        if self.sys_host.info.free_space > MINIMUM_FREE_SPACE_THRESHOLD:
+        if await self.sys_host.info.free_space() > MINIMUM_FREE_SPACE_THRESHOLD:
             return
 
         suggestions: list[SuggestionType] = []
@@ -45,7 +45,7 @@ class CheckFreeSpace(CheckBase):
 
     async def approve_check(self, reference: str | None = None) -> bool:
         """Approve check if it is affected by issue."""
-        if self.sys_host.info.free_space > MINIMUM_FREE_SPACE_THRESHOLD:
+        if await self.sys_host.info.free_space() > MINIMUM_FREE_SPACE_THRESHOLD:
             return False
         return True

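Here `free_space` changes from a synchronous property to an awaited `free_space()` call, implying the underlying disk query now runs off the event loop. A hedged sketch of exposing a blocking disk-usage lookup as an executor-backed coroutine (`HostInfo` and its methods are assumptions for illustration, not the Supervisor's actual host API):

"""Sketch: blocking disk-usage lookup exposed as a coroutine (assumed design)."""

import asyncio
import shutil


class HostInfo:
    """Expose free disk space without blocking the event loop."""

    def __init__(self, path: str = "/") -> None:
        self._path = path

    def _free_space_gb(self) -> float:
        """Blocking filesystem call; must not run on the loop."""
        return round(shutil.disk_usage(self._path).free / 1024**3, 1)

    async def free_space(self) -> float:
        """Run the blocking lookup in the default executor."""
        return await asyncio.get_running_loop().run_in_executor(
            None, self._free_space_gb
        )


async def main() -> None:
    print(f"free: {await HostInfo().free_space()} GB")


asyncio.run(main())

A check can then write `if await self.sys_host.info.free_space() > MINIMUM_FREE_SPACE_THRESHOLD:` exactly as in the hunks above.
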
@@ -5,7 +5,7 @@ import logging
 
 from ..coresys import CoreSys, CoreSysAttributes
 from ..exceptions import ResolutionNotFound
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .const import UnhealthyReason, UnsupportedReason
 from .evaluations.base import EvaluateBase
 from .validate import get_valid_modules
@@ -28,20 +28,23 @@ class ResolutionEvaluation(CoreSysAttributes):
         self.cached_images: set[str] = set()
         self._evalutions: dict[str, EvaluateBase] = {}
 
-        self._load()
-
     @property
     def all_evaluations(self) -> list[EvaluateBase]:
         """Return all list of all checks."""
         return list(self._evalutions.values())
 
-    def _load(self):
-        """Load all checks."""
+    def load_modules(self) -> None:
+        """Load and setup all evaluations.
+
+        Must be run in executor.
+        """
         package = f"{__package__}.evaluations"
+        evaluations: dict[str, EvaluateBase] = {}
         for module in get_valid_modules("evaluations"):
-            check_module = import_module(f"{package}.{module}")
-            check = check_module.setup(self.coresys)
-            self._evalutions[check.slug] = check
+            evaluate_module = import_module(f"{package}.{module}")
+            evaluation = evaluate_module.setup(self.coresys)
+            evaluations[evaluation.slug] = evaluation
+        self._evalutions = evaluations
 
     def get(self, slug: str) -> EvaluateBase:
         """Return check based on slug."""
@@ -61,7 +64,7 @@ class ResolutionEvaluation(CoreSysAttributes):
                 _LOGGER.warning(
                     "Error during processing %s: %s", evaluation.reason, err
                 )
-                capture_exception(err)
+                await async_capture_exception(err)
 
         if any(reason in self.sys_resolution.unsupported for reason in UNHEALTHY):
             self.sys_resolution.unhealthy = UnhealthyReason.DOCKER

@@ -6,7 +6,7 @@ import logging
 from ..coresys import CoreSys, CoreSysAttributes
 from ..jobs.const import JobCondition
 from ..jobs.decorator import Job
-from ..utils.sentry import capture_exception
+from ..utils.sentry import async_capture_exception
 from .data import Issue, Suggestion
 from .fixups.base import FixupBase
 from .validate import get_valid_modules
@@ -22,15 +22,18 @@ class ResolutionFixup(CoreSysAttributes):
         self.coresys = coresys
         self._fixups: dict[str, FixupBase] = {}
 
-        self._load()
+    def load_modules(self) -> None:
+        """Load and setup all fixups.
 
-    def _load(self):
-        """Load all checks."""
+        Must be run in executor.
+        """
         package = f"{__package__}.fixups"
+        fixups: dict[str, FixupBase] = {}
         for module in get_valid_modules("fixups"):
             fixup_module = import_module(f"{package}.{module}")
             fixup = fixup_module.setup(self.coresys)
-            self._fixups[fixup.slug] = fixup
+            fixups[fixup.slug] = fixup
+        self._fixups = fixups
 
     @property
     def all_fixes(self) -> list[FixupBase]:
@@ -52,7 +55,7 @@ class ResolutionFixup(CoreSysAttributes):
                 await fix()
             except Exception as err:  # pylint: disable=broad-except
                 _LOGGER.warning("Error during processing %s: %s", fix.suggestion, err)
-                capture_exception(err)
+                await async_capture_exception(err)
 
         _LOGGER.info("System autofix complete")

@@ -25,7 +25,7 @@ class FixupMountExecuteRemove(FixupBase):
         except MountNotFound:
             _LOGGER.warning("Can't find mount %s for fixup", reference)
         else:
-            self.sys_mounts.save_data()
+            await self.sys_mounts.save_data()
 
     @property
     def suggestion(self) -> SuggestionType:

@@ -1,5 +1,6 @@
 """Helpers to check and fix issues with free space."""
 
+from functools import partial
 import logging
 
 from ...coresys import CoreSys
@@ -40,7 +41,9 @@ class FixupStoreExecuteReset(FixupBase):
             _LOGGER.warning("Can't find store %s for fixup", reference)
             return
 
-        await self.sys_run_in_executor(remove_folder, repository.git.path)
+        await self.sys_run_in_executor(
+            partial(remove_folder, folder=repository.git.path, content_only=True)
+        )
 
         # Load data again
         try:

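The reset fixup now wraps `remove_folder` in `functools.partial` before handing it to the executor, and passes `content_only=True` so only the repository's contents are cleared. The `partial` is needed because `run_in_executor` forwards positional arguments only. A runnable illustration:

"""Sketch: passing keyword arguments through run_in_executor via partial."""

import asyncio
from functools import partial
from pathlib import Path


def remove_folder(folder: Path, content_only: bool = False) -> None:
    """Stand-in for the real helper; prints instead of deleting."""
    print(f"removing {'contents of ' if content_only else ''}{folder}")


async def main() -> None:
    loop = asyncio.get_running_loop()
    # run_in_executor(None, func, *args) takes positionals only, so
    # keyword arguments must be bound ahead of time with partial.
    await loop.run_in_executor(
        None, partial(remove_folder, folder=Path("/tmp/repo"), content_only=True)
    )


asyncio.run(main())
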
@@ -46,6 +46,17 @@ class ResolutionManager(FileConfiguration, CoreSysAttributes):
         self._unsupported: list[UnsupportedReason] = []
         self._unhealthy: list[UnhealthyReason] = []
 
+    async def load_modules(self):
+        """Load resolution evaluation, check and fixup modules."""
+
+        def _load_modules():
+            """Load and setup all resolution modules."""
+            self._evaluate.load_modules()
+            self._check.load_modules()
+            self._fixup.load_modules()
+
+        await self.sys_run_in_executor(_load_modules)
+
     @property
     def data(self) -> dict[str, Any]:
         """Return data."""

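`ResolutionManager.load_modules` batches the three synchronous loaders into a single executor job rather than paying three thread-pool round trips. A runnable sketch of the same shape with placeholder loaders (`sys_run_in_executor` approximated by the plain asyncio loop API):

"""Sketch: batching several blocking setup calls into one executor hop."""

import asyncio
import time


def slow_setup(name: str) -> None:
    """Stand-in for a blocking load_modules() implementation."""
    time.sleep(0.01)
    print(f"{name} loaded")


async def load_modules() -> None:
    """Mirror of the manager's shape: one thread-pool dispatch, three loads."""

    def _load_modules() -> None:
        slow_setup("evaluations")
        slow_setup("checks")
        slow_setup("fixups")

    await asyncio.get_running_loop().run_in_executor(None, _load_modules)


asyncio.run(load_modules())
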
@@ -36,7 +36,7 @@ class ResolutionNotify(CoreSysAttributes):
             messages.append(
                 {
                     "title": "Available space is less than 1GB!",
-                    "message": f"Available space is {self.sys_host.info.free_space}GB, see https://www.home-assistant.io/more-info/free-space for more information.",
+                    "message": f"Available space is {await self.sys_host.info.free_space()}GB, see https://www.home-assistant.io/more-info/free-space for more information.",
                     "notification_id": "supervisor_issue_free_space",
                 }
             )

@@ -1,5 +1,7 @@
 """Handle internal services discovery."""
 
+from typing import Self
+
 from ..coresys import CoreSys, CoreSysAttributes
 from .const import SERVICE_MQTT, SERVICE_MYSQL
 from .data import ServicesData
@@ -19,6 +21,11 @@ class ServiceManager(CoreSysAttributes):
         self.data: ServicesData = ServicesData()
         self.services_obj: dict[str, ServiceInterface] = {}
 
+    async def load_config(self) -> Self:
+        """Load config in executor."""
+        await self.data.read_data()
+        return self
+
     @property
     def list_services(self) -> list[ServiceInterface]:
         """Return a list of services."""
@@ -33,6 +40,6 @@ class ServiceManager(CoreSysAttributes):
         for slug, service in AVAILABLE_SERVICES.items():
             self.services_obj[slug] = service(self.coresys)
 
-    def reset(self) -> None:
+    async def reset(self) -> None:
         """Reset available data."""
-        self.data.reset_data()
+        await self.data.reset_data()

@@ -51,9 +51,9 @@ class ServiceInterface(CoreSysAttributes, ABC):
         """Return True if the service is in use."""
         return bool(self._data)
 
-    def save(self) -> None:
+    async def save(self) -> None:
         """Save changes."""
-        self.sys_services.data.save_data()
+        await self.sys_services.data.save_data()
 
     def get_service_data(self) -> dict[str, Any] | None:
         """Return the requested service data."""
@@ -62,9 +62,9 @@ class ServiceInterface(CoreSysAttributes, ABC):
         return None
 
     @abstractmethod
-    def set_service_data(self, addon: Addon, data: dict[str, Any]) -> None:
+    async def set_service_data(self, addon: Addon, data: dict[str, Any]) -> None:
         """Write the data into service object."""
 
     @abstractmethod
-    def del_service_data(self, addon: Addon) -> None:
+    async def del_service_data(self, addon: Addon) -> None:
         """Remove the data from service object."""

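With `save`, `set_service_data`, and `del_service_data` now coroutines, each concrete service below (MQTT, MySQL) must await the shared save. A reduced, runnable sketch of the async interface and one implementation (names and data layout simplified from the diff):

"""Sketch: async service interface whose mutations end in an awaited save."""

import asyncio
from abc import ABC, abstractmethod
from typing import Any


class ServiceInterface(ABC):
    """Interface for a single discoverable service."""

    def __init__(self, store: dict[str, Any]) -> None:
        self._data = store

    async def save(self) -> None:
        """Persist changes; the Supervisor awaits the manager's save_data()."""
        print("saved:", self._data)

    @abstractmethod
    async def set_service_data(self, addon: str, data: dict[str, Any]) -> None:
        """Write the data into the service object."""

    @abstractmethod
    async def del_service_data(self, addon: str) -> None:
        """Remove the data from the service object."""


class MQTTService(ServiceInterface):
    """Minimal concrete service: every mutation ends in an awaited save."""

    async def set_service_data(self, addon: str, data: dict[str, Any]) -> None:
        self._data.update(data, addon=addon)
        await self.save()

    async def del_service_data(self, addon: str) -> None:
        self._data.clear()
        await self.save()


async def main() -> None:
    svc = MQTTService({})
    await svc.set_service_data("core_mosquitto", {"host": "localhost", "port": 1883})
    await svc.del_service_data("core_mosquitto")


asyncio.run(main())
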
@@ -65,7 +65,7 @@ class MQTTService(ServiceInterface):
             return []
         return [self._data[ATTR_ADDON]]
 
-    def set_service_data(self, addon: Addon, data: dict[str, Any]) -> None:
+    async def set_service_data(self, addon: Addon, data: dict[str, Any]) -> None:
         """Write the data into service object."""
         if self.enabled:
             raise ServicesError(
@@ -77,9 +77,9 @@ class MQTTService(ServiceInterface):
         self._data[ATTR_ADDON] = addon.slug
 
         _LOGGER.info("Set %s as service provider for mqtt", addon.slug)
-        self.save()
+        await self.save()
 
-    def del_service_data(self, addon: Addon) -> None:
+    async def del_service_data(self, addon: Addon) -> None:
         """Remove the data from service object."""
         if not self.enabled:
             raise ServicesError(
@@ -87,4 +87,4 @@ class MQTTService(ServiceInterface):
             )
 
         self._data.clear()
-        self.save()
+        await self.save()

@@ -59,7 +59,7 @@ class MySQLService(ServiceInterface):
             return []
         return [self._data[ATTR_ADDON]]
 
-    def set_service_data(self, addon: Addon, data: dict[str, Any]) -> None:
+    async def set_service_data(self, addon: Addon, data: dict[str, Any]) -> None:
         """Write the data into service object."""
         if self.enabled:
             raise ServicesError(
@@ -71,12 +71,12 @@ class MySQLService(ServiceInterface):
         self._data[ATTR_ADDON] = addon.slug
 
         _LOGGER.info("Set %s as service provider for MySQL", addon.slug)
-        self.save()
+        await self.save()
 
-    def del_service_data(self, addon: Addon) -> None:
+    async def del_service_data(self, addon: Addon) -> None:
         """Remove the data from service object."""
         if not self.enabled:
             raise ServicesError("Can't remove not exists services", _LOGGER.warning)
 
         self._data.clear()
-        self.save()
+        await self.save()

@@ -206,7 +206,7 @@ class StoreManager(CoreSysAttributes, FileConfiguration):
             # On start-up we add the saved repos to force a load. But they're already in data
             if url not in self._data[ATTR_REPOSITORIES]:
                 self._data[ATTR_REPOSITORIES].append(url)
-                self.save_data()
+                await self.save_data()
 
         # Persist changes
         if persist:
@@ -227,7 +227,7 @@ class StoreManager(CoreSysAttributes, FileConfiguration):
         )
         await self.repositories.pop(repository.slug).remove()
         self._data[ATTR_REPOSITORIES].remove(repository.source)
-        self.save_data()
+        await self.save_data()
 
         if persist:
             await self.data.update()

@@ -74,7 +74,10 @@ def _read_addon_translations(addon_path: Path) -> dict:
 
 
 def _read_git_repository(path: Path) -> ProcessedRepository | None:
-    """Process a custom repository folder."""
+    """Process a custom repository folder.
+
+    Must be run in executor.
+    """
     slug = extract_hash_from_path(path)
 
     # exists repository json

@@ -49,7 +49,7 @@ class GitRepo(CoreSysAttributes):
 
     async def load(self) -> None:
         """Init Git add-on repository."""
-        if not self.path.is_dir():
+        if not (self.path / ".git").is_dir():
             await self.clone()
             return
 

@@ -74,7 +74,10 @@ class Repository(CoreSysAttributes):
         return self.data.get(ATTR_MAINTAINER, UNKNOWN)
 
     def validate(self) -> bool:
-        """Check if store is valid."""
+        """Check if store is valid.
+
+        Must be run in executor.
+        """
         if self.type != StoreType.GIT:
             return True
 
@@ -104,7 +107,7 @@ class Repository(CoreSysAttributes):
 
     async def update(self) -> bool:
         """Update add-on repository."""
-        if not self.validate():
+        if not await self.sys_run_in_executor(self.validate):
            return False
         return self.type == StoreType.LOCAL or await self.git.pull()

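`Repository.validate` stays synchronous, it reads repository metadata from disk, and is now marked executor-only; the async `update()` reaches it through `sys_run_in_executor`. A runnable sketch of that caller/callee split (the `repository.json` probe is an assumption based on the nearby comment):

"""Sketch: sync validator dispatched to the executor by its async caller."""

import asyncio
from pathlib import Path


class Repository:
    """Reduced stand-in for the store Repository."""

    def __init__(self, path: Path) -> None:
        self.path = path

    def validate(self) -> bool:
        """Check if store is valid. Must be run in executor: touches disk."""
        return (self.path / "repository.json").is_file()

    async def update(self) -> bool:
        """Skip the update when validation fails, as in the diff."""
        if not await asyncio.get_running_loop().run_in_executor(None, self.validate):
            return False
        # ... the git pull would happen here ...
        return True


print(asyncio.run(Repository(Path(".")).update()))
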
@@ -36,7 +36,7 @@ from .jobs.const import JobCondition, JobExecutionLimit
 from .jobs.decorator import Job
 from .resolution.const import ContextType, IssueType, UnhealthyReason
 from .utils.codenotary import calc_checksum
-from .utils.sentry import capture_exception
+from .utils.sentry import async_capture_exception
 
 _LOGGER: logging.Logger = logging.getLogger(__name__)
 
@@ -158,25 +158,35 @@ class Supervisor(CoreSysAttributes):
             ) from err
 
         # Load
-        with TemporaryDirectory(dir=self.sys_config.path_tmp) as tmp_dir:
-            profile_file = Path(tmp_dir, "apparmor.txt")
-            try:
-                profile_file.write_text(data, encoding="utf-8")
-            except OSError as err:
-                if err.errno == errno.EBADMSG:
-                    self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE
-                raise SupervisorAppArmorError(
-                    f"Can't write temporary profile: {err!s}", _LOGGER.error
-                ) from err
+        temp_dir: TemporaryDirectory | None = None
 
-            try:
-                await self.sys_host.apparmor.load_profile(
-                    "hassio-supervisor", profile_file
-                )
-            except HostAppArmorError as err:
-                raise SupervisorAppArmorError(
-                    "Can't update AppArmor profile!", _LOGGER.error
-                ) from err
+        def write_profile() -> Path:
+            nonlocal temp_dir
+            temp_dir = TemporaryDirectory(dir=self.sys_config.path_tmp)
+            profile_file = Path(temp_dir.name, "apparmor.txt")
+            profile_file.write_text(data, encoding="utf-8")
+            return profile_file
+
+        try:
+            profile_file = await self.sys_run_in_executor(write_profile)
+
+            await self.sys_host.apparmor.load_profile("hassio-supervisor", profile_file)
+
+        except OSError as err:
+            if err.errno == errno.EBADMSG:
+                self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE
+            raise SupervisorAppArmorError(
+                f"Can't write temporary profile: {err!s}", _LOGGER.error
+            ) from err
+
+        except HostAppArmorError as err:
+            raise SupervisorAppArmorError(
+                "Can't update AppArmor profile!", _LOGGER.error
+            ) from err
+
+        finally:
+            if temp_dir:
+                await self.sys_run_in_executor(temp_dir.cleanup)
 
     async def update(self, version: AwesomeVersion | None = None) -> None:
         """Update Supervisor version."""
@@ -209,14 +219,14 @@ class Supervisor(CoreSysAttributes):
             self.sys_resolution.create_issue(
                 IssueType.UPDATE_FAILED, ContextType.SUPERVISOR
             )
-            capture_exception(err)
+            await async_capture_exception(err)
             raise SupervisorUpdateError(
                 f"Update of Supervisor failed: {err!s}", _LOGGER.critical
             ) from err
 
         self.sys_config.version = version
         self.sys_config.image = self.sys_updater.image_supervisor
-        self.sys_config.save_data()
+        await self.sys_config.save_data()
 
         self.sys_create_task(self.sys_core.stop())

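The AppArmor hunk replaces the `with TemporaryDirectory(...)` block, whose blocking cleanup ran on the event loop, with a directory created inside the executor job, smuggled out via `nonlocal`, and cleaned up in a `finally` through another executor hop. A runnable sketch of that lifecycle with the error handling elided:

"""Sketch: executor-managed TemporaryDirectory with guaranteed async cleanup."""

import asyncio
from pathlib import Path
from tempfile import TemporaryDirectory


async def write_and_consume(data: str) -> None:
    loop = asyncio.get_running_loop()
    temp_dir: TemporaryDirectory | None = None

    def write_profile() -> Path:
        nonlocal temp_dir
        temp_dir = TemporaryDirectory()
        profile_file = Path(temp_dir.name, "apparmor.txt")
        profile_file.write_text(data, encoding="utf-8")  # blocking I/O
        return profile_file

    try:
        profile_file = await loop.run_in_executor(None, write_profile)
        print("would load profile from", profile_file)
    finally:
        # Cleanup is blocking too, so it also leaves the event loop.
        if temp_dir:
            await loop.run_in_executor(None, temp_dir.cleanup)


asyncio.run(write_and_consume("profile hassio-supervisor {}"))
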
|
@@ -310,7 +310,7 @@ class Updater(FileConfiguration, CoreSysAttributes):
|
||||
f"Can't process version data: {err}", _LOGGER.warning
|
||||
) from err
|
||||
|
||||
self.save_data()
|
||||
await self.save_data()
|
||||
|
||||
# Send status update to core
|
||||
for event in events:
|
||||
|
@@ -90,6 +90,7 @@ def remove_folder(
     Is needed to avoid issue with:
         - CAP_DAC_OVERRIDE
         - CAP_DAC_READ_SEARCH
+    Must be run in executor.
     """
     find_args = []
     if content_only:
@@ -106,8 +107,7 @@ def remove_folder(
     except OSError as err:
         _LOGGER.exception("Can't remove folder %s: %s", folder, err)
     except subprocess.CalledProcessError as procerr:
-        _LOGGER.error("Can't remove folder %s: %s", folder, procerr.stderr.strip())
-        raise procerr
+        _LOGGER.critical("Can't remove folder %s: %s", folder, procerr.stderr.strip())
 
 
 def remove_folder_with_excludes(
@@ -115,7 +115,10 @@ def remove_folder_with_excludes(
     excludes: list[str],
     tmp_dir: Path | None = None,
 ) -> None:
-    """Remove folder with excludes."""
+    """Remove folder with excludes.
+
+    Must be run in executor.
+    """
     with TemporaryDirectory(dir=tmp_dir) as temp_path:
         temp_path = Path(temp_path)
         moved_files: list[Path] = []

@@ -12,7 +12,10 @@ RE_PROFILE = re.compile(r"^profile ([^ ]+).*$")
 
 
 def get_profile_name(profile_file: Path) -> str:
-    """Read the profile name from file."""
+    """Read the profile name from file.
+
+    Must be run in executor.
+    """
     profiles = set()
 
     try:
@@ -42,14 +45,20 @@ def get_profile_name(profile_file: Path) -> str:
 
 
 def validate_profile(profile_name: str, profile_file: Path) -> bool:
-    """Check if profile from file is valid with profile name."""
+    """Check if profile from file is valid with profile name.
+
+    Must be run in executor.
+    """
     if profile_name == get_profile_name(profile_file):
         return True
     return False
 
 
 def adjust_profile(profile_name: str, profile_file: Path, profile_new: Path) -> None:
-    """Fix the profile name."""
+    """Fix the profile name.
+
+    Must be run in executor.
+    """
     org_profile = get_profile_name(profile_file)
     profile_data = []

@@ -1,9 +1,10 @@
 """Common utils."""
 
+import asyncio
 from contextlib import suppress
 import logging
 from pathlib import Path
-from typing import Any
+from typing import Any, Self
 
 import voluptuous as vol
 from voluptuous.humanize import humanize_error
@@ -18,7 +19,10 @@ _DEFAULT: dict[str, Any] = {}
 
 
 def find_one_filetype(path: Path, filename: str, filetypes: list[str]) -> Path:
-    """Find first file matching filetypes."""
+    """Find first file matching filetypes.
+
+    Must be run in executor.
+    """
     for file in path.glob(f"**/{filename}.*"):
         if file.suffix in filetypes:
             return file
@@ -26,7 +30,10 @@ def find_one_filetype(path: Path, filename: str, filetypes: list[str]) -> Path:
 
 
 def read_json_or_yaml_file(path: Path) -> dict:
-    """Read JSON or YAML file."""
+    """Read JSON or YAML file.
+
+    Must be run in executor.
+    """
     if path.suffix == ".json":
         return read_json_file(path)
 
@@ -37,7 +44,10 @@ def read_json_or_yaml_file(path: Path) -> dict:
 
 
 def write_json_or_yaml_file(path: Path, data: dict) -> None:
-    """Write JSON or YAML file."""
+    """Write JSON or YAML file.
+
+    Must be run in executor.
+    """
     if path.suffix == ".json":
         return write_json_file(path, data)
 
@@ -50,15 +60,18 @@ def write_json_or_yaml_file(path: Path, data: dict) -> None:
 class FileConfiguration:
     """Baseclass for classes that uses configuration files, the files can be JSON/YAML."""
 
-    def __init__(self, file_path: Path, schema: vol.Schema):
+    def __init__(self, file_path: Path | None, schema: vol.Schema):
         """Initialize hass object."""
-        self._file: Path = file_path
+        self._file: Path | None = file_path
         self._schema: vol.Schema = schema
         self._data: dict[str, Any] = _DEFAULT
 
-        self.read_data()
+    async def load_config(self) -> Self:
+        """Read in config in executor."""
+        await self.read_data()
+        return self
 
-    def reset_data(self) -> None:
+    async def reset_data(self) -> None:
         """Reset configuration to default."""
         try:
             self._data = self._schema(_DEFAULT)
@@ -67,15 +80,20 @@ class FileConfiguration:
                 "Can't reset %s: %s", self._file, humanize_error(self._data, ex)
             )
         else:
-            self.save_data()
+            await self.save_data()
 
-    def read_data(self) -> None:
+    async def read_data(self) -> None:
         """Read configuration file."""
-        if self._file.is_file():
-            try:
-                self._data = read_json_or_yaml_file(self._file)
-            except ConfigurationFileError:
-                self._data = _DEFAULT
+        if not self._file:
+            raise RuntimeError("Path to config file must be set!")
+
+        def _read_data() -> dict[str, Any]:
+            if self._file.is_file():
+                with suppress(ConfigurationFileError):
+                    return read_json_or_yaml_file(self._file)
+            return _DEFAULT
+
+        self._data = await asyncio.get_running_loop().run_in_executor(None, _read_data)
 
         # Validate
         try:
@@ -89,8 +107,11 @@ class FileConfiguration:
             _LOGGER.warning("Resetting %s to default", self._file)
             self._data = self._schema(_DEFAULT)
 
-    def save_data(self) -> None:
+    async def save_data(self) -> None:
         """Store data to configuration file."""
+        if not self._file:
+            raise RuntimeError("Path to config file must be set!")
+
         # Validate
         try:
             self._data = self._schema(self._data)
@@ -100,8 +121,10 @@ class FileConfiguration:
             # Load last valid data
             _LOGGER.warning("Resetting %s to last version", self._file)
             self._data = _DEFAULT
-            self.read_data()
+            await self.read_data()
         else:
             # write
             with suppress(ConfigurationFileError):
-                write_json_or_yaml_file(self._file, self._data)
+                await asyncio.get_running_loop().run_in_executor(
+                    None, write_json_or_yaml_file, self._file, self._data
+                )

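`FileConfiguration` now has a cheap constructor and defers all file I/O to awaited coroutines, with `load_config()` returning `Self` so construction and loading chain in one expression, plus a `RuntimeError` guard for instances created with `file_path=None`. A runnable sketch of the chaining pattern (assumes Python 3.11+ for `typing.Self`, matching the import added above):

"""Sketch: chainable async initialization via 'return self' (pattern only)."""

import asyncio
from typing import Self


class FileConfig:
    """Cheap constructor; all I/O deferred to an awaited loader."""

    def __init__(self, name: str) -> None:
        self._name = name
        self.data: dict[str, str] = {}

    async def load_config(self) -> Self:
        """Read in config in executor, then return self for chaining."""
        loop = asyncio.get_running_loop()
        # A real subclass would read JSON/YAML here; a lambda stands in.
        self.data = await loop.run_in_executor(None, lambda: {"name": self._name})
        return self


async def main() -> None:
    # One-expression construction, mirroring: await AddonBuild(...).load_config()
    config = await FileConfig("demo").load_config()
    print(config.data)


asyncio.run(main())
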
@@ -31,10 +31,11 @@ from ..exceptions import (
     DBusObjectError,
     DBusParseError,
     DBusServiceUnkownError,
+    DBusTimedOutError,
     DBusTimeoutError,
     HassioNotSupportedError,
 )
-from .sentry import capture_exception
+from .sentry import async_capture_exception
 
 _LOGGER: logging.Logger = logging.getLogger(__name__)
 
@@ -87,6 +88,8 @@ class DBus:
             return DBusNotConnectedError(err.text)
         if err.type == ErrorType.TIMEOUT:
             return DBusTimeoutError(err.text)
+        if err.type == ErrorType.TIMED_OUT:
+            return DBusTimedOutError(err.text)
         if err.type == ErrorType.NO_REPLY:
             return DBusNoReplyError(err.text)
         return DBusFatalError(err.text, type_=err.type)
@@ -121,7 +124,7 @@ class DBus:
             )
             raise DBus.from_dbus_error(err) from None
         except Exception as err:  # pylint: disable=broad-except
-            capture_exception(err)
+            await async_capture_exception(err)
             raise DBusFatalError(str(err)) from err
 
     def _add_interfaces(self):
@@ -136,7 +139,7 @@ class DBus:
         for _ in range(3):
             try:
                 return await self._bus.introspect(
-                    self.bus_name, self.object_path, timeout=10
+                    self.bus_name, self.object_path, timeout=30
                 )
             except InvalidIntrospectionError as err:
                 raise DBusParseError(
@@ -144,7 +147,13 @@ class DBus:
                 ) from err
             except DBusFastDBusError as err:
                 raise DBus.from_dbus_error(err) from None
-            except (EOFError, TimeoutError):
+            except TimeoutError:
+                # The systemd D-Bus activate service has a timeout of 25s, which will raise. We should
+                # not end up here unless the D-Bus broker is majorly overwhelmed.
+                _LOGGER.critical(
+                    "Timeout connecting to %s - %s", self.bus_name, self.object_path
+                )
+            except EOFError:
                 _LOGGER.warning(
                     "Busy system at %s - %s", self.bus_name, self.object_path
                 )

@@ -1,9 +1,10 @@
 """Custom log messages."""
 
+import asyncio
 import logging
 import re
 
-from .sentry import capture_exception
+from .sentry import async_capture_exception
 
 _LOGGER: logging.Logger = logging.getLogger(__name__)
 
@@ -12,14 +13,17 @@ RE_BIND_FAILED = re.compile(
 )
 
 
-def format_message(message: str) -> str:
-    """Return a formated message if it's known."""
+def async_format_message(message: str) -> str:
+    """Return a formated message if it's known.
+
+    Must be called from event loop.
+    """
     try:
         match = RE_BIND_FAILED.match(message)
         if match:
             return f"Port '{match.group(1)}' is already in use by something else on the host."
     except TypeError as err:
         _LOGGER.error("The type of message is not a string - %s", err)
-        capture_exception(err)
+        asyncio.get_running_loop().create_task(async_capture_exception(err))
 
     return message

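`format_message` becomes `async_format_message`, callable only from the event loop: as a plain `def` it cannot await, so on the error path it schedules `async_capture_exception` as a task, fire-and-forget. A runnable sketch of that shape (the real helper matches a bind-failure regex and catches `TypeError`; this uses a simpler trigger):

"""Sketch: fire-and-forget error reporting from a sync helper on the loop."""

import asyncio


async def report(err: Exception) -> None:
    print("reported:", err)


def format_message(message: str) -> str:
    """Must be called from the event loop; schedules reporting, never awaits."""
    try:
        return message.strip()
    except AttributeError as err:
        # Cannot await inside a plain def; hand the coroutine to the running loop.
        asyncio.get_running_loop().create_task(report(err))
        return str(message)


async def main() -> None:
    print(format_message(" ok "))
    print(format_message(42))  # type: ignore[arg-type]
    await asyncio.sleep(0)  # give the scheduled report task a chance to run


asyncio.run(main())
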
@@ -1,10 +1,13 @@
 """Utilities for sentry."""
 
+import asyncio
+from functools import partial
 import logging
 from typing import Any
 
 import sentry_sdk
 from sentry_sdk.integrations.aiohttp import AioHttpIntegration
+from sentry_sdk.integrations.asyncio import AsyncioIntegration
 from sentry_sdk.integrations.atexit import AtexitIntegration
 from sentry_sdk.integrations.dedupe import DedupeIntegration
 from sentry_sdk.integrations.excepthook import ExcepthookIntegration
@@ -26,11 +29,12 @@ def init_sentry(coresys: CoreSys) -> None:
         _LOGGER.info("Initializing Supervisor Sentry")
         sentry_sdk.init(
             dsn="https://9c6ea70f49234442b4746e447b24747e@o427061.ingest.sentry.io/5370612",
-            before_send=lambda event, hint: filter_data(coresys, event, hint),
+            before_send=partial(filter_data, coresys),
             auto_enabling_integrations=False,
             default_integrations=False,
             integrations=[
                 AioHttpIntegration(),
+                AsyncioIntegration(),
                 ExcepthookIntegration(),
                 DedupeIntegration(),
                 AtexitIntegration(),
@@ -43,19 +47,47 @@ def init_sentry(coresys: CoreSys) -> None:
 
 
 def capture_event(event: dict[str, Any], only_once: str | None = None):
-    """Capture an event and send to sentry."""
+    """Capture an event and send to sentry.
+
+    Must be called in executor.
+    """
     if sentry_sdk.is_initialized():
         if only_once and only_once not in only_once_events:
             only_once_events.add(only_once)
             sentry_sdk.capture_event(event)
 
 
+async def async_capture_event(event: dict[str, Any], only_once: str | None = None):
+    """Capture an event and send to sentry.
+
+    Safe to call from event loop.
+    """
+    if sentry_sdk.is_initialized():
+        await asyncio.get_running_loop().run_in_executor(
+            None, capture_event, event, only_once
+        )
+
+
 def capture_exception(err: Exception) -> None:
-    """Capture an exception and send to sentry."""
+    """Capture an exception and send to sentry.
+
+    Must be called in executor.
+    """
     if sentry_sdk.is_initialized():
         sentry_sdk.capture_exception(err)
 
 
+async def async_capture_exception(err: Exception) -> None:
+    """Capture an exception and send to sentry.
+
+    Safe to call in event loop.
+    """
+    if sentry_sdk.is_initialized():
+        await asyncio.get_running_loop().run_in_executor(
+            None, sentry_sdk.capture_exception, err
+        )
+
+
 def close_sentry() -> None:
     """Close the current sentry client.
 

@@ -17,7 +17,10 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
 
 
 def read_yaml_file(path: Path) -> dict:
-    """Read YAML file from path."""
+    """Read YAML file from path.
+
+    Must be run in executor.
+    """
     try:
         with open(path, encoding="utf-8") as yaml_file:
             return load(yaml_file, Loader=SafeLoader) or {}
@@ -29,7 +32,10 @@ def read_yaml_file(path: Path) -> dict:
 
 
 def write_yaml_file(path: Path, data: dict) -> None:
-    """Write a YAML file."""
+    """Write a YAML file.
+
+    Must be run in executor.
+    """
     try:
         with atomic_write(path, overwrite=True) as fp:
             dump(data, fp, Dumper=Dumper)

@@ -246,7 +246,7 @@ async def test_watchdog_during_attach(
 ):
     """Test host reboot treated as manual stop but not supervisor restart."""
     store = coresys.addons.store[TEST_ADDON_SLUG]
-    coresys.addons.data.install(store)
+    await coresys.addons.data.install(store)
 
     with (
         patch.object(Addon, "restart") as restart,

@@ -11,7 +11,7 @@ from supervisor.coresys import CoreSys
 
 async def test_platform_set(coresys: CoreSys, install_addon_ssh: Addon):
     """Test platform set in docker args."""
-    build = AddonBuild(coresys, install_addon_ssh)
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
     with (
         patch.object(
             type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
@@ -27,7 +27,7 @@ async def test_platform_set(coresys: CoreSys, install_addon_ssh: Addon):
 
 async def test_dockerfile_evaluation(coresys: CoreSys, install_addon_ssh: Addon):
     """Test platform set in docker args."""
-    build = AddonBuild(coresys, install_addon_ssh)
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
     with (
         patch.object(
             type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])
@@ -45,7 +45,7 @@ async def test_dockerfile_evaluation(coresys: CoreSys, install_addon_ssh: Addon)
 
 async def test_dockerfile_evaluation_arch(coresys: CoreSys, install_addon_ssh: Addon):
     """Test platform set in docker args."""
-    build = AddonBuild(coresys, install_addon_ssh)
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
    with (
         patch.object(
             type(coresys.arch), "supported", new=PropertyMock(return_value=["aarch64"])
@@ -65,7 +65,7 @@ async def test_dockerfile_evaluation_arch(coresys: CoreSys, install_addon_ssh: A
 
 async def test_build_valid(coresys: CoreSys, install_addon_ssh: Addon):
     """Test platform set in docker args."""
-    build = AddonBuild(coresys, install_addon_ssh)
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
     with (
         patch.object(
             type(coresys.arch), "supported", new=PropertyMock(return_value=["aarch64"])
@@ -79,7 +79,7 @@ async def test_build_valid(coresys: CoreSys, install_addon_ssh: Addon):
 
 async def test_build_invalid(coresys: CoreSys, install_addon_ssh: Addon):
     """Test platform set in docker args."""
-    build = AddonBuild(coresys, install_addon_ssh)
+    build = await AddonBuild(coresys, install_addon_ssh).load_config()
     with (
         patch.object(
             type(coresys.arch), "supported", new=PropertyMock(return_value=["amd64"])

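Every `AddonBuild(...)` construction in these tests now chains `.load_config()`, because the data read moved out of `__init__`; without the await the object would still carry its default, unloaded state. A runnable toy showing the difference (class and field are invented for illustration):

"""Sketch: why the awaited load matters; deferred loading leaves defaults."""

import asyncio
from typing import Self


class BuildConfig:
    """Stand-in with FileConfiguration-style deferred loading."""

    def __init__(self) -> None:
        self.version = None  # stays at the default until load_config runs

    async def load_config(self) -> Self:
        await asyncio.sleep(0)  # pretend executor-backed file read
        self.version = "1.0.0"
        return self


async def main() -> None:
    unloaded = BuildConfig()  # old construction, now incomplete
    loaded = await BuildConfig().load_config()  # new awaited construction
    print(unloaded.version, loaded.version)  # -> None 1.0.0


asyncio.run(main())
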
@@ -66,12 +66,12 @@ async def fixture_remove_wait_boot(coresys: CoreSys) -> AsyncGenerator[None]:
 
 
 @pytest.fixture(name="install_addon_example_image")
-def fixture_install_addon_example_image(
+async def fixture_install_addon_example_image(
     coresys: CoreSys, repository
 ) -> Generator[Addon]:
     """Install local_example add-on with image."""
     store = coresys.addons.store["local_example_image"]
-    coresys.addons.data.install(store)
+    await coresys.addons.data.install(store)
     # pylint: disable-next=protected-access
     coresys.addons.data._data = coresys.addons.data._schema(coresys.addons.data._data)
 
@@ -195,7 +195,7 @@ async def test_addon_uninstall_removes_discovery(
     """Test discovery messages removed when addon uninstalled."""
     assert coresys.discovery.list_messages == []
 
-    message = coresys.discovery.send(
+    message = await coresys.discovery.send(
         install_addon_ssh, "mqtt", {"host": "localhost", "port": 1883}
     )
     assert message.addon == TEST_ADDON_SLUG
@@ -504,7 +504,7 @@ async def test_shared_image_kept_on_uninstall(
     store_data = deepcopy(coresys.addons.store["local_example"].data)
     store = AddonStore(coresys, "local_example2", store_data)
     coresys.addons.store["local_example2"] = store
-    coresys.addons.data.install(store)
+    await coresys.addons.data.install(store)
     # pylint: disable-next=protected-access
     coresys.addons.data._data = coresys.addons.data._schema(coresys.addons.data._data)
 
@@ -545,7 +545,7 @@ async def test_shared_image_kept_on_update(
 
     coresys.store.data.addons["local_example2"] = new_store_data
     coresys.addons.store["local_example2"] = new_store
-    coresys.addons.data.install(curr_store)
+    await coresys.addons.data.install(curr_store)
     # pylint: disable-next=protected-access
     coresys.addons.data._data = coresys.addons.data._schema(coresys.addons.data._data)

@@ -23,7 +23,7 @@ async def mock_handler(request):
 
 
 @pytest.fixture
-async def api_system(aiohttp_client, run_dir, coresys: CoreSys) -> TestClient:
+async def api_system(aiohttp_client, coresys: CoreSys) -> TestClient:
     """Fixture for RestAPI client."""
     api = RestAPI(coresys)
     api.webapp = web.Application()
@@ -39,7 +39,7 @@ async def api_system(aiohttp_client, run_dir, coresys: CoreSys) -> TestClient:
 
 
 @pytest.fixture
-async def api_token_validation(aiohttp_client, run_dir, coresys: CoreSys) -> TestClient:
+async def api_token_validation(aiohttp_client, coresys: CoreSys) -> TestClient:
     """Fixture for RestAPI client with token validation middleware."""
     api = RestAPI(coresys)
     api.webapp = web.Application()

@@ -54,7 +54,7 @@ async def test_api_list_discovery(
         ),
         patch("supervisor.utils.common.Path.is_file", return_value=True),
     ):
-        coresys.discovery.read_data()
+        await coresys.discovery.read_data()
 
     await coresys.discovery.load()
     assert coresys.discovery.list_messages == [

@@ -358,11 +358,13 @@
 async def test_advanced_logs_errors(api_client: TestClient):
     """Test advanced logging API errors."""
-    # coresys = coresys_logs_control
-    resp = await api_client.get("/host/logs")
-    assert resp.content_type == "text/plain"
-    assert resp.status == 400
-    content = await resp.text()
-    assert content == "No systemd-journal-gatewayd Unix socket available"
+    with patch("supervisor.host.logs.SYSTEMD_JOURNAL_GATEWAYD_SOCKET") as socket:
+        socket.is_socket.return_value = False
+        resp = await api_client.get("/host/logs")
+        assert resp.content_type == "text/plain"
+        assert resp.status == 400
+        content = await resp.text()
+        assert content == "No systemd-journal-gatewayd Unix socket available"
 
     headers = {"Accept": "application/json"}
     resp = await api_client.get("/host/logs", headers=headers)

@@ -216,7 +216,7 @@
         "No systemd-journal-gatewayd Unix socket available!"
     )
 
-    with patch("supervisor.api.capture_exception") as capture_exception:
+    with patch("supervisor.api.async_capture_exception") as capture_exception:
         await api_client.get("/supervisor/logs")
         capture_exception.assert_not_called()
 
@@ -224,7 +224,7 @@
 
     journald_logs.side_effect = HassioError("Something bad happened!")
 
-    with patch("supervisor.api.capture_exception") as capture_exception:
+    with patch("supervisor.api.async_capture_exception") as capture_exception:
         await api_client.get("/supervisor/logs")
         capture_exception.assert_called_once()