mirror of https://github.com/home-assistant/supervisor.git, synced 2025-07-24 09:36:31 +00:00

commit 77be115eec

.github/workflows/sentry.yaml (vendored, 2 changes)
@@ -12,7 +12,7 @@ jobs:
- name: Check out code from GitHub
uses: actions/checkout@v2
- name: Sentry Release
uses: getsentry/action-release@v1.0.1
uses: getsentry/action-release@v1.0.2
env:
SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
SENTRY_ORG: ${{ secrets.SENTRY_ORG }}
@@ -1 +1 @@
Subproject commit 0c7c536f73bc8c73aea8c2c3f89114bdd62b9551
Subproject commit d6aba040dde366249ba10c5149a5fbc68a1d98bc
@@ -14,6 +14,6 @@ pulsectl==20.5.1
pytz==2020.1
pyudev==0.22.0
ruamel.yaml==0.15.100
sentry-sdk==0.17.4
sentry-sdk==0.17.5
uvloop==0.14.0
voluptuous==0.11.7
@@ -1,6 +1,6 @@
black==20.8b1
codecov==2.1.9
coverage==5.2.1
coverage==5.3
flake8-docstrings==1.5.0
flake8==3.8.3
pre-commit==2.7.1
@@ -10,5 +10,5 @@ pytest-aiohttp==0.3.0
pytest-asyncio==0.12.0 # NB!: Versions over 0.12.0 breaks pytest-aiohttp (https://github.com/aio-libs/pytest-aiohttp/issues/16)
pytest-cov==2.10.1
pytest-timeout==1.4.2
pytest==6.0.1
pytest==6.0.2
pyupgrade==2.7.2
@@ -5,13 +5,17 @@ import logging
import tarfile
from typing import Dict, List, Optional, Union

from ..const import BOOT_AUTO, AddonStartup, AddonState
from ..const import AddonBoot, AddonStartup, AddonState
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import (
AddonConfigurationError,
AddonsError,
AddonsNotSupportedError,
CoreDNSError,
DockerAPIError,
DockerError,
DockerNotFound,
DockerRequestError,
HomeAssistantAPIError,
HostAppArmorError,
)
@@ -84,7 +88,7 @@ class AddonManager(CoreSysAttributes):
"""Boot add-ons with mode auto."""
tasks: List[Addon] = []
for addon in self.installed:
if addon.boot != BOOT_AUTO or addon.startup != stage:
if addon.boot != AddonBoot.AUTO or addon.startup != stage:
continue
tasks.append(addon)

@@ -98,9 +102,17 @@ class AddonManager(CoreSysAttributes):
for addon in tasks:
try:
await addon.start()
except DockerRequestError:
pass
except (AddonConfigurationError, DockerAPIError, DockerNotFound):
addon.boot = AddonBoot.MANUAL
addon.save_persist()
except Exception as err: # pylint: disable=broad-except
_LOGGER.warning("Can't start Add-on %s: %s", addon.slug, err)
self.sys_capture_exception(err)
else:
continue

_LOGGER.warning("Can't start Add-on %s", addon.slug)

await asyncio.sleep(self.sys_config.wait_boot)

@@ -153,7 +165,7 @@ class AddonManager(CoreSysAttributes):

try:
await addon.instance.install(store.version, store.image)
except DockerAPIError as err:
except DockerError as err:
self.data.uninstall(addon)
raise AddonsError() from err
else:
@@ -174,7 +186,7 @@ class AddonManager(CoreSysAttributes):

try:
await addon.instance.remove()
except DockerAPIError as err:
except DockerError as err:
raise AddonsError() from err
else:
addon.state = AddonState.UNKNOWN
@@ -245,9 +257,9 @@ class AddonManager(CoreSysAttributes):
await addon.instance.update(store.version, store.image)

# Cleanup
with suppress(DockerAPIError):
with suppress(DockerError):
await addon.instance.cleanup()
except DockerAPIError as err:
except DockerError as err:
raise AddonsError() from err
else:
self.data.update(store)
@@ -285,7 +297,7 @@ class AddonManager(CoreSysAttributes):
try:
await addon.instance.remove()
await addon.instance.install(addon.version)
except DockerAPIError as err:
except DockerError as err:
raise AddonsError() from err
else:
self.data.update(store)
@@ -337,7 +349,7 @@ class AddonManager(CoreSysAttributes):
self.sys_docker.network.stale_cleanup, addon.instance.name
)

with suppress(DockerAPIError, KeyError):
with suppress(DockerError, KeyError):
# Need pull a image again
if not addon.need_build:
await addon.instance.install(addon.version, addon.image)
@@ -362,7 +374,7 @@ class AddonManager(CoreSysAttributes):
try:
if not await addon.instance.is_running():
continue
except DockerAPIError as err:
except DockerError as err:
_LOGGER.warning("Add-on %s is corrupt: %s", addon.slug, err)
self.sys_core.healthy = False
self.sys_capture_exception(err)
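
The boot loop above now reacts differently per failure class: a dockerd connectivity problem (DockerRequestError) leaves the boot mode untouched so the add-on is retried on the next startup, while configuration errors, Docker API errors, and a missing image switch the add-on to manual boot. A condensed, illustrative sketch of that policy (the helper name try_boot is not part of the Supervisor codebase):

    # Condensed sketch of the boot policy shown in the hunk above; try_boot is
    # an illustrative helper, not real Supervisor code.
    from supervisor.const import AddonBoot
    from supervisor.exceptions import (
        AddonConfigurationError,
        DockerAPIError,
        DockerNotFound,
        DockerRequestError,
    )


    async def try_boot(addon) -> None:
        """Start one add-on and downgrade its boot mode on persistent failures."""
        try:
            await addon.start()
        except DockerRequestError:
            # dockerd was unreachable: keep boot=auto and retry on the next startup.
            pass
        except (AddonConfigurationError, DockerAPIError, DockerNotFound):
            # Persistent problem with this add-on: stop auto-booting it.
            addon.boot = AddonBoot.MANUAL
            addon.save_persist()
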
@@ -39,6 +39,7 @@ from ..const import (
ATTR_VERSION,
ATTR_WATCHDOG,
DNS_SUFFIX,
AddonBoot,
AddonStartup,
AddonState,
)
@@ -49,7 +50,8 @@ from ..exceptions import (
AddonConfigurationError,
AddonsError,
AddonsNotSupportedError,
DockerAPIError,
DockerError,
DockerRequestError,
HostAppArmorError,
JsonFileError,
)
@@ -98,7 +100,7 @@ class Addon(AddonModel):

async def load(self) -> None:
"""Async initialize of object."""
with suppress(DockerAPIError):
with suppress(DockerError):
await self.instance.attach(tag=self.version)

# Evaluate state
@@ -163,12 +165,12 @@ class Addon(AddonModel):
self.persist[ATTR_OPTIONS] = {} if value is None else deepcopy(value)

@property
def boot(self) -> bool:
def boot(self) -> AddonBoot:
"""Return boot config with prio local settings."""
return self.persist.get(ATTR_BOOT, super().boot)

@boot.setter
def boot(self, value: bool) -> None:
def boot(self, value: AddonBoot) -> None:
"""Store user boot options."""
self.persist[ATTR_BOOT] = value

@@ -560,7 +562,10 @@ class Addon(AddonModel):
# Start Add-on
try:
await self.instance.run()
except DockerAPIError as err:
except DockerRequestError as err:
self.state = AddonState.STOPPED
raise AddonsError() from err
except DockerError as err:
self.state = AddonState.ERROR
raise AddonsError(err) from err
else:
@@ -570,7 +575,9 @@ class Addon(AddonModel):
"""Stop add-on."""
try:
return await self.instance.stop()
except DockerAPIError as err:
except DockerRequestError as err:
raise AddonsError() from err
except DockerError as err:
self.state = AddonState.ERROR
raise AddonsError() from err
else:
@@ -600,7 +607,7 @@ class Addon(AddonModel):
"""Return stats of container."""
try:
return await self.instance.stats()
except DockerAPIError as err:
except DockerError as err:
raise AddonsError() from err

async def write_stdin(self, data) -> None:
@@ -614,7 +621,7 @@ class Addon(AddonModel):

try:
return await self.instance.write_stdin(data)
except DockerAPIError as err:
except DockerError as err:
raise AddonsError() from err

async def snapshot(self, tar_file: tarfile.TarFile) -> None:
@@ -626,7 +633,7 @@ class Addon(AddonModel):
if self.need_build:
try:
await self.instance.export_image(temp_path.joinpath("image.tar"))
except DockerAPIError as err:
except DockerError as err:
raise AddonsError() from err

data = {
@@ -728,18 +735,18 @@ class Addon(AddonModel):

image_file = Path(temp, "image.tar")
if image_file.is_file():
with suppress(DockerAPIError):
with suppress(DockerError):
await self.instance.import_image(image_file)
else:
with suppress(DockerAPIError):
with suppress(DockerError):
await self.instance.install(version, restore_image)
await self.instance.cleanup()
elif self.instance.version != version or self.legacy:
_LOGGER.info("Restore/Update image for addon %s", self.slug)
with suppress(DockerAPIError):
with suppress(DockerError):
await self.instance.update(version, restore_image)
else:
with suppress(DockerAPIError):
with suppress(DockerError):
await self.instance.stop()

# Restore data
@@ -66,6 +66,7 @@ from ..const import (
SECURITY_DEFAULT,
SECURITY_DISABLE,
SECURITY_PROFILE,
AddonBoot,
AddonStage,
AddonStartup,
)
@@ -109,7 +110,7 @@ class AddonModel(CoreSysAttributes, ABC):
return self.data[ATTR_OPTIONS]

@property
def boot(self) -> bool:
def boot(self) -> AddonBoot:
"""Return boot config with prio local settings."""
return self.data[ATTR_BOOT]

@@ -82,11 +82,10 @@ from ..const import (
ATTR_VIDEO,
ATTR_WATCHDOG,
ATTR_WEBUI,
BOOT_AUTO,
BOOT_MANUAL,
PRIVILEGED_ALL,
ROLE_ALL,
ROLE_DEFAULT,
AddonBoot,
AddonStage,
AddonStartup,
AddonState,
@@ -193,7 +192,7 @@ SCHEMA_ADDON_CONFIG = vol.Schema(
vol.Optional(ATTR_MACHINE): vol.All([vol.Match(RE_MACHINE)], vol.Unique()),
vol.Optional(ATTR_URL): vol.Url(),
vol.Required(ATTR_STARTUP): vol.All(_simple_startup, vol.Coerce(AddonStartup)),
vol.Required(ATTR_BOOT): vol.In([BOOT_AUTO, BOOT_MANUAL]),
vol.Required(ATTR_BOOT): vol.Coerce(AddonBoot),
vol.Optional(ATTR_INIT, default=True): vol.Boolean(),
vol.Optional(ATTR_ADVANCED, default=False): vol.Boolean(),
vol.Optional(ATTR_STAGE, default=AddonStage.STABLE): vol.Coerce(AddonStage),
@@ -303,7 +302,7 @@ SCHEMA_ADDON_USER = vol.Schema(
),
vol.Optional(ATTR_OPTIONS, default=dict): dict,
vol.Optional(ATTR_AUTO_UPDATE, default=False): vol.Boolean(),
vol.Optional(ATTR_BOOT): vol.In([BOOT_AUTO, BOOT_MANUAL]),
vol.Optional(ATTR_BOOT): vol.Coerce(AddonBoot),
vol.Optional(ATTR_NETWORK): docker_ports,
vol.Optional(ATTR_AUDIO_OUTPUT): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(vol.Coerce(str)),
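
For context on the schema change above: vol.Coerce(AddonBoot) converts the incoming "auto"/"manual" string into the enum member, whereas the old vol.In([BOOT_AUTO, BOOT_MANUAL]) only checked membership and passed the raw string through. A minimal sketch of that difference (the local AddonBoot definition mirrors the one added in supervisor/const.py; nothing here is part of the commit):

    # Minimal sketch: behaviour of vol.Coerce(AddonBoot) in a voluptuous schema.
    from enum import Enum

    import voluptuous as vol


    class AddonBoot(str, Enum):
        """Local mirror of the enum added in supervisor/const.py."""

        AUTO = "auto"
        MANUAL = "manual"


    schema = vol.Schema({vol.Optional("boot"): vol.Coerce(AddonBoot)})

    result = schema({"boot": "auto"})
    assert result["boot"] is AddonBoot.AUTO  # coerced to the enum member
    assert result["boot"] == "auto"  # still compares equal to the raw string

    # Unknown values are rejected, as they were with vol.In([...]).
    try:
        schema({"boot": "sometimes"})
    except vol.Invalid:
        pass
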
@@ -91,12 +91,11 @@ from ..const import (
ATTR_VIDEO,
ATTR_WATCHDOG,
ATTR_WEBUI,
BOOT_AUTO,
BOOT_MANUAL,
CONTENT_TYPE_BINARY,
CONTENT_TYPE_PNG,
CONTENT_TYPE_TEXT,
REQUEST_FROM,
AddonBoot,
AddonState,
)
from ..coresys import CoreSysAttributes
@@ -112,7 +111,7 @@ SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): vol.Coerce(str)})
# pylint: disable=no-value-for-parameter
SCHEMA_OPTIONS = vol.Schema(
{
vol.Optional(ATTR_BOOT): vol.In([BOOT_AUTO, BOOT_MANUAL]),
vol.Optional(ATTR_BOOT): vol.Coerce(AddonBoot),
vol.Optional(ATTR_NETWORK): vol.Maybe(docker_ports),
vol.Optional(ATTR_AUTO_UPDATE): vol.Boolean(),
vol.Optional(ATTR_AUDIO_OUTPUT): vol.Maybe(vol.Coerce(str)),
@@ -1,9 +1,9 @@

try {
new Function("import('/api/hassio/app/frontend_latest/entrypoint.babc4122.js')")();
new Function("import('/api/hassio/app/frontend_latest/entrypoint.ced77c9a.js')")();
} catch (err) {
var el = document.createElement('script');
el.src = '/api/hassio/app/frontend_es5/entrypoint.028a6bad.js';
el.src = '/api/hassio/app/frontend_es5/entrypoint.871af696.js';
document.body.appendChild(el);
}
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -0,0 +1 @@
{"version":3,"file":"chunk.095b5a8570ff19eea467.js","sources":["webpack:///chunk.095b5a8570ff19eea467.js"],"mappings":"AAAA","sourceRoot":""}
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -1 +0,0 @@
{"version":3,"file":"chunk.0a106e488ed654ffce49.js","sources":["webpack:///chunk.0a106e488ed654ffce49.js"],"mappings":"AAAA","sourceRoot":""}
Binary file not shown.
@@ -1 +0,0 @@
{"version":3,"file":"chunk.2b590ee397502865577d.js","sources":["webpack:///chunk.2b590ee397502865577d.js"],"mappings":"AAAA","sourceRoot":""}
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -0,0 +1 @@
{"version":3,"file":"chunk.2fedc1a3cb3d8b758b92.js","sources":["webpack:///chunk.2fedc1a3cb3d8b758b92.js"],"mappings":"AAAA","sourceRoot":""}
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -0,0 +1 @@
{"version":3,"file":"chunk.33da75209731c9f0b81a.js","sources":["webpack:///chunk.33da75209731c9f0b81a.js"],"mappings":"AAAA","sourceRoot":""}
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -0,0 +1 @@
{"version":3,"file":"chunk.3e52d734bb60544642e8.js","sources":["webpack:///chunk.3e52d734bb60544642e8.js"],"mappings":"AAAA","sourceRoot":""}
Binary file not shown.
@@ -1 +0,0 @@
{"version":3,"file":"chunk.3e7c27cbbb8b44bddd5f.js","sources":["webpack:///chunk.3e7c27cbbb8b44bddd5f.js"],"mappings":";AAAA","sourceRoot":""}
Binary file not shown.
@@ -1 +0,0 @@
{"version":3,"file":"chunk.4e329b1d42b5358fbe1a.js","sources":["webpack:///chunk.4e329b1d42b5358fbe1a.js"],"mappings":"AAAA","sourceRoot":""}
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -0,0 +1 @@
{"version":3,"file":"chunk.9861d0fdbc47c03a1a5c.js","sources":["webpack:///chunk.9861d0fdbc47c03a1a5c.js"],"mappings":";AAAA","sourceRoot":""}
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -1 +0,0 @@
{"version":3,"file":"chunk.a0bbc7b092a109a89b49.js","sources":["webpack:///chunk.a0bbc7b092a109a89b49.js"],"mappings":"AAAA","sourceRoot":""}
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -1 +0,0 @@
{"version":3,"file":"entrypoint.028a6bad.js","sources":["webpack:///entrypoint.028a6bad.js"],"mappings":";AAAA","sourceRoot":""}
supervisor/api/panel/frontend_es5/entrypoint.871af696.js (Normal file, 3 changes)
File diff suppressed because one or more lines are too long
supervisor/api/panel/frontend_es5/entrypoint.871af696.js.gz (Normal file, BIN)
Binary file not shown.
@@ -0,0 +1 @@
{"version":3,"file":"entrypoint.871af696.js","sources":["webpack:///entrypoint.871af696.js"],"mappings":";AAAA","sourceRoot":""}
@@ -1,3 +1,3 @@
{
"entrypoint.js": "/api/hassio/app/frontend_es5/entrypoint.028a6bad.js"
"entrypoint.js": "/api/hassio/app/frontend_es5/entrypoint.871af696.js"
}
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -1 +1 @@
{"version":3,"file":"chunk.9a51b75455a6b146cba2.js","sources":["webpack:///chunk.9a51b75455a6b146cba2.js"],"mappings":"AAAA;;;AAoMA;AACA;;;AAGA;;;;;AAKA;;AAEA;AAEA;AACA;;;;;;;AAQA;;;;;AAKA;;AAEA;AAEA;AACA;;;;;;;AAQA;;;;;AAOA;;;;;;;;;;;;;;;;AAuBA;AAinBA;;AAEA;;AAEA;AACA;;AAIA;AA0IA;;;;AAIA;;AAEA;AACA;;;AAGA;;;;AAIA;AACA;;;;;;AAQA;;;;;;;;;;;;;;;;;;;;;;AA6HA;;;AAyGA;AACA;;;;;;;;AAQA;;AAGA;;;AAGA;;AAEA;AACA;;;;AAIA;;;;;;;AAQA;;;AAGA;;;;;AAvCA;;;;;;;;;;;;;;;AA8KA;;;AAkFA;AACA;;AAEA;;AAEA;AACA;;AAEA;AACA;;AAEA;AACA;;AAEA;;AApBA;;;;;;;;;;;AA4CA;;;AA8GA;;AAEA;;;;AARA;;;;;;;;;;;;AAiCA;;AAiCA;AACA;AACA;;;AAMA;;;;AA6IA;;;AAMA;AACA;;;AAGA;;AAEA;;AAKA;;AAEA;;AAEA;;AAIA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA6EA;AAsLA;;;;AAIA;AACA;AACA;AACA;;;AAGA;;;;;;;;AAQA;AACA;AACA;;;;AAIA;AACA;;;AAGA;;;AAGA;AACA;;;;;;;AAOA;;;;;;;;;AASA;;AAEA;AACA;;;;AAIA;;AAEA;;;;AAIA;;;AAGA;;;;AAIA;AACA;AACA;;;AAGA;;;;;;AAMA;;AAEA;AACA;;;;AAIA;;;AAGA;;AAEA;;AAEA;AACA;AAIA;;;;;;AAMA;;AAEA;AACA;;AAEA;AAKA;;AAEA;;;;AAIA;;AAEA;;;;;AAKA;;AAEA;AACA;;AAEA;;;;;AAKA;;AAEA;AACA;;AAEA;;;;;AAKA;;AAEA;AACA;;AAEA;;;AAGA;;AAEA;;AAEA;AACA;;AAEA;;;;;AAKA;;AAEA;AACA;;AAEA;;;;;AAKA;;AAEA;AACA;;AAEA;AACA;;;;;AAKA;;AAEA;AACA;;AAEA;;;;;AAKA;;AAEA;AACA;;AAEA;;;;;;AAMA;;;AAGA;;;AAGA;;AAEA;;;;;;;;AAQA;AACA;;;;;AAKA;AACA;;;;;;;;AAQA;AACA;;;;AAIA;AACA;AACA;;;;;;;;;AASA;AACA;;;;AAIA;AACA;AACA;;;;;AAKA;;;AAGA;AACA;AACA;;;;AAIA;AACA;AACA;;;;;;;;AAQA;AACA;;;;AAIA;;AAEA;AACA;;;AAGA;AACA;;;AAGA;AACA;;;;;;AAMA;AACA;;;;AAIA;AACA;;;;AAIA;;AAEA;;;;;;;;;;AAUA;AACA;AACA;;;AAGA;;;AAGA;;;;AAIA;;;AAGA;AACA;;;;AAIA;AACA;AACA;;;;;;AAMA;AACA;;;;;;;;AAQA;;;;AAIA;;;;AAIA;AAGA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAocA;;;AAoFA;AACA;AACA;;;AARA;;;;;;AA4BA;AAqGA;;AAEA;;AAEA;AACA;AACA;;;AAGA;;;AAKA;;;;;;;;;AAgBA;;;AAiGA;AACA;;;AAPA;;;;;;AA2BA;;AAmQA;AACA;AACA;AACA;;AAEA;;AAEA;;AAEA;AACA;AACA;AACA;;;AAKA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAsCA","sourceRoot":""}
{"version":3,"file":"chunk.37f6183e2aa511d31a60.js","sources":["webpack:///chunk.37f6183e2aa511d31a60.js"],"mappings":"AAAA;;;AAoMA;AACA;;;AAGA;;;;;AAKA;;AAEA;AAEA;AACA;;;;;;;AAQA;;;;;AAKA;;AAEA;AAEA;AACA;;;;;;;AAQA;;;;;AAOA;;;;;;;;;;;;;;;;AAuBA;AAknBA;;AAEA;;AAEA;AACA;;AAIA;AA0IA;;;;AAIA;;AAEA;AACA;;;AAGA;;;;AAIA;AACA;;;;;;AAQA;;;;;;;;;;;;;;;;;;;;;;AA6HA;;;AAyGA;AACA;;;;;;;;AAQA;;AAGA;;;AAGA;;AAEA;AACA;;;;AAIA;;;;;;;AAQA;;;AAGA;;;;;AAvCA;;;;;;;;;;;;;;;AA8KA;;;AAkFA;AACA;;AAEA;;AAEA;AACA;;AAEA;AACA;;AAEA;AACA;;AAEA;;AApBA;;;;;;;;;;;AA4CA;;;AA8GA;;AAEA;;;;AARA;;;;;;;;;;;;AAiCA;;AAiCA;AACA;AACA;;;AAMA;;;;AA6IA;;;AAMA;AACA;;;AAGA;;AAEA;;AAKA;;AAEA;;AAEA;;AAIA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA6EA;AAsLA;;;;AAIA;AACA;AACA;AACA;;;AAGA;;;;;;;;AAQA;AACA;AACA;;;;AAIA;AACA;;;AAGA;;;AAGA;AACA;;;;;;;AAOA;;;;;;;;;AASA;;AAEA;AACA;;;;AAIA;;AAEA;;;;AAIA;;;AAGA;;;;AAIA;AACA;AACA;;;AAGA;;;;;;AAMA;;AAEA;AACA;;;;AAIA;;;AAGA;;AAEA;;AAEA;AACA;AAIA;;;;;;AAMA;;AAEA;AACA;;AAEA;AAKA;;AAEA;;;;AAIA;;AAEA;;;;;AAKA;;AAEA;AACA;;AAEA;;;;;AAKA;;AAEA;AACA;;AAEA;;;;;AAKA;;AAEA;AACA;;AAEA;;;AAGA;;AAEA;;AAEA;AACA;;AAEA;;;;;AAKA;;AAEA;AACA;;AAEA;;;;;AAKA;;AAEA;AACA;;AAEA;AACA;;;;;AAKA;;AAEA;AACA;;AAEA;;;;;AAKA;;AAEA;AACA;;AAEA;;;;;;AAMA;;;AAGA;;;AAGA;;AAEA;;;;;;;;AAQA;AACA;;;;;AAKA;AACA;;;;;;;;AAQA;AACA;;;;AAIA;AACA;AACA;;;;;;;;;AASA;AACA;;;;AAIA;AACA;AACA;;;;;AAKA;;;AAGA;AACA;AACA;;;;AAIA;AACA;AACA;;;;;;;;AAQA;AACA;;;;AAIA;;AAEA;AACA;;;AAGA;AACA;;;AAGA;AACA;;;;;;AAMA;AACA;;;;AAIA;AACA;;;;AAIA;;AAEA;;;;;;;;;;AAUA;AACA;AACA;;;AAGA;;;AAGA;;;;AAIA;;;AAGA;AACA;;;;AAIA;AACA;AACA;;;;;;AAMA;AACA;;;;;;;;AAQA;;;;AAIA;;;;AAIA;AAGA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAocA;;;AAoFA;AACA;AACA;;;AARA;;;;;;AA4BA;AAqGA;;AAEA;;AAEA;AACA;AACA;;;AAGA;;;AAKA;;;;;;;;;AAgBA;;;AAiGA;AACA;;;AAPA;;;;;;AA2BA;;AAmQA;AACA;AACA;AACA;;AAEA;;AAEA;;AAEA;AACA;AACA;AACA;;;AAKA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAsCA","sourceRoot":""}
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -0,0 +1 @@
{"version":3,"file":"chunk.54411c889e12d1088f34.js","sources":["webpack:///chunk.54411c889e12d1088f34.js"],"mappings":"AAAA;;;AA6HA;AACA;AACA;;AAEA;;;AAGA;;AAEA;;AAIA;;AAEA;AACA;;;AAGA;AACA;AACA;AACA;AACA;;AAEA;;AAEA;AACA;AACA;;;;AAIA;AACA;;;AAGA;;;AAKA;;;;;;;;;;;;;;;;;;;;;;;;AA4FA","sourceRoot":""}
Binary file not shown.
Binary file not shown.
@@ -1 +0,0 @@
{"version":3,"file":"chunk.65999be5697048c06838.js","sources":["webpack:///chunk.65999be5697048c06838.js"],"mappings":"AAAA;;AAsIA;AACA;;AAEA;;AAEA;;AAEA;AAhBA;AACA;;AAEA;AAeA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA0HA","sourceRoot":""}
Binary file not shown.
@@ -1 +0,0 @@
{"version":3,"file":"chunk.6bb7ce5727199e27cab7.js","sources":["webpack:///chunk.6bb7ce5727199e27cab7.js"],"mappings":";AAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAy7EA","sourceRoot":""}
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -0,0 +1 @@
{"version":3,"file":"chunk.a1e2542b403d6c623155.js","sources":["webpack:///chunk.a1e2542b403d6c623155.js"],"mappings":";AAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAy7EA","sourceRoot":""}
Binary file not shown.
@@ -1 +0,0 @@
{"version":3,"file":"chunk.b786cdd4432edb4df8f7.js","sources":["webpack:///chunk.b786cdd4432edb4df8f7.js"],"mappings":"AAAA;;;AA6HA;AACA;AACA;AACA;;;AAGA;;AAEA;;AAIA;;AAEA;AACA;;;AAGA;AACA;AACA;AACA;AACA;;AAEA;;AAEA;AACA;AACA;;;AAGA;AACA;;;AAKA;;;;;;;;;;;;;;;;;;;;;;;;AA4EA","sourceRoot":""}
@@ -108,4 +108,4 @@
margin-top: 0;
}
`]}},{kind:"method",key:"_updateFolders",value:function(e,t){this._folders=this._folders.map(r=>(r.slug===e.slug&&(r.checked=t),r))}},{kind:"method",key:"_updateAddons",value:function(e,t){this._addons=this._addons.map(r=>(r.slug===e.slug&&(r.checked=t),r))}},{kind:"method",key:"_passwordInput",value:function(e){this._snapshotPassword=e.detail.value}},{kind:"method",key:"_partialRestoreClicked",value:async function(){if(!(await Object(l.b)(this,{title:"Are you sure you want partially to restore this snapshot?"})))return;const e=this._addons.filter(e=>e.checked).map(e=>e.slug),t=this._folders.filter(e=>e.checked).map(e=>e.slug),r={homeassistant:this._restoreHass,addons:e,folders:t};this._snapshot.protected&&(r.password=this._snapshotPassword),this.hass.callApi("POST",`hassio/snapshots/${this._snapshot.slug}/restore/partial`,r).then(()=>{alert("Snapshot restored!"),this._closeDialog()},e=>{this._error=e.body.message})}},{kind:"method",key:"_fullRestoreClicked",value:async function(){if(!(await Object(l.b)(this,{title:"Are you sure you want to wipe your system and restore this snapshot?"})))return;const e=this._snapshot.protected?{password:this._snapshotPassword}:void 0;this.hass.callApi("POST",`hassio/snapshots/${this._snapshot.slug}/restore/full`,e).then(()=>{alert("Snapshot restored!"),this._closeDialog()},e=>{this._error=e.body.message})}},{kind:"method",key:"_deleteClicked",value:async function(){await Object(l.b)(this,{title:"Are you sure you want to delete this snapshot?"})&&this.hass.callApi("POST",`hassio/snapshots/${this._snapshot.slug}/remove`).then(()=>{this._dialogParams.onDelete(),this._closeDialog()},e=>{this._error=e.body.message})}},{kind:"method",key:"_downloadClicked",value:async function(){let e;try{e=await(t=this.hass,r=`/api/hassio/snapshots/${this._snapshot.slug}/download`,t.callWS({type:"auth/sign_path",path:r}))}catch(o){return void alert("Error: "+Object(n.a)(o))}var t,r;const s=this._computeName.replace(/[^a-z0-9]+/gi,"_"),i=document.createElement("a");i.href=e.path,i.download=`Hass_io_${s}.tar`,this.shadowRoot.appendChild(i),i.click(),this.shadowRoot.removeChild(i)}},{kind:"get",key:"_computeName",value:function(){return this._snapshot?this._snapshot.name||this._snapshot.slug:"Unnamed snapshot"}},{kind:"get",key:"_computeSize",value:function(){return Math.ceil(10*this._snapshot.size)/10+" MB"}},{kind:"method",key:"_formatDatetime",value:function(e){return new Date(e).toLocaleDateString(navigator.language,{weekday:"long",year:"numeric",month:"short",day:"numeric",hour:"numeric",minute:"2-digit"})}},{kind:"method",key:"_closeDialog",value:function(){this._dialogParams=void 0,this._snapshot=void 0,this._snapshotPassword="",this._folders=[],this._addons=[]}}]}}),i.a)}}]);
//# sourceMappingURL=chunk.64b648e525a4fd321c8a.js.map
//# sourceMappingURL=chunk.ba99eaff59b636b2f274.js.map
Binary file not shown.
@@ -1 +1 @@
{"version":3,"file":"chunk.64b648e525a4fd321c8a.js","sources":["webpack:///chunk.64b648e525a4fd321c8a.js"],"mappings":"AAAA;;;;AAqOA;AACA;;;AAGA;AACA;AACA;;;;AAIA;AACA;;AAIA;;AAEA;;;AAGA;;AAGA;AACA;;AAEA;;;;AAKA;AACA;;;AAGA;;AAGA;AACA;;AAEA;;;;AAKA;AACA;;;;;AAKA;AACA;;AAEA;AACA;AACA;;;AAGA;AACA;;;;;AAKA;;;AAGA;;;AAGA;;AAEA;;;AAGA;;;AAGA;AACA;AACA;;;;AAzFA;;;;;;;;;;;;;;;;;;;;;;;;;AA+HA","sourceRoot":""}
{"version":3,"file":"chunk.ba99eaff59b636b2f274.js","sources":["webpack:///chunk.ba99eaff59b636b2f274.js"],"mappings":"AAAA;;;;AAqOA;AACA;;;AAGA;AACA;AACA;;;;AAIA;AACA;;AAIA;;AAEA;;;AAGA;;AAGA;AACA;;AAEA;;;;AAKA;AACA;;;AAGA;;AAGA;AACA;;AAEA;;;;AAKA;AACA;;;;;AAKA;AACA;;AAEA;AACA;AACA;;;AAGA;AACA;;;;;AAKA;;;AAGA;;;AAGA;;AAEA;;;AAGA;;;AAGA;AACA;AACA;;;;AAzFA;;;;;;;;;;;;;;;;;;;;;;;;;AA+HA","sourceRoot":""}
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -0,0 +1 @@
{"version":3,"file":"chunk.e0e7eb9358333eaa4b44.js","sources":["webpack:///chunk.e0e7eb9358333eaa4b44.js"],"mappings":"AAAA;;AAsIA;AACA;;AAEA;;AAEA;;AAEA;AAhBA;AACA;;AAEA;AAeA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA0HA","sourceRoot":""}
Binary file not shown.
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
supervisor/api/panel/frontend_latest/entrypoint.ced77c9a.js.gz (Normal file, BIN)
Binary file not shown.
File diff suppressed because one or more lines are too long
@@ -1,3 +1,3 @@
{
"entrypoint.js": "/api/hassio/app/frontend_latest/entrypoint.babc4122.js"
"entrypoint.js": "/api/hassio/app/frontend_latest/entrypoint.ced77c9a.js"
}
@@ -90,6 +90,7 @@ ADDONS_ROLE_ACCESS = {
r"|/host/.+"
r"|/multicast/.+"
r"|/network/.+"
r"|/observer/.+"
r"|/os/.+"
r"|/snapshots.*"
r"|/supervisor/.+"
@@ -3,7 +3,7 @@ from enum import Enum
from ipaddress import ip_network
from pathlib import Path

SUPERVISOR_VERSION = "243"
SUPERVISOR_VERSION = "244"

URL_HASSIO_ADDONS = "https://github.com/home-assistant/hassio-addons"
URL_HASSIO_APPARMOR = "https://version.home-assistant.io/apparmor.txt"
@@ -277,9 +277,6 @@ PROVIDE_SERVICE = "provide"
NEED_SERVICE = "need"
WANT_SERVICE = "want"

BOOT_AUTO = "auto"
BOOT_MANUAL = "manual"


MAP_CONFIG = "config"
MAP_SSL = "ssl"
@@ -352,6 +349,13 @@ CHAN_TYPE = "chan_type"
SUPERVISED_SUPPORTED_OS = ["Debian GNU/Linux 10 (buster)"]


class AddonBoot(str, Enum):
"""Boot mode for the add-on."""

AUTO = "auto"
MANUAL = "manual"


class AddonStartup(str, Enum):
"""Startup types of Add-on."""

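
Worth noting about the enum added above: because AddonBoot subclasses str as well as Enum (like AddonStartup and AddonState), values already persisted or sent over the API as the plain strings "auto"/"manual" keep working, and the member serializes back to the same string. A small illustrative check of that property (standard library behaviour only, not code from this commit):

    # Illustrative check: a str + Enum member behaves like the plain string it stores.
    import json
    from enum import Enum


    class AddonBoot(str, Enum):
        """Local mirror of the enum defined above."""

        AUTO = "auto"
        MANUAL = "manual"


    assert AddonBoot("manual") is AddonBoot.MANUAL  # round-trip from stored data
    assert AddonBoot.AUTO == "auto"  # comparisons against raw strings still hold
    assert json.dumps({"boot": AddonBoot.AUTO}) == '{"boot": "auto"}'
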
@@ -15,7 +15,7 @@ from .const import (
)
from .coresys import CoreSys, CoreSysAttributes
from .exceptions import (
DockerAPIError,
DockerError,
HassioError,
HomeAssistantError,
SupervisorUpdateError,
@@ -177,7 +177,7 @@ class Core(CoreSysAttributes):
if await self.sys_run_in_executor(self.sys_docker.check_denylist_images):
self.supported = False
self.healthy = False
except DockerAPIError:
except DockerError:
self.healthy = False

async def start(self):
@@ -11,7 +11,7 @@ from packaging import version as pkg_version
import requests

from ..const import DNS_SUFFIX, DOCKER_IMAGE_DENYLIST, SOCKET_DOCKER
from ..exceptions import DockerAPIError
from ..exceptions import DockerAPIError, DockerError, DockerNotFound, DockerRequestError
from .network import DockerNetwork

_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -129,27 +129,36 @@ class DockerAPI:
container = self.docker.containers.create(
f"{image}:{version}", use_config_proxy=False, **kwargs
)
except (docker.errors.DockerException, requests.RequestException) as err:
except docker.errors.NotFound as err:
_LOGGER.error("Image %s not exists for %s", image, name)
raise DockerNotFound() from err
except docker.errors.DockerException as err:
_LOGGER.error("Can't create container from %s: %s", name, err)
raise DockerAPIError() from err
except requests.RequestException as err:
_LOGGER.error("Dockerd connection issue for %s: %s", name, err)
raise DockerRequestError() from err

# Attach network
if not network_mode:
alias = [hostname] if hostname else None
try:
self.network.attach_container(container, alias=alias, ipv4=ipv4)
except DockerAPIError:
except DockerError:
_LOGGER.warning("Can't attach %s to hassio-net!", name)
else:
with suppress(DockerAPIError):
with suppress(DockerError):
self.network.detach_default_bridge(container)

# Run container
try:
container.start()
except (docker.errors.DockerException, requests.RequestException) as err:
except docker.errors.DockerException as err:
_LOGGER.error("Can't start %s: %s", name, err)
raise DockerAPIError(err) from err
raise DockerAPIError() from err
except requests.RequestException as err:
_LOGGER.error("Dockerd connection issue for %s: %s", name, err)
raise DockerRequestError() from err

# Update metadata
with suppress(docker.errors.DockerException, requests.RequestException):
@@ -187,7 +196,7 @@ class DockerAPI:

except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't execute command: %s", err)
raise DockerAPIError() from err
raise DockerError() from err

finally:
# cleanup container
@@ -249,7 +258,7 @@ class DockerAPI:
denied_images.add(image_name)
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Corrupt docker overlayfs detect: %s", err)
raise DockerAPIError() from err
raise DockerError() from err

if not denied_images:
return False
@@ -26,7 +26,7 @@ from ..const import (
SECURITY_PROFILE,
)
from ..coresys import CoreSys
from ..exceptions import CoreDNSError, DockerAPIError
from ..exceptions import CoreDNSError, DockerError
from ..utils import process_lock
from .interface import DockerInterface

@@ -419,7 +419,7 @@ class DockerAddon(DockerInterface):

except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't build %s:%s: %s", self.image, tag, err)
raise DockerAPIError() from err
raise DockerError() from err

_LOGGER.info("Build %s:%s done", self.image, tag)

@@ -437,7 +437,7 @@ class DockerAddon(DockerInterface):
image = self.sys_docker.api.get_image(f"{self.image}:{self.version}")
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't fetch image %s: %s", self.image, err)
raise DockerAPIError() from err
raise DockerError() from err

_LOGGER.info("Export image %s to %s", self.image, tar_file)
try:
@@ -446,7 +446,7 @@ class DockerAddon(DockerInterface):
write_tar.write(chunk)
except (OSError, requests.RequestException) as err:
_LOGGER.error("Can't write tar file %s: %s", tar_file, err)
raise DockerAPIError() from err
raise DockerError() from err

_LOGGER.info("Export image %s done", self.image)

@@ -467,12 +467,12 @@ class DockerAddon(DockerInterface):
docker_image = self.sys_docker.images.get(f"{self.image}:{self.version}")
except (docker.errors.DockerException, OSError) as err:
_LOGGER.error("Can't import image %s: %s", self.image, err)
raise DockerAPIError() from err
raise DockerError() from err

self._meta = docker_image.attrs
_LOGGER.info("Import image %s and version %s", tar_file, self.version)

with suppress(DockerAPIError):
with suppress(DockerError):
self._cleanup()

@process_lock
@@ -486,7 +486,7 @@ class DockerAddon(DockerInterface):
Need run inside executor.
"""
if not self._is_running():
raise DockerAPIError()
raise DockerError()

try:
# Load needed docker objects
@@ -494,7 +494,7 @@ class DockerAddon(DockerInterface):
socket = container.attach_socket(params={"stdin": 1, "stream": 1})
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't attach to %s stdin: %s", self.name, err)
raise DockerAPIError() from err
raise DockerError() from err

try:
# Write to stdin
@@ -503,7 +503,7 @@ class DockerAddon(DockerInterface):
socket.close()
except OSError as err:
_LOGGER.error("Can't write to %s stdin: %s", self.name, err)
raise DockerAPIError() from err
raise DockerError() from err

def _stop(self, remove_container=True) -> None:
"""Stop/remove Docker container.
@@ -7,7 +7,7 @@ import docker
import requests

from ..const import ENV_TIME, ENV_TOKEN, ENV_TOKEN_HASSIO, LABEL_MACHINE, MACHINE_ID
from ..exceptions import DockerAPIError
from ..exceptions import DockerError
from .interface import CommandReturn, DockerInterface

_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -177,7 +177,7 @@ class DockerHomeAssistant(DockerInterface):
except docker.errors.NotFound:
return False
except (docker.errors.DockerException, requests.RequestException):
return DockerAPIError()
return DockerError()

# we run on an old image, stop and start it
if docker_container.image.id != docker_image.id:
@@ -11,7 +11,7 @@ import requests
from . import CommandReturn
from ..const import LABEL_ARCH, LABEL_VERSION
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import DockerAPIError
from ..exceptions import DockerAPIError, DockerError, DockerNotFound, DockerRequestError
from ..utils import process_lock
from .stats import DockerStats

@@ -108,11 +108,11 @@ class DockerInterface(CoreSysAttributes):
"Available space in /data is: %s GiB",
free_space,
)
raise DockerAPIError() from err
raise DockerError() from err
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Unknown error with %s:%s -> %s", image, tag, err)
self.sys_capture_exception(err)
raise DockerAPIError() from err
raise DockerError() from err
else:
self._meta = docker_image.attrs

@@ -146,8 +146,10 @@ class DockerInterface(CoreSysAttributes):
docker_container = self.sys_docker.containers.get(self.name)
except docker.errors.NotFound:
return False
except (docker.errors.DockerException, requests.RequestException) as err:
except docker.errors.DockerException as err:
raise DockerAPIError() from err
except requests.RequestException as err:
raise DockerRequestError() from err

return docker_container.status == "running"

@@ -170,7 +172,7 @@ class DockerInterface(CoreSysAttributes):

# Successfull?
if not self._meta:
raise DockerAPIError()
raise DockerError()
_LOGGER.info("Attach to %s with version %s", self.image, self.version)

@process_lock
@@ -200,7 +202,7 @@ class DockerInterface(CoreSysAttributes):
except docker.errors.NotFound:
return
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerAPIError() from err
raise DockerError() from err

if docker_container.status == "running":
_LOGGER.info("Stop %s application", self.name)
@@ -226,14 +228,14 @@ class DockerInterface(CoreSysAttributes):
docker_container = self.sys_docker.containers.get(self.name)
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("%s not found for starting up", self.name)
raise DockerAPIError() from err
raise DockerError() from err

_LOGGER.info("Start %s", self.name)
try:
docker_container.start()
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't start %s: %s", self.name, err)
raise DockerAPIError() from err
raise DockerError() from err

@process_lock
def remove(self) -> Awaitable[None]:
@@ -246,7 +248,7 @@ class DockerInterface(CoreSysAttributes):
Needs run inside executor.
"""
# Cleanup container
with suppress(DockerAPIError):
with suppress(DockerError):
self._stop()

_LOGGER.info("Remove image %s with latest and %s", self.image, self.version)
@@ -262,7 +264,7 @@ class DockerInterface(CoreSysAttributes):

except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.warning("Can't remove image %s: %s", self.image, err)
raise DockerAPIError() from err
raise DockerError() from err

self._meta = None

@@ -290,7 +292,7 @@ class DockerInterface(CoreSysAttributes):
self._install(tag, image=image, latest=latest)

# Stop container & cleanup
with suppress(DockerAPIError):
with suppress(DockerError):
self._stop()

def logs(self) -> Awaitable[bytes]:
@@ -331,14 +333,14 @@ class DockerInterface(CoreSysAttributes):
origin = self.sys_docker.images.get(f"{self.image}:{self.version}")
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.warning("Can't find %s for cleanup", self.image)
raise DockerAPIError() from err
raise DockerError() from err

# Cleanup Current
try:
images_list = self.sys_docker.images.list(name=self.image)
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.waring("Corrupt docker overlayfs found: %s", err)
raise DockerAPIError() from err
raise DockerError() from err

for image in images_list:
if origin.id == image.id:
@@ -356,7 +358,7 @@ class DockerInterface(CoreSysAttributes):
images_list = self.sys_docker.images.list(name=old_image)
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.waring("Corrupt docker overlayfs found: %s", err)
raise DockerAPIError() from err
raise DockerError() from err

for image in images_list:
with suppress(docker.errors.DockerException, requests.RequestException):
@@ -376,14 +378,14 @@ class DockerInterface(CoreSysAttributes):
try:
container = self.sys_docker.containers.get(self.name)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerAPIError() from err
raise DockerError() from err

_LOGGER.info("Restart %s", self.image)
try:
container.restart(timeout=self.timeout)
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.warning("Can't restart %s: %s", self.image, err)
raise DockerAPIError() from err
raise DockerError() from err

@process_lock
def execute_command(self, command: str) -> Awaitable[CommandReturn]:
@@ -409,14 +411,14 @@ class DockerInterface(CoreSysAttributes):
try:
docker_container = self.sys_docker.containers.get(self.name)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerAPIError() from err
raise DockerError() from err

try:
stats = docker_container.stats(stream=False)
return DockerStats(stats)
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't read stats from %s: %s", self.name, err)
raise DockerAPIError() from err
raise DockerError() from err

def is_failed(self) -> Awaitable[bool]:
"""Return True if Docker is failing state.
@@ -435,7 +437,7 @@ class DockerInterface(CoreSysAttributes):
except docker.errors.NotFound:
return False
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerAPIError() from err
raise DockerError() from err

# container is not running
if docker_container.status != "exited":
@@ -469,7 +471,10 @@ class DockerInterface(CoreSysAttributes):

except (docker.errors.DockerException, ValueError) as err:
_LOGGER.debug("No version found for %s", self.image)
raise DockerAPIError() from err
raise DockerNotFound() from err
except requests.RequestException as err:
_LOGGER.warning("Communication issues with dockerd on Host: %s", err)
raise DockerRequestError() from err
else:
_LOGGER.debug("Found %s versions: %s", self.image, available_version)
@@ -8,7 +8,7 @@ import docker
import requests

from ..const import DOCKER_NETWORK, DOCKER_NETWORK_MASK, DOCKER_NETWORK_RANGE
from ..exceptions import DockerAPIError
from ..exceptions import DockerError

_LOGGER: logging.Logger = logging.getLogger(__name__)

@@ -113,7 +113,7 @@ class DockerNetwork:
self.network.connect(container, aliases=alias, ipv4_address=ipv4_address)
except docker.errors.APIError as err:
_LOGGER.error("Can't link container to hassio-net: %s", err)
raise DockerAPIError() from err
raise DockerError() from err

self.network.reload()

@@ -133,7 +133,7 @@ class DockerNetwork:

except docker.errors.APIError as err:
_LOGGER.warning("Can't disconnect container from default: %s", err)
raise DockerAPIError() from err
raise DockerError() from err

def stale_cleanup(self, container_name: str):
"""Remove force a container from Network.
@@ -8,7 +8,7 @@ import docker
import requests

from ..coresys import CoreSysAttributes
from ..exceptions import DockerAPIError
from ..exceptions import DockerError
from .interface import DockerInterface

_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -40,7 +40,7 @@ class DockerSupervisor(DockerInterface, CoreSysAttributes):
try:
docker_container = self.sys_docker.containers.get(self.name)
except (docker.errors.DockerException, requests.RequestException) as err:
raise DockerAPIError() from err
raise DockerError() from err

self._meta = docker_container.attrs
_LOGGER.info(
@@ -77,7 +77,7 @@ class DockerSupervisor(DockerInterface, CoreSysAttributes):
docker_container.image.tag(self.image, tag="latest")
except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't retag supervisor version: %s", err)
raise DockerAPIError() from err
raise DockerError() from err

def update_start_tag(self, image: str, version: str) -> Awaitable[None]:
"""Update start tag to new version."""
@@ -104,4 +104,4 @@ class DockerSupervisor(DockerInterface, CoreSysAttributes):

except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't fix start tag: %s", err)
raise DockerAPIError() from err
raise DockerError() from err
@@ -238,10 +238,22 @@ class JsonFileError(HassioError):
# docker/api


class DockerAPIError(HassioError):
class DockerError(HassioError):
"""Docker API/Transport errors."""


class DockerAPIError(DockerError):
"""Docker API error."""


class DockerRequestError(DockerError):
"""Dockerd OS issues."""


class DockerNotFound(DockerError):
"""Docker object don't Exists."""


# Hardware

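
The practical effect of the hierarchy above: DockerError is the new catch-all base, and the old DockerAPIError becomes one of three narrower subclasses, so call sites that switched from except DockerAPIError to except DockerError still catch API failures while new code can target the specific case. A quick illustrative check of those relationships (not part of the commit):

    # Illustrative check of the subclass relationships introduced above.
    from supervisor.exceptions import (
        DockerAPIError,
        DockerError,
        DockerNotFound,
        DockerRequestError,
        HassioError,
    )

    assert issubclass(DockerError, HassioError)
    assert issubclass(DockerAPIError, DockerError)
    assert issubclass(DockerRequestError, DockerError)
    assert issubclass(DockerNotFound, DockerError)

    # A handler written against the base class still catches the narrow cases:
    try:
        raise DockerNotFound()
    except DockerError:
        pass
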
@@ -15,7 +15,7 @@ from packaging import version as pkg_version
from ..coresys import CoreSys, CoreSysAttributes
from ..docker.homeassistant import DockerHomeAssistant
from ..docker.stats import DockerStats
from ..exceptions import DockerAPIError, HomeAssistantError, HomeAssistantUpdateError
from ..exceptions import DockerError, HomeAssistantError, HomeAssistantUpdateError
from ..utils import convert_to_ascii, process_lock

_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -58,7 +58,7 @@ class HomeAssistantCore(CoreSysAttributes):
)

await self.instance.attach(tag=self.sys_homeassistant.version)
except DockerAPIError:
except DockerError:
_LOGGER.info(
"No Home Assistant Docker image %s found.", self.sys_homeassistant.image
)
@@ -85,7 +85,7 @@ class HomeAssistantCore(CoreSysAttributes):
await self.instance.install(
LANDINGPAGE, image=self.sys_updater.image_homeassistant
)
except DockerAPIError:
except DockerError:
_LOGGER.warning("Fails install landingpage, retry after 30sec")
await asyncio.sleep(30)
except Exception as err: # pylint: disable=broad-except
@@ -117,7 +117,7 @@ class HomeAssistantCore(CoreSysAttributes):
tag, image=self.sys_updater.image_homeassistant
)
break
except DockerAPIError:
except DockerError:
pass
except Exception as err: # pylint: disable=broad-except
self.sys_capture_exception(err)
@@ -138,7 +138,7 @@ class HomeAssistantCore(CoreSysAttributes):
_LOGGER.error("Can't start Home Assistant!")

# Cleanup
with suppress(DockerAPIError):
with suppress(DockerError):
await self.instance.cleanup()

@process_lock
@@ -162,7 +162,7 @@ class HomeAssistantCore(CoreSysAttributes):
await self.instance.update(
to_version, image=self.sys_updater.image_homeassistant
)
except DockerAPIError as err:
except DockerError as err:
_LOGGER.warning("Update Home Assistant image failed")
raise HomeAssistantUpdateError() from err
else:
@@ -175,7 +175,7 @@ class HomeAssistantCore(CoreSysAttributes):

# Successfull - last step
self.sys_homeassistant.save_data()
with suppress(DockerAPIError):
with suppress(DockerError):
await self.instance.cleanup(old_image=old_image)

# Update Home Assistant
@@ -212,7 +212,7 @@ class HomeAssistantCore(CoreSysAttributes):

try:
await self.instance.run()
except DockerAPIError as err:
except DockerError as err:
raise HomeAssistantError() from err

await self._block_till_run(self.sys_homeassistant.version)
@@ -228,7 +228,7 @@ class HomeAssistantCore(CoreSysAttributes):
if await self.instance.is_initialize():
try:
await self.instance.start()
except DockerAPIError as err:
except DockerError as err:
raise HomeAssistantError() from err

await self._block_till_run(self.sys_homeassistant.version)
@@ -244,7 +244,7 @@ class HomeAssistantCore(CoreSysAttributes):
"""
try:
return await self.instance.stop(remove_container=False)
except DockerAPIError as err:
except DockerError as err:
raise HomeAssistantError() from err

@process_lock
@@ -252,7 +252,7 @@ class HomeAssistantCore(CoreSysAttributes):
"""Restart Home Assistant Docker."""
try:
await self.instance.restart()
except DockerAPIError as err:
except DockerError as err:
raise HomeAssistantError() from err

await self._block_till_run(self.sys_homeassistant.version)
@@ -260,7 +260,7 @@ class HomeAssistantCore(CoreSysAttributes):
@process_lock
async def rebuild(self) -> None:
"""Rebuild Home Assistant Docker container."""
with suppress(DockerAPIError):
with suppress(DockerError):
await self.instance.stop()
await self._start()

@@ -278,7 +278,7 @@ class HomeAssistantCore(CoreSysAttributes):
"""
try:
return await self.instance.stats()
except DockerAPIError as err:
except DockerError as err:
raise HomeAssistantError() from err

def is_running(self) -> Awaitable[bool]:
@@ -407,5 +407,5 @@ class HomeAssistantCore(CoreSysAttributes):
# Pull image
try:
await self.instance.install(self.sys_homeassistant.version)
except DockerAPIError:
except DockerError:
_LOGGER.error("Repairing of Home Assistant failed")
@@ -71,6 +71,7 @@ def filter_data(coresys: CoreSys, event: dict, hint: dict) -> dict:
"dns": coresys.plugins.dns.version,
"docker": coresys.docker.info.version,
"multicast": coresys.plugins.multicast.version,
"observer": coresys.plugins.observer.version,
"os": coresys.hassos.version,
"supervisor": coresys.supervisor.version,
},
@@ -15,7 +15,7 @@ from ..const import ATTR_IMAGE, ATTR_VERSION
from ..coresys import CoreSys, CoreSysAttributes
from ..docker.audio import DockerAudio
from ..docker.stats import DockerStats
from ..exceptions import AudioError, AudioUpdateError, DockerAPIError
from ..exceptions import AudioError, AudioUpdateError, DockerError
from ..utils.json import JsonConfig
from .const import FILE_HASSIO_AUDIO
from .validate import SCHEMA_AUDIO_CONFIG
@@ -92,7 +92,7 @@ class Audio(JsonConfig, CoreSysAttributes):
self.version = await self.instance.get_latest_version()

await self.instance.attach(tag=self.version)
except DockerAPIError:
except DockerError:
_LOGGER.info("No Audio plugin Docker image %s found.", self.instance.image)

# Install PulseAudio
@@ -131,7 +131,7 @@ class Audio(JsonConfig, CoreSysAttributes):
await self.sys_updater.reload()

if self.latest_version:
with suppress(DockerAPIError):
with suppress(DockerError):
await self.instance.install(
self.latest_version, image=self.sys_updater.image_audio
)
@@ -155,7 +155,7 @@ class Audio(JsonConfig, CoreSysAttributes):

try:
await self.instance.update(version, image=self.sys_updater.image_audio)
except DockerAPIError as err:
except DockerError as err:
_LOGGER.error("Audio update failed")
raise AudioUpdateError() from err
else:
@@ -164,7 +164,7 @@ class Audio(JsonConfig, CoreSysAttributes):
self.save_data()

# Cleanup
with suppress(DockerAPIError):
with suppress(DockerError):
await self.instance.cleanup(old_image=old_image)

# Start Audio
@@ -175,7 +175,7 @@ class Audio(JsonConfig, CoreSysAttributes):
_LOGGER.info("Restart Audio plugin")
try:
await self.instance.restart()
except DockerAPIError as err:
except DockerError as err:
_LOGGER.error("Can't start Audio plugin")
raise AudioError() from err

@@ -184,7 +184,7 @@ class Audio(JsonConfig, CoreSysAttributes):
_LOGGER.info("Start Audio plugin")
try:
await self.instance.run()
except DockerAPIError as err:
except DockerError as err:
_LOGGER.error("Can't start Audio plugin")
raise AudioError() from err

@@ -193,7 +193,7 @@ class Audio(JsonConfig, CoreSysAttributes):
_LOGGER.info("Stop Audio plugin")
try:
await self.instance.stop()
except DockerAPIError as err:
except DockerError as err:
_LOGGER.error("Can't stop Audio plugin")
raise AudioError() from err

@@ -208,7 +208,7 @@ class Audio(JsonConfig, CoreSysAttributes):
"""Return stats of CoreDNS."""
try:
return await self.instance.stats()
except DockerAPIError as err:
except DockerError as err:
raise AudioError() from err

def is_running(self) -> Awaitable[bool]:
@@ -226,7 +226,7 @@ class Audio(JsonConfig, CoreSysAttributes):
_LOGGER.info("Repair Audio %s", self.version)
try:
await self.instance.install(self.version)
except DockerAPIError as err:
except DockerError as err:
_LOGGER.error("Repairing of Audio failed")
self.sys_capture_exception(err)
@@ -12,7 +12,7 @@ from ..const import ATTR_ACCESS_TOKEN, ATTR_IMAGE, ATTR_VERSION
from ..coresys import CoreSys, CoreSysAttributes
from ..docker.cli import DockerCli
from ..docker.stats import DockerStats
from ..exceptions import CliError, CliUpdateError, DockerAPIError
from ..exceptions import CliError, CliUpdateError, DockerError
from ..utils.json import JsonConfig
from .const import FILE_HASSIO_CLI
from .validate import SCHEMA_CLI_CONFIG
@@ -80,7 +80,7 @@ class HaCli(CoreSysAttributes, JsonConfig):
self.version = await self.instance.get_latest_version()

await self.instance.attach(tag=self.version)
except DockerAPIError:
except DockerError:
_LOGGER.info("No cli plugin Docker image %s found.", self.instance.image)

# Install cli
@@ -105,7 +105,7 @@ class HaCli(CoreSysAttributes, JsonConfig):
await self.sys_updater.reload()

if self.latest_version:
with suppress(DockerAPIError):
with suppress(DockerError):
await self.instance.install(
self.latest_version,
image=self.sys_updater.image_cli,
@@ -133,7 +133,7 @@ class HaCli(CoreSysAttributes, JsonConfig):
await self.instance.update(
version, image=self.sys_updater.image_cli, latest=True
)
except DockerAPIError as err:
except DockerError as err:
_LOGGER.error("HA cli update failed")
raise CliUpdateError() from err
else:
@@ -142,7 +142,7 @@ class HaCli(CoreSysAttributes, JsonConfig):
self.save_data()

# Cleanup
with suppress(DockerAPIError):
with suppress(DockerError):
await self.instance.cleanup(old_image=old_image)

# Start cli
@@ -158,7 +158,7 @@ class HaCli(CoreSysAttributes, JsonConfig):
_LOGGER.info("Start cli plugin")
try:
await self.instance.run()
except DockerAPIError as err:
except DockerError as err:
_LOGGER.error("Can't start cli plugin")
raise CliError() from err

@@ -167,7 +167,7 @@ class HaCli(CoreSysAttributes, JsonConfig):
_LOGGER.info("Stop cli plugin")
try:
await self.instance.stop()
except DockerAPIError as err:
except DockerError as err:
_LOGGER.error("Can't stop cli plugin")
raise CliError() from err

@@ -175,7 +175,7 @@ class HaCli(CoreSysAttributes, JsonConfig):
"""Return stats of cli."""
try:
return await self.instance.stats()
except DockerAPIError as err:
except DockerError as err:
raise CliError() from err

def is_running(self) -> Awaitable[bool]:
@@ -193,6 +193,6 @@ class HaCli(CoreSysAttributes, JsonConfig):
_LOGGER.info("Repair HA cli %s", self.version)
try:
await self.instance.install(self.version, latest=True)
except DockerAPIError as err:
except DockerError as err:
_LOGGER.error("Repairing of HA cli failed")
self.sys_capture_exception(err)
@@ -17,7 +17,7 @@ from ..const import ATTR_IMAGE, ATTR_SERVERS, ATTR_VERSION, DNS_SUFFIX, LogLevel
from ..coresys import CoreSys, CoreSysAttributes
from ..docker.dns import DockerDNS
from ..docker.stats import DockerStats
from ..exceptions import CoreDNSError, CoreDNSUpdateError, DockerAPIError
from ..exceptions import CoreDNSError, CoreDNSUpdateError, DockerError
from ..utils.json import JsonConfig
from ..validate import dns_url
from .const import FILE_HASSIO_DNS
@@ -120,7 +120,7 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
self.version = await self.instance.get_latest_version()

await self.instance.attach(tag=self.version)
except DockerAPIError:
except DockerError:
_LOGGER.info(
"No CoreDNS plugin Docker image %s found.", self.instance.image
)
@@ -162,7 +162,7 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
await self.sys_updater.reload()

if self.latest_version:
with suppress(DockerAPIError):
with suppress(DockerError):
await self.instance.install(
self.latest_version, image=self.sys_updater.image_dns
)
@@ -190,7 +190,7 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
# Update
try:
await self.instance.update(version, image=self.sys_updater.image_dns)
except DockerAPIError as err:
except DockerError as err:
_LOGGER.error("CoreDNS update failed")
raise CoreDNSUpdateError() from err
else:
@@ -199,7 +199,7 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
self.save_data()

# Cleanup
with suppress(DockerAPIError):
with suppress(DockerError):
await self.instance.cleanup(old_image=old_image)

# Start CoreDNS
@@ -211,7 +211,7 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
_LOGGER.info("Restart CoreDNS plugin")
try:
await self.instance.restart()
except DockerAPIError as err:
except DockerError as err:
_LOGGER.error("Can't start CoreDNS plugin")
raise CoreDNSError() from err

@@ -223,7 +223,7 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
_LOGGER.info("Start CoreDNS plugin")
try:
await self.instance.run()
except DockerAPIError as err:
except DockerError as err:
_LOGGER.error("Can't start CoreDNS plugin")
raise CoreDNSError() from err

@@ -232,7 +232,7 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
_LOGGER.info("Stop CoreDNS plugin")
try:
await self.instance.stop()
except DockerAPIError as err:
except DockerError as err:
_LOGGER.error("Can't stop CoreDNS plugin")
raise CoreDNSError() from err

@@ -389,7 +389,7 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
"""Return stats of CoreDNS."""
try:
return await self.instance.stats()
except DockerAPIError as err:
except DockerError as err:
raise CoreDNSError() from err

def is_running(self) -> Awaitable[bool]:
@@ -414,7 +414,7 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
_LOGGER.info("Repair CoreDNS %s", self.version)
try:
await self.instance.install(self.version)
except DockerAPIError as err:
except DockerError as err:
_LOGGER.error("Repairing of CoreDNS failed")
self.sys_capture_exception(err)
@ -11,7 +11,7 @@ from ..const import ATTR_IMAGE, ATTR_VERSION
from ..coresys import CoreSys, CoreSysAttributes
from ..docker.multicast import DockerMulticast
from ..docker.stats import DockerStats
from ..exceptions import DockerAPIError, MulticastError, MulticastUpdateError
from ..exceptions import DockerError, MulticastError, MulticastUpdateError
from ..utils.json import JsonConfig
from .const import FILE_HASSIO_MULTICAST
from .validate import SCHEMA_MULTICAST_CONFIG
@ -74,7 +74,7 @@ class Multicast(JsonConfig, CoreSysAttributes):
                self.version = await self.instance.get_latest_version()

            await self.instance.attach(tag=self.version)
        except DockerAPIError:
        except DockerError:
            _LOGGER.info(
                "No Multicast plugin Docker image %s found.", self.instance.image
            )
@ -103,7 +103,7 @@ class Multicast(JsonConfig, CoreSysAttributes):
                await self.sys_updater.reload()

            if self.latest_version:
                with suppress(DockerAPIError):
                with suppress(DockerError):
                    await self.instance.install(
                        self.latest_version, image=self.sys_updater.image_multicast
                    )
@ -128,7 +128,7 @@ class Multicast(JsonConfig, CoreSysAttributes):
        # Update
        try:
            await self.instance.update(version, image=self.sys_updater.image_multicast)
        except DockerAPIError as err:
        except DockerError as err:
            _LOGGER.error("Multicast update failed")
            raise MulticastUpdateError() from err
        else:
@ -137,7 +137,7 @@ class Multicast(JsonConfig, CoreSysAttributes):
            self.save_data()

        # Cleanup
        with suppress(DockerAPIError):
        with suppress(DockerError):
            await self.instance.cleanup(old_image=old_image)

        # Start Multicast plugin
@ -148,7 +148,7 @@ class Multicast(JsonConfig, CoreSysAttributes):
        _LOGGER.info("Restart Multicast plugin")
        try:
            await self.instance.restart()
        except DockerAPIError as err:
        except DockerError as err:
            _LOGGER.error("Can't start Multicast plugin")
            raise MulticastError() from err

@ -157,7 +157,7 @@ class Multicast(JsonConfig, CoreSysAttributes):
        _LOGGER.info("Start Multicast plugin")
        try:
            await self.instance.run()
        except DockerAPIError as err:
        except DockerError as err:
            _LOGGER.error("Can't start Multicast plugin")
            raise MulticastError() from err

@ -166,7 +166,7 @@ class Multicast(JsonConfig, CoreSysAttributes):
        _LOGGER.info("Stop Multicast plugin")
        try:
            await self.instance.stop()
        except DockerAPIError as err:
        except DockerError as err:
            _LOGGER.error("Can't stop Multicast plugin")
            raise MulticastError() from err

@ -181,7 +181,7 @@ class Multicast(JsonConfig, CoreSysAttributes):
        """Return stats of Multicast."""
        try:
            return await self.instance.stats()
        except DockerAPIError as err:
        except DockerError as err:
            raise MulticastError() from err

    def is_running(self) -> Awaitable[bool]:
@ -206,6 +206,6 @@ class Multicast(JsonConfig, CoreSysAttributes):
        _LOGGER.info("Repair Multicast %s", self.version)
        try:
            await self.instance.install(self.version)
        except DockerAPIError as err:
        except DockerError as err:
            _LOGGER.error("Repairing of Multicast failed")
            self.sys_capture_exception(err)
@ -12,7 +12,7 @@ from ..const import ATTR_ACCESS_TOKEN, ATTR_IMAGE, ATTR_VERSION
from ..coresys import CoreSys, CoreSysAttributes
from ..docker.observer import DockerObserver
from ..docker.stats import DockerStats
from ..exceptions import DockerAPIError, ObserverError, ObserverUpdateError
from ..exceptions import DockerError, ObserverError, ObserverUpdateError
from ..utils.json import JsonConfig
from .const import FILE_HASSIO_OBSERVER
from .validate import SCHEMA_OBSERVER_CONFIG
@ -80,7 +80,7 @@ class Observer(CoreSysAttributes, JsonConfig):
                self.version = await self.instance.get_latest_version()

            await self.instance.attach(tag=self.version)
        except DockerAPIError:
        except DockerError:
            _LOGGER.info(
                "No observer plugin Docker image %s found.", self.instance.image
            )
@ -107,7 +107,7 @@ class Observer(CoreSysAttributes, JsonConfig):
                await self.sys_updater.reload()

            if self.latest_version:
                with suppress(DockerAPIError):
                with suppress(DockerError):
                    await self.instance.install(
                        self.latest_version, image=self.sys_updater.image_observer
                    )
@ -131,7 +131,7 @@ class Observer(CoreSysAttributes, JsonConfig):

        try:
            await self.instance.update(version, image=self.sys_updater.image_observer)
        except DockerAPIError as err:
        except DockerError as err:
            _LOGGER.error("HA observer update failed")
            raise ObserverUpdateError() from err
        else:
@ -140,7 +140,7 @@ class Observer(CoreSysAttributes, JsonConfig):
            self.save_data()

        # Cleanup
        with suppress(DockerAPIError):
        with suppress(DockerError):
            await self.instance.cleanup(old_image=old_image)

        # Start observer
@ -157,7 +157,7 @@ class Observer(CoreSysAttributes, JsonConfig):
        _LOGGER.info("Start observer plugin")
        try:
            await self.instance.run()
        except DockerAPIError as err:
        except DockerError as err:
            _LOGGER.error("Can't start observer plugin")
            raise ObserverError() from err

@ -165,7 +165,7 @@ class Observer(CoreSysAttributes, JsonConfig):
        """Return stats of observer."""
        try:
            return await self.instance.stats()
        except DockerAPIError as err:
        except DockerError as err:
            raise ObserverError() from err

    def is_running(self) -> Awaitable[bool]:
@ -183,6 +183,6 @@ class Observer(CoreSysAttributes, JsonConfig):
        _LOGGER.info("Repair HA observer %s", self.version)
        try:
            await self.instance.install(self.version)
        except DockerAPIError as err:
        except DockerError as err:
            _LOGGER.error("Repairing of HA observer failed")
            self.sys_capture_exception(err)
@ -14,7 +14,7 @@ from .coresys import CoreSys, CoreSysAttributes
from .docker.stats import DockerStats
from .docker.supervisor import DockerSupervisor
from .exceptions import (
    DockerAPIError,
    DockerError,
    HostAppArmorError,
    SupervisorError,
    SupervisorUpdateError,
@ -35,10 +35,10 @@ class Supervisor(CoreSysAttributes):
        """Prepare Home Assistant object."""
        try:
            await self.instance.attach(tag="latest")
        except DockerAPIError:
        except DockerError:
            _LOGGER.critical("Can't setup Supervisor Docker container!")

        with suppress(DockerAPIError):
        with suppress(DockerError):
            await self.instance.cleanup()

    @property
@ -115,7 +115,7 @@ class Supervisor(CoreSysAttributes):
            await self.instance.update_start_tag(
                self.sys_updater.image_supervisor, version
            )
        except DockerAPIError as err:
        except DockerError as err:
            _LOGGER.error("Update of Supervisor failed!")
            raise SupervisorUpdateError() from err
        else:
@ -142,7 +142,7 @@ class Supervisor(CoreSysAttributes):
        """Return stats of Supervisor."""
        try:
            return await self.instance.stats()
        except DockerAPIError as err:
        except DockerError as err:
            raise SupervisorError() from err

    async def repair(self):
@ -153,5 +153,5 @@ class Supervisor(CoreSysAttributes):
        _LOGGER.info("Repair Supervisor %s", self.version)
        try:
            await self.instance.retag()
        except DockerAPIError:
        except DockerError:
            _LOGGER.error("Repairing of Supervisor failed")
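Several of the update paths above wrap the post-update cleanup in a suppress(DockerError) block. A short, self-contained sketch of that idiom, using a stand-in exception rather than the real supervisor.exceptions module: cleanup is best effort, so a Docker failure there is swallowed instead of aborting the surrounding flow.

import asyncio
from contextlib import suppress

class DockerError(Exception):
    """Stand-in for the Supervisor's DockerError (assumption for this sketch)."""

async def cleanup_old_image() -> None:
    # Simulate the optional cleanup step failing.
    raise DockerError("old image is still in use")

async def update_flow() -> None:
    # ... imagine the update itself succeeded here ...
    with suppress(DockerError):  # ignore failures of the optional step
        await cleanup_old_image()
    print("update finished, cleanup failure ignored")

asyncio.run(update_flow())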
@ -52,18 +52,15 @@ RE_MONITOR_OUTPUT: re.Pattern[Any] = re.compile(r".+?: (?P<signal>[^ ].+) (?P<da
# Map GDBus to errors
MAP_GDBUS_ERROR: Dict[str, Any] = {
    "GDBus.Error:org.freedesktop.DBus.Error.ServiceUnknown": DBusInterfaceError,
    "GDBus.Error:org.freedesktop.DBus.Error.Spawn.ChildExited": DBusFatalError,
    "No such file or directory": DBusNotConnectedError,
}

# Commands for dbus
INTROSPECT: str = (
    "gdbus introspect --system --dest {bus} " "--object-path {object} --xml"
)
CALL: str = (
    "gdbus call --system --dest {bus} --object-path {object} "
    "--method {method} {args}"
)
INTROSPECT: str = "gdbus introspect --system --dest {bus} --object-path {object} --xml"
CALL: str = "gdbus call --system --dest {bus} --object-path {object} --timeout 10 --method {method} {args}"
MONITOR: str = "gdbus monitor --system --dest {bus}"
WAIT: str = "gdbus wait --system --activate {bus} --timeout 5 {bus}"

DBUS_METHOD_GETALL: str = "org.freedesktop.DBus.Properties.GetAll"

@ -104,12 +101,15 @@ class DBus:

    async def _init_proxy(self) -> None:
        """Read interface data."""
        command = shlex.split(
        # Wait for dbus object to be available after restart
        command_wait = shlex.split(WAIT.format(bus=self.bus_name))
        await self._send(command_wait, silent=True)

        # Introspect object & Parse XML
        command_introspect = shlex.split(
            INTROSPECT.format(bus=self.bus_name, object=self.object_path)
        )

        # Parse XML
        data = await self._send(command)
        data = await self._send(command_introspect)
        try:
            xml = ET.fromstring(data)
        except ET.ParseError as err:
@ -219,7 +219,7 @@ class DBus:
            _LOGGER.error("No attributes returned for %s", interface)
            raise DBusFatalError() from err

    async def _send(self, command: List[str]) -> str:
    async def _send(self, command: List[str], silent=False) -> str:
        """Send command over dbus."""
        # Run command
        _LOGGER.debug("Send dbus command: %s", command)
@ -237,7 +237,7 @@ class DBus:
            raise DBusFatalError() from err

        # Success?
        if proc.returncode == 0:
        if proc.returncode == 0 or silent:
            return data.decode()

        # Filter error
@ -248,7 +248,7 @@ class DBus:
            raise exception()

        # General
        _LOGGER.error("DBus return error: %s", error.strip())
        _LOGGER.error("DBus return: %s", error.strip())
        raise DBusFatalError()

    def attach_signals(self, filters=None):
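The gdbus changes reorder _init_proxy: a tolerant "gdbus wait" probe runs first (sent with silent=True, so a non-zero exit does not raise), and only then is the object introspected. A rough, self-contained sketch of that flow, assuming only the command templates shown in the diff; the helper below is an illustration, not the Supervisor implementation, and the commented call at the end needs a host with a system bus and gdbus installed.

import asyncio
import shlex

WAIT = "gdbus wait --system --activate {bus} --timeout 5 {bus}"
INTROSPECT = "gdbus introspect --system --dest {bus} --object-path {object} --xml"

async def _send(command: list, silent: bool = False) -> str:
    """Run a gdbus command; swallow a non-zero exit code when silent."""
    proc = await asyncio.create_subprocess_exec(
        *command,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.DEVNULL,
    )
    data, _ = await proc.communicate()
    if proc.returncode == 0 or silent:
        return data.decode()
    raise RuntimeError(f"gdbus failed with code {proc.returncode}")

async def introspect(bus: str, path: str) -> str:
    # Best-effort wait for the service, then a mandatory introspection.
    await _send(shlex.split(WAIT.format(bus=bus)), silent=True)
    return await _send(shlex.split(INTROSPECT.format(bus=bus, object=path)))

# Example (requires gdbus and a system bus on the host):
# print(asyncio.run(introspect("org.freedesktop.hostname1", "/org/freedesktop/hostname1")))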
@ -1,15 +1,22 @@
"""Custom log messages."""
import logging
import re

import sentry_sdk

_LOGGER: logging.Logger = logging.getLogger(__name__)

RE_BIND_FAILED = re.compile(r".*Bind for.*:(\d*) failed: port is already allocated.*")


def format_message(message: str) -> str:
    """Return a formated message if it's known."""
    match = RE_BIND_FAILED.match(message)
    if match:
        return (
            f"Port '{match.group(1)}' is already in use by something else on the host."
        )
    try:
        match = RE_BIND_FAILED.match(message)
        if match:
            return f"Port '{match.group(1)}' is already in use by something else on the host."
    except TypeError as err:
        _LOGGER.error("Type of message is not string - %s", err)
        sentry_sdk.capture_exception(err)

    return message
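A quick, self-contained check of the behaviour the new try/except in format_message adds. The regex is copied from the diff; the surrounding script is an illustration, and unlike the real code it only passes on TypeError instead of logging and reporting to Sentry.

import re

RE_BIND_FAILED = re.compile(r".*Bind for.*:(\d*) failed: port is already allocated.*")

def format_message(message):
    try:
        match = RE_BIND_FAILED.match(message)
        if match:
            return f"Port '{match.group(1)}' is already in use by something else on the host."
    except TypeError:
        pass  # the real code logs the error and reports it to Sentry instead
    return message

print(format_message("driver failed: Bind for 0.0.0.0:8123 failed: port is already allocated"))
# -> Port '8123' is already in use by something else on the host.
print(format_message(b"not a str"))  # bytes no longer crash the formatter
# -> b'not a str'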
@ -44,7 +44,10 @@ def dbus() -> DBus:
    async def mock_get_properties(_, interface):
        return load_json_fixture(f"{interface.replace('.', '_')}.json")

    async def mock_send(_, command):
    async def mock_send(_, command, silent=False):
        if silent:
            return ""

        filetype = "xml" if "--xml" in command else "fixture"
        fixture = f"{command[6].replace('/', '_')[1:]}.{filetype}"
        return load_fixture(fixture)
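The conftest change exists because _init_proxy now calls _send(..., silent=True) before introspecting, so any test double for DBus._send has to accept the extra keyword. A tiny, hypothetical illustration; the names and return values below are made up for the sketch, not taken from the repository's fixtures.

import asyncio

async def mock_send(_, command, silent=False):
    if silent:               # the "gdbus wait" probe: succeed with empty output
        return ""
    return "<node></node>"   # minimal XML for the introspection call

async def demo():
    # A stand-in written against the old two-argument signature would fail with:
    # TypeError: mock_send() got an unexpected keyword argument 'silent'
    print(await mock_send(None, ["gdbus", "wait"], silent=True))
    print(await mock_send(None, ["gdbus", "introspect", "--xml"]))

asyncio.run(demo())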