mirror of https://github.com/home-assistant/supervisor.git
synced 2025-07-24 09:36:31 +00:00
commit 82b2f66920
.github/main.workflow (vendored, 16 lines deleted)
@@ -1,16 +0,0 @@
workflow "tox" {
  on = "push"
  resolves = [
    "Python 3.7",
    "Json Files",
  ]
}

action "Python 3.7" {
  uses = "home-assistant/actions/py37-tox@master"
}

action "Json Files" {
  uses = "home-assistant/actions/jq@master"
  args = "**/*.json"
}
API.md (26 lines changed)
@@ -41,6 +41,7 @@ The addons from `addons` are only installed ones.
    "arch": "armhf|aarch64|i386|amd64",
    "channel": "stable|beta|dev",
    "timezone": "TIMEZONE",
    "ip_address": "ip address",
    "wait_boot": "int",
    "addons": [
      {
@@ -348,6 +349,7 @@ Load host configs from a USB stick.
    "last_version": "LAST_VERSION",
    "arch": "arch",
    "machine": "Image machine type",
    "ip_address": "ip address",
    "image": "str",
    "custom": "bool -> if custom image",
    "boot": "bool",
@@ -469,6 +471,7 @@ Get all available addons.
    "available": "bool",
    "arch": ["armhf", "aarch64", "i386", "amd64"],
    "machine": "[raspberrypi2, tinker]",
    "homeassistant": "null|min Home Assistant version",
    "repository": "12345678|null",
    "version": "null|VERSION_INSTALLED",
    "last_version": "LAST_VERSION",
@@ -505,7 +508,11 @@ Get all available addons.
    "audio_input": "null|0,0",
    "audio_output": "null|0,0",
    "services_role": "['service:access']",
    "discovery": "['service']"
    "discovery": "['service']",
    "ip_address": "ip address",
    "ingress": "bool",
    "ingress_entry": "null|/api/hassio_ingress/slug",
    "ingress_url": "null|/api/hassio_ingress/slug/entry.html"
}
```

@@ -579,6 +586,23 @@ Write data to add-on stdin
}
```

### ingress

- POST `/ingress/session`

Create a new session for access to the ingress service.

```json
{
    "session": "token"
}
```

- VIEW `/ingress/{token}`

Ingress WebUI for this add-on. The add-on needs to support Home Assistant auth.
Requires the ingress session as a cookie.
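For illustration, a minimal client flow against these two endpoints. This is a sketch only: it assumes a Supervisor reachable at `http://hassio`, a valid API token passed via the `X-Hassio-Key` header, the usual `{"result": ..., "data": ...}` response envelope, and the `ingress_session` cookie name; per the handler in this commit, ingress requests are additionally only accepted when they arrive through Home Assistant.

```python
import aiohttp


async def open_ingress(entry: str, token: str) -> bytes:
    """Create an ingress session, then fetch an add-on's ingress entry page."""
    headers = {"X-Hassio-Key": token}  # assumed auth header, see note above
    async with aiohttp.ClientSession(headers=headers) as http:
        # POST /ingress/session -> {"data": {"session": "..."}}
        async with http.post("http://hassio/ingress/session") as resp:
            session = (await resp.json())["data"]["session"]

        # The session must travel as a cookie on every ingress request.
        async with http.get(
            f"http://hassio{entry}", cookies={"ingress_session": session}
        ) as resp:
            return await resp.read()
```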

### discovery

- GET `/discovery`
azure-pipelines.yml (new file, 45 lines)
@@ -0,0 +1,45 @@
# Python package
# Create and test a Python package on multiple Python versions.
# Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more:
# https://docs.microsoft.com/azure/devops/pipelines/languages/python

trigger:
- master
- dev

pr:
- dev

jobs:

- job: "Tox"

  pool:
    vmImage: 'ubuntu-16.04'

  steps:
  - task: UsePythonVersion@0
    displayName: 'Use Python $(python.version)'
    inputs:
      versionSpec: '3.7'

  - script: pip install tox
    displayName: 'Install Tox'

  - script: tox
    displayName: 'Run Tox'


- job: "JQ"

  pool:
    vmImage: 'ubuntu-16.04'

  steps:
  - script: sudo apt-get install -y jq
    displayName: 'Install JQ'

  - bash: |
      shopt -s globstar
      cat **/*.json | jq '.'
    displayName: 'Run JQ'
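The JQ job simply parses every JSON file in the tree and fails the build on the first syntax error. A rough Python equivalent of what that shell step checks, for illustration only (the pipeline itself uses jq):

```python
import json
import pathlib
import sys


def check_json_files(root: str = ".") -> int:
    """Return a non-zero exit code if any JSON file fails to parse."""
    failed = 0
    for path in pathlib.Path(root).glob("**/*.json"):
        try:
            json.loads(path.read_text())
        except (json.JSONDecodeError, UnicodeDecodeError) as err:
            print(f"{path}: {err}", file=sys.stderr)
            failed = 1
    return failed


if __name__ == "__main__":
    sys.exit(check_json_files())
```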
@@ -13,7 +13,8 @@ def initialize_event_loop():
    """Attempt to use uvloop."""
    try:
        import uvloop
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

        uvloop.install()
    except ImportError:
        pass
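The hunk above swaps the explicit event-loop policy for uvloop's own installer, which performs the same policy change internally. A minimal, self-contained sketch of the resulting pattern (a simplification of the function being patched, not its full body):

```python
import asyncio


def initialize_event_loop() -> asyncio.AbstractEventLoop:
    """Prefer uvloop when importable, fall back to the stdlib loop."""
    try:
        import uvloop

        # uvloop.install() sets the uvloop event-loop policy for us,
        # replacing the older explicit
        # asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) call.
        uvloop.install()
    except ImportError:
        pass

    return asyncio.new_event_loop()
```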
@@ -1,41 +1,105 @@
"""Init file for Hass.io add-ons."""
from contextlib import suppress
from copy import deepcopy
from distutils.version import StrictVersion
from ipaddress import IPv4Address, ip_address
import logging
from pathlib import Path, PurePath
import re
import secrets
import shutil
import tarfile
from tempfile import TemporaryDirectory
from typing import Dict, Any
from typing import Any, Awaitable, Dict, Optional

import voluptuous as vol
from voluptuous.humanize import humanize_error

from ..const import (
    ATTR_ACCESS_TOKEN, ATTR_APPARMOR, ATTR_ARCH, ATTR_AUDIO, ATTR_AUDIO_INPUT,
    ATTR_AUDIO_OUTPUT, ATTR_AUTH_API, ATTR_AUTO_UART, ATTR_AUTO_UPDATE,
    ATTR_BOOT, ATTR_DESCRIPTON, ATTR_DEVICES, ATTR_DEVICETREE, ATTR_DISCOVERY,
    ATTR_DOCKER_API, ATTR_ENVIRONMENT, ATTR_FULL_ACCESS, ATTR_GPIO,
    ATTR_HASSIO_API, ATTR_HASSIO_ROLE, ATTR_HOMEASSISTANT_API, ATTR_HOST_DBUS,
    ATTR_HOST_IPC, ATTR_HOST_NETWORK, ATTR_HOST_PID, ATTR_IMAGE,
    ATTR_KERNEL_MODULES, ATTR_LEGACY, ATTR_LOCATON, ATTR_MACHINE, ATTR_MAP,
    ATTR_NAME, ATTR_NETWORK, ATTR_OPTIONS, ATTR_PORTS, ATTR_PRIVILEGED,
    ATTR_PROTECTED, ATTR_REPOSITORY, ATTR_SCHEMA, ATTR_SERVICES, ATTR_SLUG,
    ATTR_STARTUP, ATTR_STATE, ATTR_STDIN, ATTR_SYSTEM, ATTR_TIMEOUT,
    ATTR_TMPFS, ATTR_URL, ATTR_USER, ATTR_UUID, ATTR_VERSION, ATTR_WEBUI,
    SECURITY_DEFAULT, SECURITY_DISABLE, SECURITY_PROFILE, STATE_NONE,
    STATE_STARTED, STATE_STOPPED)
from ..coresys import CoreSysAttributes
    ATTR_ACCESS_TOKEN,
    ATTR_APPARMOR,
    ATTR_ARCH,
    ATTR_AUDIO,
    ATTR_AUDIO_INPUT,
    ATTR_AUDIO_OUTPUT,
    ATTR_AUTH_API,
    ATTR_AUTO_UART,
    ATTR_AUTO_UPDATE,
    ATTR_BOOT,
    ATTR_DESCRIPTON,
    ATTR_DEVICES,
    ATTR_DEVICETREE,
    ATTR_DISCOVERY,
    ATTR_DOCKER_API,
    ATTR_ENVIRONMENT,
    ATTR_FULL_ACCESS,
    ATTR_GPIO,
    ATTR_HASSIO_API,
    ATTR_HASSIO_ROLE,
    ATTR_HOMEASSISTANT,
    ATTR_HOMEASSISTANT_API,
    ATTR_HOST_DBUS,
    ATTR_HOST_IPC,
    ATTR_HOST_NETWORK,
    ATTR_HOST_PID,
    ATTR_IMAGE,
    ATTR_INGRESS,
    ATTR_INGRESS_ENTRY,
    ATTR_INGRESS_PORT,
    ATTR_INGRESS_TOKEN,
    ATTR_KERNEL_MODULES,
    ATTR_LEGACY,
    ATTR_LOCATON,
    ATTR_MACHINE,
    ATTR_MAP,
    ATTR_NAME,
    ATTR_NETWORK,
    ATTR_OPTIONS,
    ATTR_PORTS,
    ATTR_PRIVILEGED,
    ATTR_PROTECTED,
    ATTR_REPOSITORY,
    ATTR_SCHEMA,
    ATTR_SERVICES,
    ATTR_SLUG,
    ATTR_STARTUP,
    ATTR_STATE,
    ATTR_STDIN,
    ATTR_SYSTEM,
    ATTR_TIMEOUT,
    ATTR_TMPFS,
    ATTR_URL,
    ATTR_USER,
    ATTR_UUID,
    ATTR_VERSION,
    ATTR_WEBUI,
    SECURITY_DEFAULT,
    SECURITY_DISABLE,
    SECURITY_PROFILE,
    STATE_NONE,
    STATE_STARTED,
    STATE_STOPPED,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..docker.addon import DockerAddon
from ..exceptions import HostAppArmorError, JsonFileError
from ..utils import create_token
from ..docker.stats import DockerStats
from ..exceptions import (
    AddonsError,
    AddonsNotSupportedError,
    DockerAPIError,
    HostAppArmorError,
    JsonFileError,
)
from ..utils.apparmor import adjust_profile
from ..utils.json import read_json_file, write_json_file
from .utils import check_installed, remove_data
from .validate import (
    MACHINE_ALL, RE_SERVICE, RE_VOLUME, SCHEMA_ADDON_SNAPSHOT,
    validate_options)
    MACHINE_ALL,
    RE_SERVICE,
    RE_VOLUME,
    SCHEMA_ADDON_SNAPSHOT,
    validate_options,
)

_LOGGER = logging.getLogger(__name__)

@@ -47,21 +111,28 @@ RE_WEBUI = re.compile(
class Addon(CoreSysAttributes):
    """Hold data for add-on inside Hass.io."""

    def __init__(self, coresys, slug):
    def __init__(self, coresys: CoreSys, slug: str):
        """Initialize data holder."""
        self.coresys = coresys
        self.instance = DockerAddon(coresys, slug)
        self.coresys: CoreSys = coresys
        self.instance: DockerAddon = DockerAddon(coresys, slug)
        self._id: str = slug

        self._id = slug

    async def load(self):
    async def load(self) -> None:
        """Async initialize of object."""
        if not self.is_installed:
            return
        await self.instance.attach()
        with suppress(DockerAPIError):
            await self.instance.attach()

    @property
    def slug(self):
    def ip_address(self) -> IPv4Address:
        """Return IP of Add-on instance."""
        if not self.is_installed:
            return ip_address("0.0.0.0")
        return self.instance.ip_address

    @property
    def slug(self) -> str:
        """Return slug/id of add-on."""
        return self._id

@@ -76,30 +147,41 @@ class Addon(CoreSysAttributes):
        return self.sys_addons.data

    @property
    def is_installed(self):
    def is_installed(self) -> bool:
        """Return True if an add-on is installed."""
        return self._id in self._data.system

    @property
    def is_detached(self):
    def is_detached(self) -> bool:
        """Return True if add-on is detached."""
        return self._id not in self._data.cache

    @property
    def available(self):
    def available(self) -> bool:
        """Return True if this add-on is available on this platform."""
        if self.is_detached:
            addon_data = self._data.system.get(self._id)
        else:
            addon_data = self._data.cache.get(self._id)

        # Architecture
        if not self.sys_arch.is_supported(self.supported_arch):
        if not self.sys_arch.is_supported(addon_data[ATTR_ARCH]):
            return False

        # Machine / Hardware
        if self.sys_machine not in self.supported_machine:
        machine = addon_data.get(ATTR_MACHINE) or MACHINE_ALL
        if self.sys_machine not in machine:
            return False

        # Home Assistant
        version = addon_data.get(ATTR_HOMEASSISTANT) or self.sys_homeassistant.version
        if StrictVersion(self.sys_homeassistant.version) < StrictVersion(version):
            return False

        return True
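The Home Assistant gate in the `available` property above compares versions with distutils' StrictVersion, so ordering is numeric rather than lexical. A quick illustration (note that distutils has since been deprecated; `packaging.version` is the usual modern substitute):

```python
from distutils.version import StrictVersion

# Numeric comparison: 0.91.2 < 0.100.0, which a plain string compare gets wrong.
assert StrictVersion("0.91.2") < StrictVersion("0.100.0")
assert not ("0.91.2" < "0.100.0")  # lexical comparison disagrees

# An add-on that requires at least Home Assistant 0.92.0 on a 0.91.4 host:
addon_min, running = "0.92.0", "0.91.4"
available = not StrictVersion(running) < StrictVersion(addon_min)
assert available is False
```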

    @property
    def version_installed(self):
    def version_installed(self) -> Optional[str]:
        """Return installed version."""
        return self._data.user.get(self._id, {}).get(ATTR_VERSION)

@@ -202,6 +284,20 @@ class Addon(CoreSysAttributes):
            return self._data.user[self._id].get(ATTR_ACCESS_TOKEN)
        return None

    @property
    def ingress_token(self):
        """Return the ingress access token."""
        if self.is_installed:
            return self._data.user[self._id].get(ATTR_INGRESS_TOKEN)
        return None

    @property
    def ingress_entry(self):
        """Return ingress external URL."""
        if self.is_installed and self.with_ingress:
            return f"/api/hassio_ingress/{self.ingress_token}"
        return None

    @property
    def description(self):
        """Return description of add-on."""
@@ -292,6 +388,17 @@ class Addon(CoreSysAttributes):

        self._data.user[self._id][ATTR_NETWORK] = new_ports

    @property
    def ingress_url(self):
        """Return the ingress URL."""
        if not self.is_installed or not self.with_ingress:
            return None

        webui = f"/api/hassio_ingress/{self.ingress_token}/"
        if ATTR_INGRESS_ENTRY in self._mesh:
            return f"{webui}{self._mesh[ATTR_INGRESS_ENTRY]}"
        return webui

    @property
    def webui(self):
        """Return URL to webui or None."""
@@ -323,6 +430,11 @@ class Addon(CoreSysAttributes):

        return f"{proto}://[HOST]:{port}{s_suffix}"

    @property
    def ingress_internal(self):
        """Return ingress host URL."""
        return f"http://{self.ip_address}:{self._mesh[ATTR_INGRESS_PORT]}"

    @property
    def host_network(self):
        """Return True if add-on runs on host network."""
@@ -407,6 +519,11 @@ class Addon(CoreSysAttributes):
        """Return True if the add-on uses stdin input."""
        return self._mesh[ATTR_STDIN]

    @property
    def with_ingress(self):
        """Return True if the add-on supports ingress."""
        return self._mesh[ATTR_INGRESS]

    @property
    def with_gpio(self):
        """Return True if the add-on has access to the GPIO interface."""
@@ -437,6 +554,11 @@ class Addon(CoreSysAttributes):
        """Return True if the add-on has access to audio."""
        return self._mesh[ATTR_AUDIO]

    @property
    def homeassistant_version(self) -> Optional[str]:
        """Return the minimum Home Assistant version required by the add-on."""
        return self._mesh.get(ATTR_HOMEASSISTANT)

    @property
    def audio_output(self):
        """Return ALSA config for output or None."""
@@ -642,7 +764,7 @@ class Addon(CoreSysAttributes):

        return True

    async def _install_apparmor(self):
    async def _install_apparmor(self) -> None:
        """Install or update AppArmor profile for add-on."""
        exists_local = self.sys_host.apparmor.exists(self.slug)
        exists_addon = self.path_apparmor.exists()
@@ -664,7 +786,7 @@ class Addon(CoreSysAttributes):
        await self.sys_host.apparmor.load_profile(self.slug, profile_file)

    @property
    def schema(self):
    def schema(self) -> vol.Schema:
        """Create a schema for add-on options."""
        raw_schema = self._mesh[ATTR_SCHEMA]

@@ -672,7 +794,7 @@ class Addon(CoreSysAttributes):
            return vol.Schema(dict)
        return vol.Schema(vol.All(dict, validate_options(raw_schema)))

    def test_update_schema(self):
    def test_update_schema(self) -> bool:
        """Check if the existing configuration is valid after update."""
        if not self.is_installed or self.is_detached:
            return True
@@ -702,17 +824,17 @@ class Addon(CoreSysAttributes):
                return False
        return True

    async def install(self):
    async def install(self) -> None:
        """Install an add-on."""
        if not self.available:
            _LOGGER.error(
                "Add-on %s not supported on %s with %s architecture",
                self._id, self.sys_machine, self.sys_arch.supported)
            return False
            raise AddonsNotSupportedError()

        if self.is_installed:
            _LOGGER.error("Add-on %s is already installed", self._id)
            return False
            _LOGGER.warning("Add-on %s is already installed", self._id)
            return

        if not self.path_data.is_dir():
            _LOGGER.info(
@@ -722,18 +844,20 @@ class Addon(CoreSysAttributes):
        # Setup/Fix AppArmor profile
        await self._install_apparmor()

        if not await self.instance.install(
                self.last_version, self.image_next):
            return False

        self._set_install(self.image_next, self.last_version)
        return True
        try:
            await self.instance.install(self.last_version, self.image_next)
        except DockerAPIError:
            raise AddonsError() from None
        else:
            self._set_install(self.image_next, self.last_version)

    @check_installed
    async def uninstall(self):
    async def uninstall(self) -> None:
        """Remove an add-on."""
        if not await self.instance.remove():
            return False
        try:
            await self.instance.remove()
        except DockerAPIError:
            raise AddonsError() from None

        if self.path_data.is_dir():
            _LOGGER.info(
@@ -750,13 +874,11 @@ class Addon(CoreSysAttributes):
        with suppress(HostAppArmorError):
            await self.sys_host.apparmor.remove_profile(self.slug)

        # Remove discovery messages
        # Cleanup internal data
        self.remove_discovery()

        self._set_uninstall()
        return True

    async def state(self):
    async def state(self) -> str:
        """Return running state of add-on."""
        if not self.is_installed:
            return STATE_NONE
@@ -766,46 +888,57 @@ class Addon(CoreSysAttributes):
        return STATE_STOPPED

    @check_installed
    async def start(self):
    async def start(self) -> None:
        """Set options and start add-on."""
        if await self.instance.is_running():
            _LOGGER.warning("%s already running!", self.slug)
            return

        # Access Token
        self._data.user[self._id][ATTR_ACCESS_TOKEN] = create_token()
        self._data.user[self._id][ATTR_ACCESS_TOKEN] = secrets.token_hex(56)
        self.save_data()

        # Options
        if not self.write_options():
            return False
            raise AddonsError()

        # Sound
        if self.with_audio and not self.write_asound():
            return False
            raise AddonsError()

        return await self.instance.run()
        try:
            await self.instance.run()
        except DockerAPIError:
            raise AddonsError() from None

    @check_installed
    def stop(self):
        """Stop add-on.

        Return a coroutine.
        """
        return self.instance.stop()
    async def stop(self) -> None:
        """Stop add-on."""
        try:
            return await self.instance.stop()
        except DockerAPIError:
            raise AddonsError() from None

    @check_installed
    async def update(self):
    async def update(self) -> None:
        """Update add-on."""
        last_state = await self.state()

        if self.last_version == self.version_installed:
            _LOGGER.warning("No update available for add-on %s", self._id)
            return False
            return

        if not await self.instance.update(
                self.last_version, self.image_next):
            return False
        # Check if still available; maybe something has changed
        if not self.available:
            _LOGGER.error(
                "Add-on %s not supported on %s with %s architecture",
                self._id, self.sys_machine, self.sys_arch.supported)
            raise AddonsNotSupportedError()

        # Update instance
        last_state = await self.state()
        try:
            await self.instance.update(self.last_version, self.image_next)
        except DockerAPIError:
            raise AddonsError() from None
        self._set_update(self.image_next, self.last_version)

        # Setup/Fix AppArmor profile
@@ -814,16 +947,16 @@ class Addon(CoreSysAttributes):
        # restore state
        if last_state == STATE_STARTED:
            await self.start()
        return True

    @check_installed
    async def restart(self):
    async def restart(self) -> None:
        """Restart add-on."""
        await self.stop()
        return await self.start()
        with suppress(AddonsError):
            await self.stop()
        await self.start()

    @check_installed
    def logs(self):
    def logs(self) -> Awaitable[bytes]:
        """Return add-ons log output.

        Return a coroutine.
@@ -831,33 +964,32 @@ class Addon(CoreSysAttributes):
        return self.instance.logs()

    @check_installed
    def stats(self):
        """Return stats of container.

        Return a coroutine.
        """
        return self.instance.stats()
    async def stats(self) -> DockerStats:
        """Return stats of container."""
        try:
            return await self.instance.stats()
        except DockerAPIError:
            raise AddonsError() from None

    @check_installed
    async def rebuild(self):
    async def rebuild(self) -> None:
        """Perform a rebuild of local build add-on."""
        last_state = await self.state()

        if not self.need_build:
            _LOGGER.error("Can't rebuild a non-local build add-on!")
            return False
            raise AddonsNotSupportedError()

        # remove docker container but not addon config
        if not await self.instance.remove():
            return False

        if not await self.instance.install(self.version_installed):
            return False
        try:
            await self.instance.remove()
            await self.instance.install(self.version_installed)
        except DockerAPIError:
            raise AddonsError() from None

        # restore state
        if last_state == STATE_STARTED:
            await self.start()
        return True

    @check_installed
    async def write_stdin(self, data):
@@ -867,18 +999,23 @@ class Addon(CoreSysAttributes):
        """
        if not self.with_stdin:
            _LOGGER.error("Add-on doesn't support writing to stdin!")
            return False
            raise AddonsNotSupportedError()

        return await self.instance.write_stdin(data)
        try:
            return await self.instance.write_stdin(data)
        except DockerAPIError:
            raise AddonsError() from None

    @check_installed
    async def snapshot(self, tar_file):
    async def snapshot(self, tar_file: tarfile.TarFile) -> None:
        """Snapshot state of an add-on."""
        with TemporaryDirectory(dir=str(self.sys_config.path_tmp)) as temp:
            # store local image
            if self.need_build and not await \
                    self.instance.export_image(Path(temp, 'image.tar')):
                return False
            if self.need_build:
                try:
                    await self.instance.export_image(Path(temp, 'image.tar'))
                except DockerAPIError:
                    raise AddonsError() from None

            data = {
                ATTR_USER: self._data.user.get(self._id, {}),
@@ -892,7 +1029,7 @@ class Addon(CoreSysAttributes):
                write_json_file(Path(temp, 'addon.json'), data)
            except JsonFileError:
                _LOGGER.error("Can't save meta for %s", self._id)
                return False
                raise AddonsError() from None

            # Store AppArmor Profile
            if self.sys_host.apparmor.exists(self.slug):
@@ -901,7 +1038,7 @@ class Addon(CoreSysAttributes):
                    self.sys_host.apparmor.backup_profile(self.slug, profile)
                except HostAppArmorError:
                    _LOGGER.error("Can't backup AppArmor profile")
                    return False
                    raise AddonsError() from None

            # write into tarfile
            def _write_tarfile():
@@ -915,12 +1052,11 @@ class Addon(CoreSysAttributes):
                await self.sys_run_in_executor(_write_tarfile)
            except (tarfile.TarError, OSError) as err:
                _LOGGER.error("Can't write tarfile %s: %s", tar_file, err)
                return False
                raise AddonsError() from None

        _LOGGER.info("Finish snapshot for addon %s", self._id)
        return True

    async def restore(self, tar_file):
    async def restore(self, tar_file: tarfile.TarFile) -> None:
        """Restore state of an add-on."""
        with TemporaryDirectory(dir=str(self.sys_config.path_tmp)) as temp:
            # extract snapshot
@@ -933,13 +1069,13 @@ class Addon(CoreSysAttributes):
                await self.sys_run_in_executor(_extract_tarfile)
            except tarfile.TarError as err:
                _LOGGER.error("Can't read tarfile %s: %s", tar_file, err)
                return False
                raise AddonsError() from None

            # Read snapshot data
            try:
                data = read_json_file(Path(temp, 'addon.json'))
            except JsonFileError:
                return False
                raise AddonsError() from None

            # Validate
            try:
@@ -947,7 +1083,7 @@ class Addon(CoreSysAttributes):
            except vol.Invalid as err:
                _LOGGER.error("Can't validate %s, snapshot data: %s",
                              self._id, humanize_error(data, err))
                return False
                raise AddonsError() from None

            # Restore local add-on information
            _LOGGER.info("Restore config for addon %s", self._id)
@@ -961,15 +1097,19 @@ class Addon(CoreSysAttributes):

                image_file = Path(temp, 'image.tar')
                if image_file.is_file():
                    await self.instance.import_image(image_file, version)
                    with suppress(DockerAPIError):
                        await self.instance.import_image(image_file, version)
                else:
                    if await self.instance.install(version, restore_image):
                    with suppress(DockerAPIError):
                        await self.instance.install(version, restore_image)
                        await self.instance.cleanup()
            elif self.instance.version != version or self.legacy:
                _LOGGER.info("Restore/Update image for addon %s", self._id)
                await self.instance.update(version, restore_image)
                with suppress(DockerAPIError):
                    await self.instance.update(version, restore_image)
            else:
                await self.instance.stop()
                with suppress(DockerAPIError):
                    await self.instance.stop()

            # Restore data
            def _restore_data():
@@ -983,7 +1123,7 @@ class Addon(CoreSysAttributes):
                await self.sys_run_in_executor(_restore_data)
            except shutil.Error as err:
                _LOGGER.error("Can't restore origin data: %s", err)
                return False
                raise AddonsError() from None

            # Restore AppArmor
            profile_file = Path(temp, 'apparmor.txt')
@@ -993,11 +1133,10 @@ class Addon(CoreSysAttributes):
                                self.slug, profile_file)
            except HostAppArmorError:
                _LOGGER.error("Can't restore AppArmor profile")
                return False
                raise AddonsError() from None

        # Run add-on
        if data[ATTR_STATE] == STATE_STARTED:
            return await self.start()

        _LOGGER.info("Finish restore for add-on %s", self._id)
        return True
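The through-line of this file's changes: methods that previously returned True/False now raise (AddonsError, AddonsNotSupportedError) and translate the low-level DockerAPIError into AddonsError, so callers suppress or propagate instead of checking return values. A condensed sketch of that convention, with illustrative stand-ins for the real exception classes:

```python
from contextlib import suppress


class DockerAPIError(Exception):
    """Stand-in for hassio.exceptions.DockerAPIError."""


class AddonsError(Exception):
    """Stand-in for hassio.exceptions.AddonsError."""


async def install(instance, version, image) -> None:
    """Raise instead of returning False, hiding the Docker layer's error type."""
    try:
        await instance.install(version, image)
    except DockerAPIError:
        # `from None` drops the Docker traceback; AddonsError is the surface
        # the rest of the Supervisor is expected to handle.
        raise AddonsError() from None


async def restart(addon) -> None:
    """Callers decide what is fatal: a failed stop no longer aborts the restart."""
    with suppress(AddonsError):
        await addon.stop()
    await addon.start()
```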
@@ -20,6 +20,7 @@ from ..const import (
    SECURITY_DISABLE,
    SECURITY_PROFILE,
)
from ..exceptions import AddonsNotSupportedError

if TYPE_CHECKING:
    from .addon import Addon
@@ -107,7 +108,7 @@ def check_installed(method):
        """Raise if the add-on is not installed, otherwise call the method."""
        if not addon.is_installed:
            _LOGGER.error("Addon %s is not installed", addon.slug)
            return False
            raise AddonsNotSupportedError()
        return await method(addon, *args, **kwargs)

    return wrap_check
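For context, a minimal self-contained sketch of this decorator pattern (simplified: the real check_installed in hassio/addons/utils.py also logs before raising and imports the exception from hassio.exceptions):

```python
import functools


class AddonsNotSupportedError(Exception):
    """Stand-in for hassio.exceptions.AddonsNotSupportedError."""


def check_installed(method):
    """Guard an Addon coroutine so it fails fast when the add-on is missing."""
    @functools.wraps(method)
    async def wrap_check(addon, *args, **kwargs):
        if not addon.is_installed:
            raise AddonsNotSupportedError()
        return await method(addon, *args, **kwargs)

    return wrap_check
```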
@@ -1,29 +1,87 @@
"""Validate add-ons options schema."""
import logging
import re
import secrets
import uuid

import voluptuous as vol

from ..const import (
    ARCH_ALL, ATTR_ACCESS_TOKEN, ATTR_APPARMOR, ATTR_ARCH, ATTR_ARGS,
    ATTR_AUDIO, ATTR_AUDIO_INPUT, ATTR_AUDIO_OUTPUT, ATTR_AUTH_API,
    ATTR_AUTO_UART, ATTR_AUTO_UPDATE, ATTR_BOOT, ATTR_BUILD_FROM,
    ATTR_DESCRIPTON, ATTR_DEVICES, ATTR_DEVICETREE, ATTR_DISCOVERY,
    ATTR_DOCKER_API, ATTR_ENVIRONMENT, ATTR_FULL_ACCESS, ATTR_GPIO,
    ATTR_HASSIO_API, ATTR_HASSIO_ROLE, ATTR_HOMEASSISTANT_API, ATTR_HOST_DBUS,
    ATTR_HOST_IPC, ATTR_HOST_NETWORK, ATTR_HOST_PID, ATTR_IMAGE,
    ATTR_KERNEL_MODULES, ATTR_LEGACY, ATTR_LOCATON, ATTR_MACHINE,
    ATTR_MAINTAINER, ATTR_MAP, ATTR_NAME, ATTR_NETWORK, ATTR_OPTIONS,
    ATTR_PORTS, ATTR_PRIVILEGED, ATTR_PROTECTED, ATTR_REPOSITORY, ATTR_SCHEMA,
    ATTR_SERVICES, ATTR_SLUG, ATTR_SQUASH, ATTR_STARTUP, ATTR_STATE,
    ATTR_STDIN, ATTR_SYSTEM, ATTR_TIMEOUT, ATTR_TMPFS, ATTR_URL, ATTR_USER,
    ATTR_UUID, ATTR_VERSION, ATTR_WEBUI, BOOT_AUTO, BOOT_MANUAL,
    PRIVILEGED_ALL, ROLE_ALL, ROLE_DEFAULT, STARTUP_ALL, STARTUP_APPLICATION,
    STARTUP_SERVICES, STATE_STARTED, STATE_STOPPED)
    ARCH_ALL,
    ATTR_ACCESS_TOKEN,
    ATTR_APPARMOR,
    ATTR_ARCH,
    ATTR_ARGS,
    ATTR_AUDIO,
    ATTR_AUDIO_INPUT,
    ATTR_AUDIO_OUTPUT,
    ATTR_AUTH_API,
    ATTR_AUTO_UART,
    ATTR_AUTO_UPDATE,
    ATTR_BOOT,
    ATTR_BUILD_FROM,
    ATTR_DESCRIPTON,
    ATTR_DEVICES,
    ATTR_DEVICETREE,
    ATTR_DISCOVERY,
    ATTR_DOCKER_API,
    ATTR_ENVIRONMENT,
    ATTR_FULL_ACCESS,
    ATTR_GPIO,
    ATTR_HASSIO_API,
    ATTR_HASSIO_ROLE,
    ATTR_HOMEASSISTANT_API,
    ATTR_HOMEASSISTANT,
    ATTR_HOST_DBUS,
    ATTR_HOST_IPC,
    ATTR_HOST_NETWORK,
    ATTR_HOST_PID,
    ATTR_IMAGE,
    ATTR_INGRESS,
    ATTR_INGRESS_ENTRY,
    ATTR_INGRESS_PORT,
    ATTR_INGRESS_TOKEN,
    ATTR_KERNEL_MODULES,
    ATTR_LEGACY,
    ATTR_LOCATON,
    ATTR_MACHINE,
    ATTR_MAINTAINER,
    ATTR_MAP,
    ATTR_NAME,
    ATTR_NETWORK,
    ATTR_OPTIONS,
    ATTR_PORTS,
    ATTR_PRIVILEGED,
    ATTR_PROTECTED,
    ATTR_REPOSITORY,
    ATTR_SCHEMA,
    ATTR_SERVICES,
    ATTR_SLUG,
    ATTR_SQUASH,
    ATTR_STARTUP,
    ATTR_STATE,
    ATTR_STDIN,
    ATTR_SYSTEM,
    ATTR_TIMEOUT,
    ATTR_TMPFS,
    ATTR_URL,
    ATTR_USER,
    ATTR_UUID,
    ATTR_VERSION,
    ATTR_WEBUI,
    BOOT_AUTO,
    BOOT_MANUAL,
    PRIVILEGED_ALL,
    ROLE_ALL,
    ROLE_DEFAULT,
    STARTUP_ALL,
    STARTUP_APPLICATION,
    STARTUP_SERVICES,
    STATE_STARTED,
    STATE_STOPPED,
)
from ..discovery.validate import valid_discovery_service
from ..validate import (
    ALSA_DEVICE, DOCKER_PORTS, NETWORK_PORT, SHA256, UUID_MATCH)
from ..validate import ALSA_DEVICE, DOCKER_PORTS, NETWORK_PORT, TOKEN, UUID_MATCH

_LOGGER = logging.getLogger(__name__)

@@ -89,6 +147,10 @@ SCHEMA_ADDON_CONFIG = vol.Schema({
    vol.Optional(ATTR_PORTS): DOCKER_PORTS,
    vol.Optional(ATTR_WEBUI):
        vol.Match(r"^(?:https?|\[PROTO:\w+\]):\/\/\[HOST\]:\[PORT:\d+\].*$"),
    vol.Optional(ATTR_INGRESS, default=False): vol.Boolean(),
    vol.Optional(ATTR_INGRESS_PORT, default=8099): NETWORK_PORT,
    vol.Optional(ATTR_INGRESS_ENTRY): vol.Coerce(str),
    vol.Optional(ATTR_HOMEASSISTANT): vol.Maybe(vol.Coerce(str)),
    vol.Optional(ATTR_HOST_NETWORK, default=False): vol.Boolean(),
    vol.Optional(ATTR_HOST_PID, default=False): vol.Boolean(),
    vol.Optional(ATTR_HOST_IPC, default=False): vol.Boolean(),
@@ -158,7 +220,8 @@ SCHEMA_ADDON_USER = vol.Schema({
    vol.Required(ATTR_VERSION): vol.Coerce(str),
    vol.Optional(ATTR_IMAGE): vol.Coerce(str),
    vol.Optional(ATTR_UUID, default=lambda: uuid.uuid4().hex): UUID_MATCH,
    vol.Optional(ATTR_ACCESS_TOKEN): SHA256,
    vol.Optional(ATTR_ACCESS_TOKEN): TOKEN,
    vol.Optional(ATTR_INGRESS_TOKEN, default=secrets.token_urlsafe): vol.Coerce(str),
    vol.Optional(ATTR_OPTIONS, default=dict): dict,
    vol.Optional(ATTR_AUTO_UPDATE, default=False): vol.Boolean(),
    vol.Optional(ATTR_BOOT):
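A detail worth noting in the user schema above: voluptuous accepts a callable as a default and invokes it on each validation run, which is why `default=secrets.token_urlsafe` gives every add-on its own ingress token. A small sketch of that behavior (illustrative keys, not Supervisor code):

```python
import secrets
import uuid

import voluptuous as vol

schema = vol.Schema({
    # Callable defaults are invoked per validation, so two validated
    # documents never share the same generated UUID or ingress token.
    vol.Optional("uuid", default=lambda: uuid.uuid4().hex): str,
    vol.Optional("ingress_token", default=secrets.token_urlsafe): str,
})

first = schema({})
second = schema({})
assert first["ingress_token"] != second["ingress_token"]
```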
@@ -14,6 +14,7 @@ from .hassos import APIHassOS
from .homeassistant import APIHomeAssistant
from .host import APIHost
from .info import APIInfo
from .ingress import APIIngress
from .proxy import APIProxy
from .security import SecurityMiddleware
from .services import APIServices
@@ -47,6 +48,7 @@ class RestAPI(CoreSysAttributes):
        self._register_proxy()
        self._register_panel()
        self._register_addons()
        self._register_ingress()
        self._register_snapshots()
        self._register_discovery()
        self._register_services()
@@ -186,6 +188,16 @@ class RestAPI(CoreSysAttributes):
            web.get('/addons/{addon}/stats', api_addons.stats),
        ])

    def _register_ingress(self) -> None:
        """Register Ingress functions."""
        api_ingress = APIIngress()
        api_ingress.coresys = self.coresys

        self.webapp.add_routes([
            web.post('/ingress/session', api_ingress.create_session),
            web.view('/ingress/{token}/{path:.*}', api_ingress.handler),
        ])

    def _register_snapshots(self) -> None:
        """Register snapshots functions."""
        api_snapshots = APISnapshots()
@@ -1,31 +1,89 @@
"""Init file for Hass.io Home Assistant RESTful API."""
import asyncio
import logging
from typing import Any, Awaitable, Dict, List

from aiohttp import web
import voluptuous as vol
from voluptuous.humanize import humanize_error

from .utils import api_process, api_process_raw, api_validate
from ..addons.addon import Addon
from ..addons.utils import rating_security
from ..const import (
    ATTR_VERSION, ATTR_LAST_VERSION, ATTR_STATE, ATTR_BOOT, ATTR_OPTIONS,
    ATTR_URL, ATTR_DESCRIPTON, ATTR_DETACHED, ATTR_NAME, ATTR_REPOSITORY,
    ATTR_BUILD, ATTR_AUTO_UPDATE, ATTR_NETWORK, ATTR_HOST_NETWORK, ATTR_SLUG,
    ATTR_SOURCE, ATTR_REPOSITORIES, ATTR_ADDONS, ATTR_ARCH, ATTR_MAINTAINER,
    ATTR_INSTALLED, ATTR_LOGO, ATTR_WEBUI, ATTR_DEVICES, ATTR_PRIVILEGED,
    ATTR_AUDIO, ATTR_AUDIO_INPUT, ATTR_AUDIO_OUTPUT, ATTR_HASSIO_API,
    ATTR_GPIO, ATTR_HOMEASSISTANT_API, ATTR_STDIN, BOOT_AUTO, BOOT_MANUAL,
    ATTR_CHANGELOG, ATTR_HOST_IPC, ATTR_HOST_DBUS, ATTR_LONG_DESCRIPTION,
    ATTR_CPU_PERCENT, ATTR_MEMORY_LIMIT, ATTR_MEMORY_USAGE, ATTR_NETWORK_TX,
    ATTR_NETWORK_RX, ATTR_BLK_READ, ATTR_BLK_WRITE, ATTR_ICON, ATTR_SERVICES,
    ATTR_DISCOVERY, ATTR_APPARMOR, ATTR_DEVICETREE, ATTR_DOCKER_API,
    ATTR_FULL_ACCESS, ATTR_PROTECTED, ATTR_RATING, ATTR_HOST_PID,
    ATTR_HASSIO_ROLE, ATTR_MACHINE, ATTR_AVAILABLE, ATTR_AUTH_API,
    ATTR_ADDONS,
    ATTR_APPARMOR,
    ATTR_ARCH,
    ATTR_AUDIO,
    ATTR_AUDIO_INPUT,
    ATTR_AUDIO_OUTPUT,
    ATTR_AUTH_API,
    ATTR_AUTO_UPDATE,
    ATTR_AVAILABLE,
    ATTR_BLK_READ,
    ATTR_BLK_WRITE,
    ATTR_BOOT,
    ATTR_BUILD,
    ATTR_CHANGELOG,
    ATTR_CPU_PERCENT,
    ATTR_DESCRIPTON,
    ATTR_DETACHED,
    ATTR_DEVICES,
    ATTR_DEVICETREE,
    ATTR_DISCOVERY,
    ATTR_DOCKER_API,
    ATTR_FULL_ACCESS,
    ATTR_GPIO,
    ATTR_HASSIO_API,
    ATTR_HASSIO_ROLE,
    ATTR_HOMEASSISTANT,
    ATTR_HOMEASSISTANT_API,
    ATTR_HOST_DBUS,
    ATTR_HOST_IPC,
    ATTR_HOST_NETWORK,
    ATTR_HOST_PID,
    ATTR_ICON,
    ATTR_INGRESS,
    ATTR_INGRESS_ENTRY,
    ATTR_INGRESS_URL,
    ATTR_INSTALLED,
    ATTR_IP_ADDRESS,
    ATTR_KERNEL_MODULES,
    CONTENT_TYPE_PNG, CONTENT_TYPE_BINARY, CONTENT_TYPE_TEXT, REQUEST_FROM)
    ATTR_LAST_VERSION,
    ATTR_LOGO,
    ATTR_LONG_DESCRIPTION,
    ATTR_MACHINE,
    ATTR_MAINTAINER,
    ATTR_MEMORY_LIMIT,
    ATTR_MEMORY_USAGE,
    ATTR_NAME,
    ATTR_NETWORK,
    ATTR_NETWORK_RX,
    ATTR_NETWORK_TX,
    ATTR_OPTIONS,
    ATTR_PRIVILEGED,
    ATTR_PROTECTED,
    ATTR_RATING,
    ATTR_REPOSITORIES,
    ATTR_REPOSITORY,
    ATTR_SERVICES,
    ATTR_SLUG,
    ATTR_SOURCE,
    ATTR_STATE,
    ATTR_STDIN,
    ATTR_URL,
    ATTR_VERSION,
    ATTR_WEBUI,
    BOOT_AUTO,
    BOOT_MANUAL,
    CONTENT_TYPE_BINARY,
    CONTENT_TYPE_PNG,
    CONTENT_TYPE_TEXT,
    REQUEST_FROM,
)
from ..coresys import CoreSysAttributes
from ..validate import DOCKER_PORTS, ALSA_DEVICE
from ..exceptions import APIError
from ..validate import ALSA_DEVICE, DOCKER_PORTS
from .utils import api_process, api_process_raw, api_validate

_LOGGER = logging.getLogger(__name__)

@@ -51,7 +109,7 @@ SCHEMA_SECURITY = vol.Schema({
class APIAddons(CoreSysAttributes):
    """Handle RESTful API for add-on functions."""

    def _extract_addon(self, request, check_installed=True):
    def _extract_addon(self, request: web.Request, check_installed: bool = True) -> Addon:
        """Return addon, throw an exception if it doesn't exist."""
        addon_slug = request.match_info.get('addon')

@@ -69,7 +127,7 @@ class APIAddons(CoreSysAttributes):
        return addon

    @api_process
    async def list(self, request):
    async def list(self, request: web.Request) -> Dict[str, Any]:
        """Return all add-ons or repositories."""
        data_addons = []
        for addon in self.sys_addons.list_addons:
@@ -104,13 +162,12 @@ class APIAddons(CoreSysAttributes):
        }

    @api_process
    async def reload(self, request):
    async def reload(self, request: web.Request) -> None:
        """Reload all add-on data."""
        await asyncio.shield(self.sys_addons.reload())
        return True

    @api_process
    async def info(self, request):
    async def info(self, request: web.Request) -> Dict[str, Any]:
        """Return add-on information."""
        addon = self._extract_addon(request, check_installed=False)

@@ -130,6 +187,7 @@ class APIAddons(CoreSysAttributes):
            ATTR_OPTIONS: addon.options,
            ATTR_ARCH: addon.supported_arch,
            ATTR_MACHINE: addon.supported_machine,
            ATTR_HOMEASSISTANT: addon.homeassistant_version,
            ATTR_URL: addon.url,
            ATTR_DETACHED: addon.is_detached,
            ATTR_AVAILABLE: addon.available,
@@ -161,17 +219,20 @@ class APIAddons(CoreSysAttributes):
            ATTR_AUDIO_OUTPUT: addon.audio_output,
            ATTR_SERVICES: _pretty_services(addon),
            ATTR_DISCOVERY: addon.discovery,
            ATTR_IP_ADDRESS: str(addon.ip_address),
            ATTR_INGRESS: addon.with_ingress,
            ATTR_INGRESS_ENTRY: addon.ingress_entry,
            ATTR_INGRESS_URL: addon.ingress_url,
        }

    @api_process
    async def options(self, request):
    async def options(self, request: web.Request) -> None:
        """Store user options for add-on."""
        addon = self._extract_addon(request)

        addon_schema = SCHEMA_OPTIONS.extend({
            vol.Optional(ATTR_OPTIONS): vol.Any(None, addon.schema),
        })

        body = await api_validate(addon_schema, request)

        if ATTR_OPTIONS in body:
@@ -188,10 +249,9 @@ class APIAddons(CoreSysAttributes):
            addon.audio_output = body[ATTR_AUDIO_OUTPUT]

        addon.save_data()
        return True

    @api_process
    async def security(self, request):
    async def security(self, request: web.Request) -> None:
        """Store security options for add-on."""
        addon = self._extract_addon(request)
        body = await api_validate(SCHEMA_SECURITY, request)
@@ -201,17 +261,13 @@ class APIAddons(CoreSysAttributes):
            addon.protected = body[ATTR_PROTECTED]

        addon.save_data()
        return True

    @api_process
    async def stats(self, request):
    async def stats(self, request: web.Request) -> Dict[str, Any]:
        """Return resource information."""
        addon = self._extract_addon(request)
        stats = await addon.stats()

        if not stats:
            raise APIError("No stats available")

        return {
            ATTR_CPU_PERCENT: stats.cpu_percent,
            ATTR_MEMORY_USAGE: stats.memory_usage,
@@ -223,19 +279,19 @@ class APIAddons(CoreSysAttributes):
        }

    @api_process
    def install(self, request):
    def install(self, request: web.Request) -> Awaitable[None]:
        """Install add-on."""
        addon = self._extract_addon(request, check_installed=False)
        return asyncio.shield(addon.install())

    @api_process
    def uninstall(self, request):
    def uninstall(self, request: web.Request) -> Awaitable[None]:
        """Uninstall add-on."""
        addon = self._extract_addon(request)
        return asyncio.shield(addon.uninstall())

    @api_process
    def start(self, request):
    def start(self, request: web.Request) -> Awaitable[None]:
        """Start add-on."""
        addon = self._extract_addon(request)

@@ -249,13 +305,13 @@ class APIAddons(CoreSysAttributes):
        return asyncio.shield(addon.start())

    @api_process
    def stop(self, request):
    def stop(self, request: web.Request) -> Awaitable[None]:
        """Stop add-on."""
        addon = self._extract_addon(request)
        return asyncio.shield(addon.stop())

    @api_process
    def update(self, request):
    def update(self, request: web.Request) -> Awaitable[None]:
        """Update add-on."""
        addon = self._extract_addon(request)

@@ -265,13 +321,13 @@ class APIAddons(CoreSysAttributes):
        return asyncio.shield(addon.update())

    @api_process
    def restart(self, request):
    def restart(self, request: web.Request) -> Awaitable[None]:
        """Restart add-on."""
        addon = self._extract_addon(request)
        return asyncio.shield(addon.restart())

    @api_process
    def rebuild(self, request):
    def rebuild(self, request: web.Request) -> Awaitable[None]:
        """Rebuild local build add-on."""
        addon = self._extract_addon(request)
        if not addon.need_build:
@@ -280,13 +336,13 @@ class APIAddons(CoreSysAttributes):
        return asyncio.shield(addon.rebuild())

    @api_process_raw(CONTENT_TYPE_BINARY)
    def logs(self, request):
    def logs(self, request: web.Request) -> Awaitable[bytes]:
        """Return logs from add-on."""
        addon = self._extract_addon(request)
        return addon.logs()

    @api_process_raw(CONTENT_TYPE_PNG)
    async def icon(self, request):
    async def icon(self, request: web.Request) -> bytes:
        """Return icon from add-on."""
        addon = self._extract_addon(request, check_installed=False)
        if not addon.with_icon:
@@ -296,7 +352,7 @@ class APIAddons(CoreSysAttributes):
            return png.read()

    @api_process_raw(CONTENT_TYPE_PNG)
    async def logo(self, request):
    async def logo(self, request: web.Request) -> bytes:
        """Return logo from add-on."""
        addon = self._extract_addon(request, check_installed=False)
        if not addon.with_logo:
@@ -306,7 +362,7 @@ class APIAddons(CoreSysAttributes):
            return png.read()

    @api_process_raw(CONTENT_TYPE_TEXT)
    async def changelog(self, request):
    async def changelog(self, request: web.Request) -> str:
        """Return changelog from add-on."""
        addon = self._extract_addon(request, check_installed=False)
        if not addon.with_changelog:
@@ -316,17 +372,17 @@ class APIAddons(CoreSysAttributes):
            return changelog.read()

    @api_process
    async def stdin(self, request):
    async def stdin(self, request: web.Request) -> None:
        """Write to stdin of add-on."""
        addon = self._extract_addon(request)
        if not addon.with_stdin:
            raise APIError("STDIN not supported by add-on")

        data = await request.read()
        return await asyncio.shield(addon.write_stdin(data))
        await asyncio.shield(addon.write_stdin(data))


def _pretty_devices(addon):
def _pretty_devices(addon: Addon) -> List[str]:
    """Return a simplified device list."""
    dev_list = addon.devices
    if not dev_list:
@@ -334,7 +390,7 @@ def _pretty_devices(addon):
    return [row.split(':')[0] for row in dev_list]


def _pretty_services(addon):
def _pretty_services(addon: Addon) -> List[str]:
    """Return a simplified services role list."""
    services = []
    for name, access in addon.services_role.items():
@@ -1,27 +1,31 @@
"""Init file for Hass.io HassOS RESTful API."""
import asyncio
import logging
from typing import Any, Awaitable, Dict

import voluptuous as vol
from aiohttp import web

from .utils import api_process, api_validate
from ..const import (
    ATTR_VERSION, ATTR_BOARD, ATTR_VERSION_LATEST, ATTR_VERSION_CLI,
    ATTR_VERSION_CLI_LATEST)
    ATTR_BOARD,
    ATTR_VERSION,
    ATTR_VERSION_CLI,
    ATTR_VERSION_CLI_LATEST,
    ATTR_VERSION_LATEST,
)
from ..coresys import CoreSysAttributes
from .utils import api_process, api_validate

_LOGGER = logging.getLogger(__name__)

SCHEMA_VERSION = vol.Schema({
    vol.Optional(ATTR_VERSION): vol.Coerce(str),
})
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): vol.Coerce(str)})


class APIHassOS(CoreSysAttributes):
    """Handle RESTful API for HassOS functions."""

    @api_process
    async def info(self, request):
    async def info(self, request: web.Request) -> Dict[str, Any]:
        """Return HassOS information."""
        return {
            ATTR_VERSION: self.sys_hassos.version,
@@ -32,7 +36,7 @@ class APIHassOS(CoreSysAttributes):
        }

    @api_process
    async def update(self, request):
    async def update(self, request: web.Request) -> None:
        """Update HassOS."""
        body = await api_validate(SCHEMA_VERSION, request)
        version = body.get(ATTR_VERSION, self.sys_hassos.version_latest)
@@ -40,7 +44,7 @@ class APIHassOS(CoreSysAttributes):
        await asyncio.shield(self.sys_hassos.update(version))

    @api_process
    async def update_cli(self, request):
    async def update_cli(self, request: web.Request) -> None:
        """Update HassOS CLI."""
        body = await api_validate(SCHEMA_VERSION, request)
        version = body.get(ATTR_VERSION, self.sys_hassos.version_cli_latest)
@@ -48,6 +52,6 @@ class APIHassOS(CoreSysAttributes):
        await asyncio.shield(self.sys_hassos.update_cli(version))

    @api_process
    def config_sync(self, request):
    def config_sync(self, request: web.Request) -> Awaitable[None]:
        """Trigger config reload on HassOS."""
        return asyncio.shield(self.sys_hassos.config_sync())
@@ -27,6 +27,7 @@ from ..const import (
    ATTR_VERSION,
    ATTR_WAIT_BOOT,
    ATTR_WATCHDOG,
    ATTR_IP_ADDRESS,
    CONTENT_TYPE_BINARY,
)
from ..coresys import CoreSysAttributes
@@ -64,6 +65,7 @@ class APIHomeAssistant(CoreSysAttributes):
            ATTR_VERSION: self.sys_homeassistant.version,
            ATTR_LAST_VERSION: self.sys_homeassistant.last_version,
            ATTR_MACHINE: self.sys_homeassistant.machine,
            ATTR_IP_ADDRESS: str(self.sys_homeassistant.ip_address),
            ATTR_ARCH: self.sys_homeassistant.arch,
            ATTR_IMAGE: self.sys_homeassistant.image,
            ATTR_CUSTOM: self.sys_homeassistant.is_custom_image,
hassio/api/ingress.py (new file, 217 lines)
@@ -0,0 +1,217 @@
"""Hass.io Add-on ingress service."""
import asyncio
from ipaddress import ip_address
import logging
from typing import Any, Dict, Union

import aiohttp
from aiohttp import hdrs, web
from aiohttp.web_exceptions import (
    HTTPBadGateway,
    HTTPServiceUnavailable,
    HTTPUnauthorized,
)
from multidict import CIMultiDict, istr

from ..addons.addon import Addon
from ..const import ATTR_SESSION, HEADER_TOKEN, REQUEST_FROM, COOKIE_INGRESS
from ..coresys import CoreSysAttributes
from .utils import api_process

_LOGGER = logging.getLogger(__name__)


class APIIngress(CoreSysAttributes):
    """Ingress view to handle add-on webui routing."""

    def _extract_addon(self, request: web.Request) -> Addon:
        """Return addon, throw an exception if it doesn't exist."""
        token = request.match_info.get("token")

        # Find correct add-on
        addon = self.sys_ingress.get(token)
        if not addon:
            _LOGGER.warning("Ingress for %s not available", token)
            raise HTTPServiceUnavailable()

        return addon

    def _check_ha_access(self, request: web.Request) -> None:
        if request[REQUEST_FROM] != self.sys_homeassistant:
            _LOGGER.warning("Ingress is only available behind Home Assistant")
            raise HTTPUnauthorized()

    def _create_url(self, addon: Addon, path: str) -> str:
        """Create URL to container."""
        return f"{addon.ingress_internal}/{path}"

    @api_process
    async def create_session(self, request: web.Request) -> Dict[str, Any]:
        """Create a new session."""
        self._check_ha_access(request)

        session = self.sys_ingress.create_session()
        return {ATTR_SESSION: session}

    async def handler(
        self, request: web.Request
    ) -> Union[web.Response, web.StreamResponse, web.WebSocketResponse]:
        """Route data to Hass.io ingress service."""
        self._check_ha_access(request)

        # Check Ingress Session
        session = request.cookies.get(COOKIE_INGRESS)
        if not self.sys_ingress.validate_session(session):
            _LOGGER.warning("No valid ingress session %s", session)
            raise HTTPUnauthorized()

        # Process requests
        addon = self._extract_addon(request)
        path = request.match_info.get("path")
        try:
            # Websocket
            if _is_websocket(request):
                return await self._handle_websocket(request, addon, path)

            # Request
            return await self._handle_request(request, addon, path)

        except aiohttp.ClientError as err:
            _LOGGER.error("Ingress error: %s", err)

        raise HTTPBadGateway() from None

    async def _handle_websocket(
        self, request: web.Request, addon: Addon, path: str
    ) -> web.WebSocketResponse:
        """Ingress route for websocket."""
        ws_server = web.WebSocketResponse()
        await ws_server.prepare(request)

        # Preparing
        url = self._create_url(addon, path)
        source_header = _init_header(request, addon)

        # Support GET query
        if request.query_string:
            url = "{}?{}".format(url, request.query_string)

        # Start proxy
        async with self.sys_websession.ws_connect(
            url, headers=source_header
        ) as ws_client:
            # Proxy requests
            await asyncio.wait(
                [
                    _websocket_forward(ws_server, ws_client),
                    _websocket_forward(ws_client, ws_server),
                ],
                return_when=asyncio.FIRST_COMPLETED,
            )

        return ws_server

    async def _handle_request(
        self, request: web.Request, addon: Addon, path: str
    ) -> Union[web.Response, web.StreamResponse]:
        """Ingress route for request."""
        url = self._create_url(addon, path)
        data = await request.read()
        source_header = _init_header(request, addon)

        async with self.sys_websession.request(
            request.method, url, headers=source_header, params=request.query, data=data
        ) as result:
            headers = _response_header(result)

            # Simple request
            if (
                hdrs.CONTENT_LENGTH in result.headers
                and int(result.headers.get(hdrs.CONTENT_LENGTH, 0)) < 4_194_000
            ):
                # Return Response
                body = await result.read()
                return web.Response(headers=headers, status=result.status, body=body)

            # Stream response
            response = web.StreamResponse(status=result.status, headers=headers)
            response.content_type = result.content_type

            try:
                await response.prepare(request)
                async for data in result.content.iter_chunked(4096):
                    await response.write(data)

            except (aiohttp.ClientError, aiohttp.ClientPayloadError) as err:
                _LOGGER.error("Stream error with %s: %s", url, err)

            return response


def _init_header(
    request: web.Request, addon: str
) -> Union[CIMultiDict, Dict[str, str]]:
    """Create initial header."""
    headers = {}

    # filter flags
    for name, value in request.headers.items():
        if name in (
            hdrs.CONTENT_LENGTH,
            hdrs.CONTENT_TYPE,
            hdrs.CONTENT_ENCODING,
            istr(HEADER_TOKEN),
        ):
            continue
        headers[name] = value

    # Update X-Forwarded-For
    forward_for = request.headers.get(hdrs.X_FORWARDED_FOR)
    connected_ip = ip_address(request.transport.get_extra_info("peername")[0])
    headers[hdrs.X_FORWARDED_FOR] = f"{forward_for}, {connected_ip!s}"

    return headers


def _response_header(response: aiohttp.ClientResponse) -> Dict[str, str]:
    """Create response header."""
    headers = {}

    for name, value in response.headers.items():
        if name in (
            hdrs.TRANSFER_ENCODING,
            hdrs.CONTENT_LENGTH,
            hdrs.CONTENT_TYPE,
            hdrs.CONTENT_ENCODING,
        ):
            continue
        headers[name] = value

    return headers


def _is_websocket(request: web.Request) -> bool:
    """Return True if request is a websocket."""
    headers = request.headers

    if (
        headers.get(hdrs.CONNECTION) == "Upgrade"
        and headers.get(hdrs.UPGRADE) == "websocket"
    ):
        return True
    return False


async def _websocket_forward(ws_from, ws_to):
    """Handle websocket message directly."""
    async for msg in ws_from:
        if msg.type == aiohttp.WSMsgType.TEXT:
            await ws_to.send_str(msg.data)
        elif msg.type == aiohttp.WSMsgType.BINARY:
            await ws_to.send_bytes(msg.data)
        elif msg.type == aiohttp.WSMsgType.PING:
            await ws_to.ping()
        elif msg.type == aiohttp.WSMsgType.PONG:
            await ws_to.pong()
        elif ws_to.closed:
            await ws_to.close(code=ws_to.close_code, message=msg.extra)
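The websocket proxy runs `_websocket_forward` once in each direction and returns as soon as either side finishes, which is the standard bidirectional-bridge shape. A stripped-down illustration of that control flow (generic coroutines stand in for the two pumps; the commit itself relies on connection teardown rather than explicit cancellation, which the sketch adds as a common refinement):

```python
import asyncio


async def bridge(a_to_b, b_to_a) -> None:
    """Run two forwarding coroutines; stop when either direction ends."""
    done, pending = await asyncio.wait(
        [asyncio.ensure_future(a_to_b), asyncio.ensure_future(b_to_a)],
        return_when=asyncio.FIRST_COMPLETED,
    )
    # When one peer closes, the other pump is still blocked on a read;
    # cancelling it releases the connection immediately.
    for task in pending:
        task.cancel()

# Usage shape inside the handler above (hypothetical call site):
#   await bridge(
#       _websocket_forward(ws_server, ws_client),
#       _websocket_forward(ws_client, ws_server),
#   )
```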
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
3
hassio/api/panel/chunk.1ac383635811d6c2cb4b.js
Normal file
3
hassio/api/panel/chunk.1ac383635811d6c2cb4b.js
Normal file
File diff suppressed because one or more lines are too long
@ -8,6 +8,18 @@ Code distributed by Google as part of the polymer project is also
|
||||
subject to an additional IP rights grant found at http://polymer.github.io/PATENTS.txt
|
||||
*/
|
||||
|
||||
/**
|
||||
* @fileoverview
|
||||
* @suppress {checkPrototypalTypes}
|
||||
* @license Copyright (c) 2017 The Polymer Project Authors. All rights reserved.
|
||||
* This code may only be used under the BSD style license found at
|
||||
* http://polymer.github.io/LICENSE.txt The complete set of authors may be found
|
||||
* at http://polymer.github.io/AUTHORS.txt The complete set of contributors may
|
||||
* be found at http://polymer.github.io/CONTRIBUTORS.txt Code distributed by
|
||||
* Google as part of the polymer project is also subject to an additional IP
|
||||
* rights grant found at http://polymer.github.io/PATENTS.txt
|
||||
*/
|
||||
|
||||
/**
|
||||
@license
|
||||
Copyright (c) 2015 The Polymer Project Authors. All rights reserved.
|
BIN
hassio/api/panel/chunk.1ac383635811d6c2cb4b.js.gz
Normal file
BIN
hassio/api/panel/chunk.1ac383635811d6c2cb4b.js.gz
Normal file
Binary file not shown.
@ -1 +1 @@
|
||||
{"version":3,"sources":[],"names":[],"mappings":"","file":"chunk.8038876231b1b1817795.js","sourceRoot":""}
|
||||
{"version":3,"sources":[],"names":[],"mappings":"","file":"chunk.1ac383635811d6c2cb4b.js","sourceRoot":""}
|
3
hassio/api/panel/chunk.31b41b04602ce627ad98.js
Normal file
3
hassio/api/panel/chunk.31b41b04602ce627ad98.js
Normal file
File diff suppressed because one or more lines are too long
BIN
hassio/api/panel/chunk.31b41b04602ce627ad98.js.gz
Normal file
BIN
hassio/api/panel/chunk.31b41b04602ce627ad98.js.gz
Normal file
Binary file not shown.
@ -1 +1 @@
|
||||
{"version":3,"sources":[],"names":[],"mappings":"","file":"chunk.d86ead4948c3bb8d56b2.js","sourceRoot":""}
|
||||
{"version":3,"sources":[],"names":[],"mappings":"","file":"chunk.31b41b04602ce627ad98.js","sourceRoot":""}
|
File diff suppressed because one or more lines are too long
BIN
hassio/api/panel/chunk.381b1e7d41316cfb583c.js.gz
Normal file
BIN
hassio/api/panel/chunk.381b1e7d41316cfb583c.js.gz
Normal file
Binary file not shown.
Binary file not shown.
File diff suppressed because one or more lines are too long
BIN
hassio/api/panel/chunk.7589a9f39a552ee63688.js.gz
Normal file
BIN
hassio/api/panel/chunk.7589a9f39a552ee63688.js.gz
Normal file
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
1
hassio/api/panel/chunk.8a4a3a3274af0f09d86b.js
Normal file
1
hassio/api/panel/chunk.8a4a3a3274af0f09d86b.js
Normal file
@ -0,0 +1 @@
(window.webpackJsonp=window.webpackJsonp||[]).push([[4],{114:function(n,r,t){"use strict";t.r(r),t.d(r,"marked",function(){return a}),t.d(r,"filterXSS",function(){return c});var e=t(104),i=t.n(e),o=t(106),u=t.n(o),a=i.a,c=u.a}}]);
BIN
hassio/api/panel/chunk.8a4a3a3274af0f09d86b.js.gz
Normal file
Binary file not shown.
1
hassio/api/panel/chunk.a6e3bc73416702354e6d.js
Normal file
File diff suppressed because one or more lines are too long
BIN
hassio/api/panel/chunk.a6e3bc73416702354e6d.js.gz
Normal file
Binary file not shown.
@ -1 +0,0 @@
(window.webpackJsonp=window.webpackJsonp||[]).push([[4],{110:function(n,r,t){"use strict";t.r(r),t.d(r,"marked",function(){return a}),t.d(r,"filterXSS",function(){return c});var e=t(101),i=t.n(e),o=t(103),u=t.n(o),a=i.a,c=u.a}}]);
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
1
hassio/api/panel/chunk.ff45557361d5d6bd46af.js
Normal file
File diff suppressed because one or more lines are too long
BIN
hassio/api/panel/chunk.ff45557361d5d6bd46af.js.gz
Normal file
Binary file not shown.
@ -1 +1 @@
!function(e){function t(t){for(var n,o,i=t[0],u=t[1],a=0,l=[];a<i.length;a++)o=i[a],r[o]&&l.push(r[o][0]),r[o]=0;for(n in u)Object.prototype.hasOwnProperty.call(u,n)&&(e[n]=u[n]);for(c&&c(t);l.length;)l.shift()()}var n={},r={1:0};function o(t){if(n[t])return n[t].exports;var r=n[t]={i:t,l:!1,exports:{}};return e[t].call(r.exports,r,r.exports,o),r.l=!0,r.exports}o.e=function(e){var t=[],n=r[e];if(0!==n)if(n)t.push(n[2]);else{var i=new Promise(function(t,o){n=r[e]=[t,o]});t.push(n[2]=i);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,o.nc&&a.setAttribute("nonce",o.nc),a.src=function(e){return o.p+"chunk."+{0:"d86ead4948c3bb8d56b2",2:"75766aa821239c9936dc",3:"7b2353341ba15ea393c7",4:"b74ddf4cacc7d5de8a55",5:"05bbfb49a092df0b4304",6:"8038876231b1b1817795",7:"088b1034e27d00ee9329"}[e]+".js"}(e),u=function(t){a.onerror=a.onload=null,clearTimeout(c);var n=r[e];if(0!==n){if(n){var o=t&&("load"===t.type?"missing":t.type),i=t&&t.target&&t.target.src,u=new Error("Loading chunk "+e+" failed.\n("+o+": "+i+")");u.type=o,u.request=i,n[1](u)}r[e]=void 0}};var c=setTimeout(function(){u({type:"timeout",target:a})},12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(t)},o.m=e,o.c=n,o.d=function(e,t,n){o.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},o.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},o.t=function(e,t){if(1&t&&(e=o(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(o.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var r in e)o.d(n,r,function(t){return e[t]}.bind(null,r));return n},o.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return o.d(t,"a",t),t},o.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},o.p="/api/hassio/app/",o.oe=function(e){throw console.error(e),e};var i=window.webpackJsonp=window.webpackJsonp||[],u=i.push.bind(i);i.push=t,i=i.slice();for(var a=0;a<i.length;a++)t(i[a]);var c=u;o(o.s=0)}([function(e,t,n){window.loadES5Adapter().then(function(){Promise.all([n.e(0),n.e(2)]).then(n.bind(null,2)),Promise.all([n.e(0),n.e(6),n.e(3)]).then(n.bind(null,1))}),document.body.style.height="100%"}]);
!function(e){function n(n){for(var t,o,i=n[0],a=n[1],u=0,f=[];u<i.length;u++)o=i[u],r[o]&&f.push(r[o][0]),r[o]=0;for(t in a)Object.prototype.hasOwnProperty.call(a,t)&&(e[t]=a[t]);for(c&&c(n);f.length;)f.shift()()}var t={},r={1:0};function o(n){if(t[n])return t[n].exports;var r=t[n]={i:n,l:!1,exports:{}};return e[n].call(r.exports,r,r.exports,o),r.l=!0,r.exports}o.e=function(e){var n=[],t=r[e];if(0!==t)if(t)n.push(t[2]);else{var i=new Promise(function(n,o){t=r[e]=[n,o]});n.push(t[2]=i);var a,u=document.createElement("script");u.charset="utf-8",u.timeout=120,o.nc&&u.setAttribute("nonce",o.nc),u.src=function(e){return o.p+"chunk."+{0:"1ac383635811d6c2cb4b",2:"381b1e7d41316cfb583c",3:"a6e3bc73416702354e6d",4:"8a4a3a3274af0f09d86b",5:"7589a9f39a552ee63688",6:"31b41b04602ce627ad98",7:"ff45557361d5d6bd46af"}[e]+".js"}(e),a=function(n){u.onerror=u.onload=null,clearTimeout(c);var t=r[e];if(0!==t){if(t){var o=n&&("load"===n.type?"missing":n.type),i=n&&n.target&&n.target.src,a=new Error("Loading chunk "+e+" failed.\n("+o+": "+i+")");a.type=o,a.request=i,t[1](a)}r[e]=void 0}};var c=setTimeout(function(){a({type:"timeout",target:u})},12e4);u.onerror=u.onload=a,document.head.appendChild(u)}return Promise.all(n)},o.m=e,o.c=t,o.d=function(e,n,t){o.o(e,n)||Object.defineProperty(e,n,{enumerable:!0,get:t})},o.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},o.t=function(e,n){if(1&n&&(e=o(e)),8&n)return e;if(4&n&&"object"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(o.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&n&&"string"!=typeof e)for(var r in e)o.d(t,r,function(n){return e[n]}.bind(null,r));return t},o.n=function(e){var n=e&&e.__esModule?function(){return e.default}:function(){return e};return o.d(n,"a",n),n},o.o=function(e,n){return Object.prototype.hasOwnProperty.call(e,n)},o.p="/api/hassio/app/",o.oe=function(e){throw console.error(e),e};var i=window.webpackJsonp=window.webpackJsonp||[],a=i.push.bind(i);i.push=n,i=i.slice();for(var u=0;u<i.length;u++)n(i[u]);var c=a;o(o.s=0)}([function(e,n,t){window.loadES5Adapter().then(function(){Promise.all([t.e(0),t.e(2)]).then(t.bind(null,2)),Promise.all([t.e(0),t.e(6),t.e(3)]).then(t.bind(null,1))});var r=document.createElement("style");r.innerHTML="\nbody {\n font-family: Roboto, sans-serif;\n -moz-osx-font-smoothing: grayscale;\n -webkit-font-smoothing: antialiased;\n font-weight: 400;\n margin: 0;\n padding: 0;\n height: 100vh;\n}\n",document.head.appendChild(r)}]);
Binary file not shown.
@ -35,7 +35,7 @@ class APIProxy(CoreSysAttributes):
        elif not addon.access_homeassistant_api:
            _LOGGER.warning("Not permitted API access: %s", addon.slug)
        else:
            _LOGGER.info("%s access from %s", request.path, addon.slug)
            _LOGGER.debug("%s access from %s", request.path, addon.slug)
            return

        raise HTTPUnauthorized()
@ -6,12 +6,19 @@ from aiohttp.web import middleware
from aiohttp.web_exceptions import HTTPUnauthorized, HTTPForbidden

from ..const import (
    HEADER_TOKEN, REQUEST_FROM, ROLE_ADMIN, ROLE_DEFAULT, ROLE_HOMEASSISTANT,
    ROLE_MANAGER, ROLE_BACKUP)
    HEADER_TOKEN,
    REQUEST_FROM,
    ROLE_ADMIN,
    ROLE_DEFAULT,
    ROLE_HOMEASSISTANT,
    ROLE_MANAGER,
    ROLE_BACKUP,
)
from ..coresys import CoreSysAttributes

_LOGGER = logging.getLogger(__name__)

# fmt: off

# Block Anytime
BLACKLIST = re.compile(
@ -65,7 +72,7 @@ ADDONS_ROLE_ACCESS = {
        r"|/hardware/.+"
        r"|/hassos/.+"
        r"|/supervisor/.+"
        r"|/addons(?:/[^/]+/(?!security).+)?"
        r"|/addons(?:/[^/]+/(?!security).+|/reload)?"
        r"|/snapshots.*"
        r")$"
    ),
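The added `|/reload` alternative widens this rule to the bare `/addons/reload` endpoint without loosening the per-add-on `security` guard. A minimal sketch of the matching behavior (the inner fragment is copied from the rule above; the `^(?:`/`)$` anchors and the name `ADDONS_MANAGER` are stand-ins for the surrounding structure):

```python
import re

# Hypothetical stand-in for the manager-role rule above.
ADDONS_MANAGER = re.compile(
    r"^(?:"
    r"/addons(?:/[^/]+/(?!security).+|/reload)?"
    r")$"
)

assert ADDONS_MANAGER.match("/addons/reload")                 # newly allowed
assert ADDONS_MANAGER.match("/addons/core_ssh/start")         # still allowed
assert not ADDONS_MANAGER.match("/addons/core_ssh/security")  # still blocked
```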
@ -74,6 +81,8 @@ ADDONS_ROLE_ACCESS = {
    ),
}

# fmt: off


class SecurityMiddleware(CoreSysAttributes):
    """Security middleware functions."""
@ -104,9 +113,7 @@ class SecurityMiddleware(CoreSysAttributes):
            raise HTTPUnauthorized()

        # Home-Assistant
        # UUID check need removed with 131
        if hassio_token in (self.sys_homeassistant.uuid,
                            self.sys_homeassistant.hassio_token):
        if hassio_token == self.sys_homeassistant.hassio_token:
            _LOGGER.debug("%s access from Home Assistant", request.path)
            request_from = self.sys_homeassistant

@ -1,34 +1,57 @@
"""Init file for Hass.io Supervisor RESTful API."""
import asyncio
import logging
from typing import Any, Awaitable, Dict

from aiohttp import web
import voluptuous as vol

from .utils import api_process, api_process_raw, api_validate
from ..const import (
    ATTR_ADDONS, ATTR_VERSION, ATTR_LAST_VERSION, ATTR_CHANNEL, ATTR_ARCH,
    HASSIO_VERSION, ATTR_ADDONS_REPOSITORIES, ATTR_LOGO, ATTR_REPOSITORY,
    ATTR_DESCRIPTON, ATTR_NAME, ATTR_SLUG, ATTR_INSTALLED, ATTR_TIMEZONE,
    ATTR_STATE, ATTR_WAIT_BOOT, ATTR_CPU_PERCENT, ATTR_MEMORY_USAGE,
    ATTR_MEMORY_LIMIT, ATTR_NETWORK_RX, ATTR_NETWORK_TX, ATTR_BLK_READ,
    ATTR_BLK_WRITE, CONTENT_TYPE_BINARY, ATTR_ICON)
    ATTR_ADDONS,
    ATTR_ADDONS_REPOSITORIES,
    ATTR_ARCH,
    ATTR_BLK_READ,
    ATTR_BLK_WRITE,
    ATTR_CHANNEL,
    ATTR_CPU_PERCENT,
    ATTR_DESCRIPTON,
    ATTR_ICON,
    ATTR_INSTALLED,
    ATTR_LAST_VERSION,
    ATTR_LOGO,
    ATTR_MEMORY_LIMIT,
    ATTR_MEMORY_USAGE,
    ATTR_NAME,
    ATTR_NETWORK_RX,
    ATTR_NETWORK_TX,
    ATTR_REPOSITORY,
    ATTR_SLUG,
    ATTR_STATE,
    ATTR_TIMEZONE,
    ATTR_VERSION,
    ATTR_WAIT_BOOT,
    ATTR_IP_ADDRESS,
    CONTENT_TYPE_BINARY,
    HASSIO_VERSION,
)
from ..coresys import CoreSysAttributes
from ..validate import WAIT_BOOT, REPOSITORIES, CHANNELS
from ..exceptions import APIError
from ..utils.validate import validate_timezone
from ..validate import CHANNELS, REPOSITORIES, WAIT_BOOT
from .utils import api_process, api_process_raw, api_validate

_LOGGER = logging.getLogger(__name__)

SCHEMA_OPTIONS = vol.Schema({
    vol.Optional(ATTR_CHANNEL): CHANNELS,
    vol.Optional(ATTR_ADDONS_REPOSITORIES): REPOSITORIES,
    vol.Optional(ATTR_TIMEZONE): validate_timezone,
    vol.Optional(ATTR_WAIT_BOOT): WAIT_BOOT,
})
SCHEMA_OPTIONS = vol.Schema(
    {
        vol.Optional(ATTR_CHANNEL): CHANNELS,
        vol.Optional(ATTR_ADDONS_REPOSITORIES): REPOSITORIES,
        vol.Optional(ATTR_TIMEZONE): validate_timezone,
        vol.Optional(ATTR_WAIT_BOOT): WAIT_BOOT,
    }
)

SCHEMA_VERSION = vol.Schema({
    vol.Optional(ATTR_VERSION): vol.Coerce(str),
})
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): vol.Coerce(str)})


class APISupervisor(CoreSysAttributes):
@ -40,28 +63,31 @@ class APISupervisor(CoreSysAttributes):
        return True

    @api_process
    async def info(self, request):
    async def info(self, request: web.Request) -> Dict[str, Any]:
        """Return host information."""
        list_addons = []
        for addon in self.sys_addons.list_addons:
            if addon.is_installed:
                list_addons.append({
                    ATTR_NAME: addon.name,
                    ATTR_SLUG: addon.slug,
                    ATTR_DESCRIPTON: addon.description,
                    ATTR_STATE: await addon.state(),
                    ATTR_VERSION: addon.last_version,
                    ATTR_INSTALLED: addon.version_installed,
                    ATTR_REPOSITORY: addon.repository,
                    ATTR_ICON: addon.with_icon,
                    ATTR_LOGO: addon.with_logo,
                })
                list_addons.append(
                    {
                        ATTR_NAME: addon.name,
                        ATTR_SLUG: addon.slug,
                        ATTR_DESCRIPTON: addon.description,
                        ATTR_STATE: await addon.state(),
                        ATTR_VERSION: addon.last_version,
                        ATTR_INSTALLED: addon.version_installed,
                        ATTR_REPOSITORY: addon.repository,
                        ATTR_ICON: addon.with_icon,
                        ATTR_LOGO: addon.with_logo,
                    }
                )

        return {
            ATTR_VERSION: HASSIO_VERSION,
            ATTR_LAST_VERSION: self.sys_updater.version_hassio,
            ATTR_CHANNEL: self.sys_updater.channel,
            ATTR_ARCH: self.sys_supervisor.arch,
            ATTR_IP_ADDRESS: str(self.sys_supervisor.ip_address),
            ATTR_WAIT_BOOT: self.sys_config.wait_boot,
            ATTR_TIMEZONE: self.sys_config.timezone,
            ATTR_ADDONS: list_addons,
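With `ATTR_IP_ADDRESS` in the payload, `GET /supervisor/info` now reports the Supervisor's own address. A sketch of reading it from inside an add-on; the `hassio` hostname, the `HASSIO_TOKEN` variable, and the `{"data": ...}` response envelope follow the usual add-on conventions and are assumptions here:

```python
import asyncio
import os

import aiohttp


async def supervisor_ip() -> str:
    """Read the new ip_address field from /supervisor/info."""
    headers = {"X-Hassio-Key": os.environ["HASSIO_TOKEN"]}
    async with aiohttp.ClientSession(headers=headers) as session:
        async with session.get("http://hassio/supervisor/info") as resp:
            body = await resp.json()
    return body["data"]["ip_address"]


print(asyncio.get_event_loop().run_until_complete(supervisor_ip()))
```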
@ -69,7 +95,7 @@ class APISupervisor(CoreSysAttributes):
        }

    @api_process
    async def options(self, request):
    async def options(self, request: web.Request) -> None:
        """Set Supervisor options."""
        body = await api_validate(SCHEMA_OPTIONS, request)

@ -88,14 +114,11 @@ class APISupervisor(CoreSysAttributes):

        self.sys_updater.save_data()
        self.sys_config.save_data()
        return True

    @api_process
    async def stats(self, request):
    async def stats(self, request: web.Request) -> Dict[str, Any]:
        """Return resource information."""
        stats = await self.sys_supervisor.stats()
        if not stats:
            raise APIError("No stats available")

        return {
            ATTR_CPU_PERCENT: stats.cpu_percent,
@ -108,31 +131,21 @@ class APISupervisor(CoreSysAttributes):
        }

    @api_process
    async def update(self, request):
    async def update(self, request: web.Request) -> None:
        """Update Supervisor OS."""
        body = await api_validate(SCHEMA_VERSION, request)
        version = body.get(ATTR_VERSION, self.sys_updater.version_hassio)

        if version == self.sys_supervisor.version:
            raise APIError("Version {} is already in use".format(version))

        return await asyncio.shield(self.sys_supervisor.update(version))
        await asyncio.shield(self.sys_supervisor.update(version))

    @api_process
    async def reload(self, request):
    def reload(self, request: web.Request) -> Awaitable[None]:
        """Reload add-ons, configuration, etc."""
        tasks = [
            self.sys_updater.reload(),
        ]
        results, _ = await asyncio.shield(asyncio.wait(tasks))

        for result in results:
            if result.exception() is not None:
                raise APIError("Some reload task fails!")

        return True
        return asyncio.shield(self.sys_updater.reload())

    @api_process_raw(CONTENT_TYPE_BINARY)
    def logs(self, request):
    def logs(self, request: web.Request) -> Awaitable[bytes]:
        """Return supervisor Docker logs."""
        return self.sys_supervisor.logs()
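`reload` now hands back `asyncio.shield(...)` directly, so cancelling the waiting HTTP request no longer cancels the updater task itself. A toy illustration of that property (plain asyncio, nothing Hass.io-specific):

```python
import asyncio


async def slow_job() -> None:
    await asyncio.sleep(1)
    print("job finished despite the cancel")


async def main() -> None:
    waiter = asyncio.ensure_future(asyncio.shield(slow_job()))
    await asyncio.sleep(0.1)
    waiter.cancel()  # cancels the waiter, not the shielded inner job
    await asyncio.sleep(1.5)  # give the inner job time to finish


asyncio.get_event_loop().run_until_complete(main())
```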
@ -19,6 +19,7 @@ from .discovery import Discovery
from .hassos import HassOS
from .homeassistant import HomeAssistant
from .host import HostManager
from .ingress import Ingress
from .services import ServiceManager
from .snapshots import SnapshotManager
from .supervisor import Supervisor
@ -49,6 +50,7 @@ async def initialize_coresys():
    coresys.addons = AddonManager(coresys)
    coresys.snapshots = SnapshotManager(coresys)
    coresys.host = HostManager(coresys)
    coresys.ingress = Ingress(coresys)
    coresys.tasks = Tasks(coresys)
    coresys.services = ServiceManager(coresys)
    coresys.discovery = Discovery(coresys)
@ -71,8 +73,9 @@ def initialize_system_data(coresys):

    # Home Assistant configuration folder
    if not config.path_homeassistant.is_dir():
        _LOGGER.info("Create Home Assistant configuration folder %s",
                     config.path_homeassistant)
        _LOGGER.info(
            "Create Home Assistant configuration folder %s", config.path_homeassistant
        )
        config.path_homeassistant.mkdir()

    # hassio ssl folder
@ -82,18 +85,19 @@ def initialize_system_data(coresys):

    # hassio addon data folder
    if not config.path_addons_data.is_dir():
        _LOGGER.info("Create Hass.io Add-on data folder %s",
                     config.path_addons_data)
        _LOGGER.info("Create Hass.io Add-on data folder %s", config.path_addons_data)
        config.path_addons_data.mkdir(parents=True)

    if not config.path_addons_local.is_dir():
        _LOGGER.info("Create Hass.io Add-on local repository folder %s",
                     config.path_addons_local)
        _LOGGER.info(
            "Create Hass.io Add-on local repository folder %s", config.path_addons_local
        )
        config.path_addons_local.mkdir(parents=True)

    if not config.path_addons_git.is_dir():
        _LOGGER.info("Create Hass.io Add-on git repositories folder %s",
                     config.path_addons_git)
        _LOGGER.info(
            "Create Hass.io Add-on git repositories folder %s", config.path_addons_git
        )
        config.path_addons_git.mkdir(parents=True)

    # hassio tmp folder
@ -154,7 +158,8 @@ def initialize_logging():
            "ERROR": "red",
            "CRITICAL": "red",
        },
    ))
        )
    )


def check_environment():
@ -188,19 +193,16 @@ def check_environment():
def reg_signal(loop):
    """Register SIGTERM and SIGKILL to stop system."""
    try:
        loop.add_signal_handler(signal.SIGTERM,
                                lambda: loop.call_soon(loop.stop))
        loop.add_signal_handler(signal.SIGTERM, lambda: loop.call_soon(loop.stop))
    except (ValueError, RuntimeError):
        _LOGGER.warning("Could not bind to SIGTERM")

    try:
        loop.add_signal_handler(signal.SIGHUP,
                                lambda: loop.call_soon(loop.stop))
        loop.add_signal_handler(signal.SIGHUP, lambda: loop.call_soon(loop.stop))
    except (ValueError, RuntimeError):
        _LOGGER.warning("Could not bind to SIGHUP")

    try:
        loop.add_signal_handler(signal.SIGINT,
                                lambda: loop.call_soon(loop.stop))
        loop.add_signal_handler(signal.SIGINT, lambda: loop.call_soon(loop.stop))
    except (ValueError, RuntimeError):
        _LOGGER.warning("Could not bind to SIGINT")
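The collapsed one-liners keep the same semantics; for reference, a standalone sketch of the pattern on a bare event loop (Unix only, since `add_signal_handler` is not implemented on Windows):

```python
import asyncio
import signal

loop = asyncio.new_event_loop()
try:
    # Stop the loop gracefully when the process receives SIGTERM.
    loop.add_signal_handler(signal.SIGTERM, lambda: loop.call_soon(loop.stop))
except (ValueError, RuntimeError):
    print("Could not bind to SIGTERM")  # non-main thread or unsupported platform

loop.run_forever()  # returns once a SIGTERM stops the loop
```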
@ -2,7 +2,8 @@
from pathlib import Path
from ipaddress import ip_network

HASSIO_VERSION = "152"

HASSIO_VERSION = "153"

URL_HASSIO_ADDONS = "https://github.com/home-assistant/hassio-addons"
URL_HASSIO_VERSION = "https://s3.amazonaws.com/hassio-version/{channel}.json"
@ -22,6 +23,7 @@ FILE_HASSIO_HOMEASSISTANT = Path(HASSIO_DATA, "homeassistant.json")
FILE_HASSIO_UPDATER = Path(HASSIO_DATA, "updater.json")
FILE_HASSIO_SERVICES = Path(HASSIO_DATA, "services.json")
FILE_HASSIO_DISCOVERY = Path(HASSIO_DATA, "discovery.json")
FILE_HASSIO_INGRESS = Path(HASSIO_DATA, "ingress.json")

SOCKET_DOCKER = Path("/var/run/docker.sock")

@ -51,8 +53,9 @@ CONTENT_TYPE_JSON = "application/json"
CONTENT_TYPE_TEXT = "text/plain"
CONTENT_TYPE_TAR = "application/tar"
CONTENT_TYPE_URL = "application/x-www-form-urlencoded"
HEADER_HA_ACCESS = "x-ha-access"
HEADER_TOKEN = "x-hassio-key"
HEADER_HA_ACCESS = "X-Ha-Access"
HEADER_TOKEN = "X-Hassio-Key"
COOKIE_INGRESS = "ingress_session"

ENV_TOKEN = "HASSIO_TOKEN"
ENV_TIME = "TZ"
@ -187,6 +190,13 @@ ATTR_SUPERVISOR = "supervisor"
ATTR_AUTH_API = "auth_api"
ATTR_KERNEL_MODULES = "kernel_modules"
ATTR_SUPPORTED_ARCH = "supported_arch"
ATTR_INGRESS = "ingress"
ATTR_INGRESS_PORT = "ingress_port"
ATTR_INGRESS_ENTRY = "ingress_entry"
ATTR_INGRESS_TOKEN = "ingress_token"
ATTR_INGRESS_URL = "ingress_url"
ATTR_IP_ADDRESS = "ip_address"
ATTR_SESSION = "session"

PROVIDE_SERVICE = "provide"
NEED_SERVICE = "need"
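`COOKIE_INGRESS` and `ATTR_SESSION` carry the new ingress handshake: create a session via `POST /ingress/session`, then present the token as the `ingress_session` cookie. A sketch under those assumptions (the endpoint shape, response envelope, and the ingress path argument are assumptions of this example, not confirmed API):

```python
import asyncio
import os

import aiohttp


async def call_ingress(path: str) -> int:
    """Open an ingress session, then request an ingress path with the cookie."""
    headers = {"X-Hassio-Key": os.environ["HASSIO_TOKEN"]}
    async with aiohttp.ClientSession(headers=headers) as session:
        async with session.post("http://hassio/ingress/session") as resp:
            token = (await resp.json())["data"]["session"]

        cookies = {"ingress_session": token}  # COOKIE_INGRESS
        async with session.get(f"http://hassio{path}", cookies=cookies) as resp:
            return resp.status


# Usage sketch: asyncio.get_event_loop().run_until_complete(call_ingress(ingress_entry))
```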
@ -62,6 +62,9 @@ class HassIO(CoreSysAttributes):
        # Load discovery
        await self.sys_discovery.load()

        # Load ingress
        await self.sys_ingress.load()

        # start dns forwarding
        self.sys_create_task(self.sys_dns.start())

@ -131,6 +134,7 @@ class HassIO(CoreSysAttributes):
                    self.sys_dns.stop(),
                    self.sys_websession.close(),
                    self.sys_websession_ssl.close(),
                    self.sys_ingress.unload(),
                ]
            )
        except asyncio.TimeoutError:
@ -23,6 +23,7 @@ if TYPE_CHECKING:
    from .hassos import HassOS
    from .homeassistant import HomeAssistant
    from .host import HostManager
    from .ingress import Ingress
    from .services import ServiceManager
    from .snapshots import SnapshotManager
    from .supervisor import Supervisor
@ -63,6 +64,7 @@ class CoreSys:
        self._snapshots: SnapshotManager = None
        self._tasks: Tasks = None
        self._host: HostManager = None
        self._ingress: Ingress = None
        self._dbus: DBusManager = None
        self._hassos: HassOS = None
        self._services: ServiceManager = None
@ -293,6 +295,18 @@ class CoreSys:
            raise RuntimeError("HostManager already set!")
        self._host = value

    @property
    def ingress(self) -> Ingress:
        """Return Ingress object."""
        return self._ingress

    @ingress.setter
    def ingress(self, value: Ingress):
        """Set a Ingress object."""
        if self._ingress:
            raise RuntimeError("Ingress already set!")
        self._ingress = value

    @property
    def hassos(self) -> HassOS:
        """Return HassOS object."""
@ -441,6 +455,11 @@ class CoreSysAttributes:
        """Return HostManager object."""
        return self.coresys.host

    @property
    def sys_ingress(self) -> Ingress:
        """Return Ingress object."""
        return self.coresys.ingress

    @property
    def sys_hassos(self) -> HassOS:
        """Return HassOS object."""
@ -6,3 +6,5 @@ ATTR_PORT = "port"
ATTR_PROTOCOL = "protocol"
ATTR_SSL = "ssl"
ATTR_USERNAME = "username"
ATTR_API_KEY = "api_key"
ATTR_SERIAL = "serial"
@ -3,9 +3,14 @@ import voluptuous as vol

from hassio.validate import NETWORK_PORT

from ..const import ATTR_HOST, ATTR_PORT
from ..const import ATTR_HOST, ATTR_PORT, ATTR_API_KEY, ATTR_SERIAL


SCHEMA = vol.Schema(
    {vol.Required(ATTR_HOST): vol.Coerce(str), vol.Required(ATTR_PORT): NETWORK_PORT}
    {
        vol.Required(ATTR_HOST): vol.Coerce(str),
        vol.Required(ATTR_PORT): NETWORK_PORT,
        vol.Required(ATTR_SERIAL): vol.Coerce(str),
        vol.Required(ATTR_API_KEY): vol.Coerce(str),
    }
)
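What the tightened schema buys, as a runnable sketch (literal keys stand in for the `ATTR_*` constants, and `NETWORK_PORT` is approximated inline; payload values are made up):

```python
import voluptuous as vol

NETWORK_PORT = vol.All(vol.Coerce(int), vol.Range(min=1, max=65535))
SCHEMA = vol.Schema(
    {
        vol.Required("host"): vol.Coerce(str),
        vol.Required("port"): NETWORK_PORT,
        vol.Required("serial"): vol.Coerce(str),
        vol.Required("api_key"): vol.Coerce(str),
    }
)

SCHEMA({"host": "172.30.32.1", "port": 80, "serial": "ABC123", "api_key": "secret"})
try:
    SCHEMA({"host": "172.30.32.1", "port": 80})
except vol.MultipleInvalid as err:
    print("rejected:", err)  # serial and api_key are now required
```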
@ -1,12 +1,14 @@
"""Init file for Hass.io Docker object."""
from contextlib import suppress
import logging
from contextlib import suppress
from typing import Any, Dict, Optional

import attr
import docker

from .network import DockerNetwork
from ..const import SOCKET_DOCKER
from ..exceptions import DockerAPIError
from .network import DockerNetwork

_LOGGER = logging.getLogger(__name__)

@ -14,8 +16,9 @@ _LOGGER = logging.getLogger(__name__)
@attr.s(frozen=True)
class CommandReturn:
    """Return object from command run."""
    exit_code = attr.ib()
    output = attr.ib()

    exit_code: int = attr.ib()
    output: bytes = attr.ib()


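`CommandReturn` is a frozen `attrs` container, so results are immutable value objects. A short usage sketch of the same definition:

```python
import attr


@attr.s(frozen=True)
class CommandReturn:
    """Return object from command run."""

    exit_code: int = attr.ib()
    output: bytes = attr.ib()


result = CommandReturn(exit_code=0, output=b"ok\n")
print(result.exit_code, result.output.decode())
# result.exit_code = 1  # would raise attr.exceptions.FrozenInstanceError
```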
class DockerAPI:
@ -26,75 +29,87 @@ class DockerAPI:

    def __init__(self):
        """Initialize Docker base wrapper."""
        self.docker = docker.DockerClient(
            base_url="unix:/{}".format(str(SOCKET_DOCKER)),
            version='auto', timeout=900)
        self.network = DockerNetwork(self.docker)
        self.docker: docker.DockerClient = docker.DockerClient(
            base_url="unix:/{}".format(str(SOCKET_DOCKER)), version="auto", timeout=900
        )
        self.network: DockerNetwork = DockerNetwork(self.docker)

    @property
    def images(self):
    def images(self) -> docker.models.images.ImageCollection:
        """Return API images."""
        return self.docker.images

    @property
    def containers(self):
    def containers(self) -> docker.models.containers.ContainerCollection:
        """Return API containers."""
        return self.docker.containers

    @property
    def api(self):
    def api(self) -> docker.APIClient:
        """Return API containers."""
        return self.docker.api

    def run(self, image, **kwargs):
    def run(
        self, image: str, **kwargs: Dict[str, Any]
    ) -> docker.models.containers.Container:
        """"Create a Docker container and run it.

        Need run inside executor.
        """
        name = kwargs.get('name', image)
        network_mode = kwargs.get('network_mode')
        hostname = kwargs.get('hostname')
        name = kwargs.get("name", image)
        network_mode = kwargs.get("network_mode")
        hostname = kwargs.get("hostname")

        # Setup network
        kwargs['dns_search'] = ["."]
        kwargs["dns_search"] = ["."]
        if network_mode:
            kwargs['dns'] = [str(self.network.supervisor)]
            kwargs['dns_opt'] = ["ndots:0"]
            kwargs["dns"] = [str(self.network.supervisor)]
            kwargs["dns_opt"] = ["ndots:0"]
        else:
            kwargs['network'] = None
            kwargs["network"] = None

        # Create container
        try:
            container = self.docker.containers.create(
                image, use_config_proxy=False, **kwargs)
                image, use_config_proxy=False, **kwargs
            )
        except docker.errors.DockerException as err:
            _LOGGER.error("Can't create container from %s: %s", name, err)
            return False
            raise DockerAPIError() from None

        # attach network
        # Attach network
        if not network_mode:
            alias = [hostname] if hostname else None
            if self.network.attach_container(container, alias=alias):
                self.network.detach_default_bridge(container)
            else:
            try:
                self.network.attach_container(container, alias=alias)
            except DockerAPIError:
                _LOGGER.warning("Can't attach %s to hassio-net!", name)
            else:
                with suppress(DockerAPIError):
                    self.network.detach_default_bridge(container)

        # run container
        # Run container
        try:
            container.start()
        except docker.errors.DockerException as err:
            _LOGGER.error("Can't start %s: %s", name, err)
            return False
            raise DockerAPIError() from None

        return True
        # Update metadata
        with suppress(docker.errors.DockerException):
            container.reload()

    def run_command(self, image, command=None, **kwargs):
        return container

    def run_command(
        self, image: str, command: Optional[str] = None, **kwargs: Dict[str, Any]
    ) -> CommandReturn:
        """Create a temporary container and run command.

        Need run inside executor.
        """
        stdout = kwargs.get('stdout', True)
        stderr = kwargs.get('stderr', True)
        stdout = kwargs.get("stdout", True)
        stderr = kwargs.get("stderr", True)

        _LOGGER.info("Run command '%s' on %s", command, image)
        try:
@ -112,11 +127,11 @@ class DockerAPI:

        except docker.errors.DockerException as err:
            _LOGGER.error("Can't execute command: %s", err)
            return CommandReturn(None, b"")
            raise DockerAPIError() from None

        finally:
            # cleanup container
            with suppress(docker.errors.DockerException):
                container.remove(force=True)

        return CommandReturn(result.get('StatusCode'), output)
        return CommandReturn(result.get("StatusCode"), output)
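`run()` and `run_command()` now raise `DockerAPIError` instead of returning `False` or empty results, so call sites become exception handling rather than truthiness checks. A minimal sketch of the new calling convention (the image and container names are made up):

```python
from contextlib import suppress

from hassio.exceptions import DockerAPIError


def start_addon(docker_api) -> None:
    """Start a container, treating Docker failures as exceptions."""
    try:
        container = docker_api.run("local/my-addon", name="addon_my_addon")
    except DockerAPIError:
        return  # the root cause was already logged by the wrapper

    # run() reloads metadata before returning, so attrs are fresh.
    with suppress(KeyError):
        print(container.attrs["NetworkSettings"]["Networks"]["hassio"]["IPAddress"])
```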
@ -1,15 +1,35 @@
"""Init file for Hass.io add-on Docker object."""
from __future__ import annotations

from contextlib import suppress
from ipaddress import IPv4Address, ip_address
import logging
import os
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Union, Awaitable

import docker
import requests

from .interface import DockerInterface
from ..addons.build import AddonBuild
from ..const import (MAP_CONFIG, MAP_SSL, MAP_ADDONS, MAP_BACKUP, MAP_SHARE,
                     ENV_TOKEN, ENV_TIME, SECURITY_PROFILE, SECURITY_DISABLE)
from ..const import (
    ENV_TIME,
    ENV_TOKEN,
    MAP_ADDONS,
    MAP_BACKUP,
    MAP_CONFIG,
    MAP_SHARE,
    MAP_SSL,
    SECURITY_DISABLE,
    SECURITY_PROFILE,
)
from ..coresys import CoreSys
from ..exceptions import DockerAPIError
from ..utils import process_lock
from .interface import DockerInterface

if TYPE_CHECKING:
    from ..addons.addon import Addon

_LOGGER = logging.getLogger(__name__)

@ -19,64 +39,77 @@ AUDIO_DEVICE = "/dev/snd:/dev/snd:rwm"
class DockerAddon(DockerInterface):
    """Docker Hass.io wrapper for Home Assistant."""

    def __init__(self, coresys, slug):
    def __init__(self, coresys: CoreSys, slug: str):
        """Initialize Docker Home Assistant wrapper."""
        super().__init__(coresys)
        self._id = slug
        self._id: str = slug

    @property
    def addon(self):
    def addon(self) -> Addon:
        """Return add-on of Docker image."""
        return self.sys_addons.get(self._id)

    @property
    def image(self):
    def image(self) -> str:
        """Return name of Docker image."""
        return self.addon.image

    @property
    def timeout(self):
    def ip_address(self) -> IPv4Address:
        """Return IP address of this container."""
        if self.addon.host_network:
            return self.sys_docker.network.gateway

        # Extract IP-Address
        try:
            return ip_address(
                self._meta["NetworkSettings"]["Networks"]["hassio"]["IPAddress"])
        except (KeyError, TypeError, ValueError):
            return ip_address("0.0.0.0")

    @property
    def timeout(self) -> int:
        """Return timeout for Docker actions."""
        return self.addon.timeout

    @property
    def version(self):
    def version(self) -> str:
        """Return version of Docker image."""
        if self.addon.legacy:
            return self.addon.version_installed
        return super().version

    @property
    def arch(self):
    def arch(self) -> str:
        """Return arch of Docker image."""
        if self.addon.legacy:
            return self.sys_arch.default
        return super().arch

    @property
    def name(self):
    def name(self) -> str:
        """Return name of Docker container."""
        return "addon_{}".format(self.addon.slug)
        return f"addon_{self.addon.slug}"

    @property
    def ipc(self):
    def ipc(self) -> Optional[str]:
        """Return the IPC namespace."""
        if self.addon.host_ipc:
            return 'host'
            return "host"
        return None

    @property
    def full_access(self):
    def full_access(self) -> bool:
        """Return True if full access is enabled."""
        return not self.addon.protected and self.addon.with_full_access

    @property
    def hostname(self):
    def hostname(self) -> str:
        """Return slug/id of add-on."""
        return self.addon.slug.replace('_', '-')
        return self.addon.slug.replace("_", "-")

    @property
    def environment(self):
    def environment(self) -> Dict[str, str]:
        """Return environment for Docker add-on."""
        addon_env = self.addon.environment or {}

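The new `ip_address` property digs the add-on's address out of the container's `NetworkSettings`; the same extraction against a raw docker-py container object looks like this (the container name is hypothetical):

```python
from ipaddress import ip_address

import docker

client = docker.from_env()
container = client.containers.get("addon_example")  # hypothetical name

try:
    addr = ip_address(
        container.attrs["NetworkSettings"]["Networks"]["hassio"]["IPAddress"]
    )
except (KeyError, TypeError, ValueError):
    addr = ip_address("0.0.0.0")  # same fallback as the property above
print(addr)
```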
@ -86,8 +119,7 @@ class DockerAddon(DockerInterface):
            if isinstance(value, (int, str)):
                addon_env[key] = value
            else:
                _LOGGER.warning(
                    "Can not set nested option %s as Docker env", key)
                _LOGGER.warning("Can not set nested option %s as Docker env", key)

        return {
            **addon_env,
@ -96,7 +128,7 @@ class DockerAddon(DockerInterface):
        }

    @property
    def devices(self):
    def devices(self) -> List[str]:
        """Return needed devices."""
        devices = self.addon.devices or []

@ -113,7 +145,7 @@ class DockerAddon(DockerInterface):
        return devices or None

    @property
    def ports(self):
    def ports(self) -> Optional[Dict[str, Union[str, int, None]]]:
        """Filter None from add-on ports."""
        if not self.addon.ports:
            return None
@ -125,7 +157,7 @@ class DockerAddon(DockerInterface):
        }

    @property
    def security_opt(self):
    def security_opt(self) -> List[str]:
        """Controlling security options."""
        security = []

@ -143,7 +175,7 @@ class DockerAddon(DockerInterface):
        return security

    @property
    def tmpfs(self):
    def tmpfs(self) -> Optional[Dict[str, str]]:
        """Return tmpfs for Docker add-on."""
        options = self.addon.tmpfs
        if options:
@ -151,156 +183,148 @@ class DockerAddon(DockerInterface):
        return None

    @property
    def network_mapping(self):
    def network_mapping(self) -> Dict[str, str]:
        """Return hosts mapping."""
        return {
            'homeassistant': self.sys_docker.network.gateway,
            'hassio': self.sys_docker.network.supervisor,
            "homeassistant": self.sys_docker.network.gateway,
            "hassio": self.sys_docker.network.supervisor,
        }

    @property
    def network_mode(self):
    def network_mode(self) -> Optional[str]:
        """Return network mode for add-on."""
        if self.addon.host_network:
            return 'host'
            return "host"
        return None

    @property
    def pid_mode(self):
    def pid_mode(self) -> Optional[str]:
        """Return PID mode for add-on."""
        if not self.addon.protected and self.addon.host_pid:
            return 'host'
            return "host"
        return None

    @property
    def volumes(self):
    def volumes(self) -> Dict[str, Dict[str, str]]:
        """Generate volumes for mappings."""
        volumes = {
            str(self.addon.path_extern_data): {
                'bind': "/data",
                'mode': 'rw'
            }
        }
        volumes = {str(self.addon.path_extern_data): {"bind": "/data", "mode": "rw"}}

        addon_mapping = self.addon.map_volumes

        # setup config mappings
        if MAP_CONFIG in addon_mapping:
            volumes.update({
                str(self.sys_config.path_extern_homeassistant): {
                    'bind': "/config",
                    'mode': addon_mapping[MAP_CONFIG]
            volumes.update(
                {
                    str(self.sys_config.path_extern_homeassistant): {
                        "bind": "/config",
                        "mode": addon_mapping[MAP_CONFIG],
                    }
                }
            })
            )

        if MAP_SSL in addon_mapping:
            volumes.update({
                str(self.sys_config.path_extern_ssl): {
                    'bind': "/ssl",
                    'mode': addon_mapping[MAP_SSL]
            volumes.update(
                {
                    str(self.sys_config.path_extern_ssl): {
                        "bind": "/ssl",
                        "mode": addon_mapping[MAP_SSL],
                    }
                }
            })
            )

        if MAP_ADDONS in addon_mapping:
            volumes.update({
                str(self.sys_config.path_extern_addons_local): {
                    'bind': "/addons",
                    'mode': addon_mapping[MAP_ADDONS]
            volumes.update(
                {
                    str(self.sys_config.path_extern_addons_local): {
                        "bind": "/addons",
                        "mode": addon_mapping[MAP_ADDONS],
                    }
                }
            })
            )

        if MAP_BACKUP in addon_mapping:
            volumes.update({
                str(self.sys_config.path_extern_backup): {
                    'bind': "/backup",
                    'mode': addon_mapping[MAP_BACKUP]
            volumes.update(
                {
                    str(self.sys_config.path_extern_backup): {
                        "bind": "/backup",
                        "mode": addon_mapping[MAP_BACKUP],
                    }
                }
            })
            )

        if MAP_SHARE in addon_mapping:
            volumes.update({
                str(self.sys_config.path_extern_share): {
                    'bind': "/share",
                    'mode': addon_mapping[MAP_SHARE]
            volumes.update(
                {
                    str(self.sys_config.path_extern_share): {
                        "bind": "/share",
                        "mode": addon_mapping[MAP_SHARE],
                    }
                }
            })
            )

        # Init other hardware mappings

        # GPIO support
        if self.addon.with_gpio and self.sys_hardware.support_gpio:
            for gpio_path in ("/sys/class/gpio", "/sys/devices/platform/soc"):
                volumes.update({
                    gpio_path: {
                        'bind': gpio_path,
                        'mode': 'rw'
                    },
                })
                volumes.update({gpio_path: {"bind": gpio_path, "mode": "rw"}})

        # DeviceTree support
        if self.addon.with_devicetree:
            volumes.update({
                "/sys/firmware/devicetree/base": {
                    'bind': "/device-tree",
                    'mode': 'ro'
                },
            })
            volumes.update(
                {
                    "/sys/firmware/devicetree/base": {
                        "bind": "/device-tree",
                        "mode": "ro",
                    }
                }
            )

        # Kernel Modules support
        if self.addon.with_kernel_modules:
            volumes.update({
                "/lib/modules": {
                    'bind': "/lib/modules",
                    'mode': 'ro'
                },
            })
            volumes.update({"/lib/modules": {"bind": "/lib/modules", "mode": "ro"}})

        # Docker API support
        if not self.addon.protected and self.addon.access_docker_api:
            volumes.update({
                "/var/run/docker.sock": {
                    'bind': "/var/run/docker.sock",
                    'mode': 'ro'
                },
            })
            volumes.update(
                {"/var/run/docker.sock": {"bind": "/var/run/docker.sock", "mode": "ro"}}
            )

        # Host D-Bus system
        if self.addon.host_dbus:
            volumes.update({
                "/var/run/dbus": {
                    'bind': "/var/run/dbus",
                    'mode': 'rw'
                }
            })
            volumes.update({"/var/run/dbus": {"bind": "/var/run/dbus", "mode": "rw"}})

        # ALSA configuration
        if self.addon.with_audio:
            volumes.update({
                str(self.addon.path_extern_asound): {
                    'bind': "/etc/asound.conf",
                    'mode': 'ro'
            volumes.update(
                {
                    str(self.addon.path_extern_asound): {
                        "bind": "/etc/asound.conf",
                        "mode": "ro",
                    }
                }
            })
            )

        return volumes

    def _run(self):
    def _run(self) -> None:
        """Run Docker image.

        Need run inside executor.
        """
        if self._is_running():
            return True
            return

        # Security check
        if not self.addon.protected:
            _LOGGER.warning("%s run with disabled protected mode!",
                            self.addon.name)
            _LOGGER.warning("%s run with disabled protected mode!", self.addon.name)

        # cleanup
        self._stop()
        # Cleanup
        with suppress(DockerAPIError):
            self._stop()

        ret = self.sys_docker.run(
        # Create & Run container
        docker_container = self.sys_docker.run(
            self.image,
            name=self.name,
            hostname=self.hostname,
@ -318,25 +342,23 @@ class DockerAddon(DockerInterface):
            security_opt=self.security_opt,
            environment=self.environment,
            volumes=self.volumes,
            tmpfs=self.tmpfs)
            tmpfs=self.tmpfs,
        )

        if ret:
            _LOGGER.info("Start Docker add-on %s with version %s", self.image,
                         self.version)
        _LOGGER.info("Start Docker add-on %s with version %s", self.image, self.version)
        self._meta = docker_container.attrs

        return ret

    def _install(self, tag, image=None):
    def _install(self, tag: str, image: Optional[str] = None) -> None:
        """Pull Docker image or build it.

        Need run inside executor.
        """
        if self.addon.need_build:
            return self._build(tag)
            self._build(tag)

        return super()._install(tag, image)
        super()._install(tag, image)

    def _build(self, tag):
    def _build(self, tag: str) -> None:
        """Build a Docker container.

        Need run inside executor.
@ -346,27 +368,27 @@ class DockerAddon(DockerInterface):
        _LOGGER.info("Start build %s:%s", self.image, tag)
        try:
            image, log = self.sys_docker.images.build(
                use_config_proxy=False, **build_env.get_docker_args(tag))
                use_config_proxy=False, **build_env.get_docker_args(tag)
            )

            _LOGGER.debug("Build %s:%s done: %s", self.image, tag, log)
            image.tag(self.image, tag='latest')
            image.tag(self.image, tag="latest")

            # Update meta data
            self._meta = image.attrs

        except docker.errors.DockerException as err:
            _LOGGER.error("Can't build %s:%s: %s", self.image, tag, err)
            return False
            raise DockerAPIError() from None

        _LOGGER.info("Build %s:%s done", self.image, tag)
        return True

    @process_lock
    def export_image(self, path):
    def export_image(self, tar_file: Path) -> Awaitable[None]:
        """Export current images into a tar file."""
        return self.sys_run_in_executor(self._export_image, path)
        return self.sys_run_in_executor(self._export_image, tar_file)

    def _export_image(self, tar_file):
    def _export_image(self, tar_file: Path) -> None:
        """Export current images into a tar file.

        Need run inside executor.
@ -375,7 +397,7 @@ class DockerAddon(DockerInterface):
            image = self.sys_docker.api.get_image(self.image)
        except docker.errors.DockerException as err:
            _LOGGER.error("Can't fetch image %s: %s", self.image, err)
            return False
            raise DockerAPIError() from None

        _LOGGER.info("Export image %s to %s", self.image, tar_file)
        try:
@ -384,17 +406,16 @@ class DockerAddon(DockerInterface):
                    write_tar.write(chunk)
        except (OSError, requests.exceptions.ReadTimeout) as err:
            _LOGGER.error("Can't write tar file %s: %s", tar_file, err)
            return False
            raise DockerAPIError() from None

        _LOGGER.info("Export image %s done", self.image)
        return True

    @process_lock
    def import_image(self, path, tag):
    def import_image(self, tar_file: Path, tag: str) -> Awaitable[None]:
        """Import a tar file as image."""
        return self.sys_run_in_executor(self._import_image, path, tag)
        return self.sys_run_in_executor(self._import_image, tar_file, tag)

    def _import_image(self, tar_file, tag):
    def _import_image(self, tar_file: Path, tag: str) -> None:
        """Import a tar file as image.

        Need run inside executor.
@ -403,37 +424,38 @@ class DockerAddon(DockerInterface):
            with tar_file.open("rb") as read_tar:
                self.sys_docker.api.load_image(read_tar, quiet=True)

            image = self.sys_docker.images.get(self.image)
            image.tag(self.image, tag=tag)
            docker_image = self.sys_docker.images.get(self.image)
            docker_image.tag(self.image, tag=tag)
        except (docker.errors.DockerException, OSError) as err:
            _LOGGER.error("Can't import image %s: %s", self.image, err)
            return False
            raise DockerAPIError() from None

        _LOGGER.info("Import image %s and tag %s", tar_file, tag)
        self._meta = image.attrs
        self._cleanup()
        return True
        self._meta = docker_image.attrs

        with suppress(DockerAPIError):
            self._cleanup()

    @process_lock
    def write_stdin(self, data):
    def write_stdin(self, data: bytes) -> Awaitable[None]:
        """Write to add-on stdin."""
        return self.sys_run_in_executor(self._write_stdin, data)

    def _write_stdin(self, data):
    def _write_stdin(self, data: bytes) -> None:
        """Write to add-on stdin.

        Need run inside executor.
        """
        if not self._is_running():
            return False
            raise DockerAPIError() from None

        try:
            # Load needed docker objects
            container = self.sys_docker.containers.get(self.name)
            socket = container.attach_socket(params={'stdin': 1, 'stream': 1})
            socket = container.attach_socket(params={"stdin": 1, "stream": 1})
        except docker.errors.DockerException as err:
            _LOGGER.error("Can't attach to %s stdin: %s", self.name, err)
            return False
            raise DockerAPIError() from None

        try:
            # Write to stdin
@ -442,6 +464,4 @@ class DockerAddon(DockerInterface):
            socket.close()
        except OSError as err:
            _LOGGER.error("Can't write to %s stdin: %s", self.name, err)
            return False

        return True
            raise DockerAPIError() from None

@ -1,10 +1,14 @@
"""Init file for Hass.io Docker object."""
from contextlib import suppress
from ipaddress import IPv4Address
import logging
from typing import Awaitable

import docker

from .interface import DockerInterface
from ..const import ENV_TOKEN, ENV_TIME, LABEL_MACHINE
from ..const import ENV_TIME, ENV_TOKEN, LABEL_MACHINE
from ..exceptions import DockerAPIError
from .interface import CommandReturn, DockerInterface

_LOGGER = logging.getLogger(__name__)

@ -39,18 +43,25 @@ class DockerHomeAssistant(DockerInterface):
            devices.append(f"{device}:{device}:rwm")
        return devices or None

    def _run(self):
    @property
    def ip_address(self) -> IPv4Address:
        """Return IP address of this container."""
        return self.sys_docker.network.gateway

    def _run(self) -> None:
        """Run Docker image.

        Need run inside executor.
        """
        if self._is_running():
            return False
            return

        # cleanup
        self._stop()
        # Cleanup
        with suppress(DockerAPIError):
            self._stop()

        ret = self.sys_docker.run(
        # Create & Run container
        docker_container = self.sys_docker.run(
            self.image,
            name=self.name,
            hostname=self.name,
@ -77,14 +88,10 @@ class DockerHomeAssistant(DockerInterface):
            },
        )

        if ret:
            _LOGGER.info(
                "Start homeassistant %s with version %s", self.image, self.version
            )
        _LOGGER.info("Start homeassistant %s with version %s", self.image, self.version)
        self._meta = docker_container.attrs

        return ret

    def _execute_command(self, command):
    def _execute_command(self, command: str) -> CommandReturn:
        """Create a temporary container and run command.

        Need run inside executor.
@ -112,11 +119,11 @@ class DockerHomeAssistant(DockerInterface):
            },
        )

    def is_initialize(self):
    def is_initialize(self) -> Awaitable[bool]:
        """Return True if Docker container exists."""
        return self.sys_run_in_executor(self._is_initialize)

    def _is_initialize(self):
    def _is_initialize(self) -> bool:
        """Return True if docker container exists.

        Need run inside executor.
@ -2,13 +2,16 @@
import asyncio
from contextlib import suppress
import logging
from typing import Any, Dict, Optional, Awaitable

import docker

from ..const import LABEL_ARCH, LABEL_VERSION
from ..coresys import CoreSysAttributes
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import DockerAPIError
from ..utils import process_lock
from .stats import DockerStats
from . import CommandReturn

_LOGGER = logging.getLogger(__name__)

@ -16,60 +19,60 @@ _LOGGER = logging.getLogger(__name__)
class DockerInterface(CoreSysAttributes):
    """Docker Hass.io interface."""

    def __init__(self, coresys):
    def __init__(self, coresys: CoreSys):
        """Initialize Docker base wrapper."""
        self.coresys = coresys
        self._meta = None
        self.lock = asyncio.Lock(loop=coresys.loop)
        self.coresys: CoreSys = coresys
        self._meta: Optional[Dict[str, Any]] = None
        self.lock: asyncio.Lock = asyncio.Lock(loop=coresys.loop)

    @property
    def timeout(self):
    def timeout(self) -> str:
        """Return timeout for Docker actions."""
        return 30

    @property
    def name(self):
    def name(self) -> Optional[str]:
        """Return name of Docker container."""
        return None

    @property
    def meta_config(self):
    def meta_config(self) -> Dict[str, Any]:
        """Return meta data of configuration for container/image."""
        if not self._meta:
            return {}
        return self._meta.get("Config", {})

    @property
    def meta_labels(self):
    def meta_labels(self) -> Dict[str, str]:
        """Return meta data of labels for container/image."""
        return self.meta_config.get("Labels") or {}

    @property
    def image(self):
    def image(self) -> Optional[str]:
        """Return name of Docker image."""
        return self.meta_config.get("Image")

    @property
    def version(self):
    def version(self) -> Optional[str]:
        """Return version of Docker image."""
        return self.meta_labels.get(LABEL_VERSION)

    @property
    def arch(self):
    def arch(self) -> Optional[str]:
        """Return arch of Docker image."""
        return self.meta_labels.get(LABEL_ARCH)

    @property
    def in_progress(self):
    def in_progress(self) -> bool:
        """Return True if a task is in progress."""
        return self.lock.locked()

    @process_lock
    def install(self, tag, image=None):
    def install(self, tag: str, image: Optional[str] = None):
        """Pull docker image."""
        return self.sys_run_in_executor(self._install, tag, image)

    def _install(self, tag, image=None):
    def _install(self, tag: str, image: Optional[str] = None) -> None:
        """Pull Docker image.

        Need run inside executor.
@ -80,20 +83,19 @@ class DockerInterface(CoreSysAttributes):
            _LOGGER.info("Pull image %s tag %s.", image, tag)
            docker_image = self.sys_docker.images.pull(f"{image}:{tag}")

            _LOGGER.info("Tag image %s with version %s as latest", image, tag)
            docker_image.tag(image, tag="latest")
            self._meta = docker_image.attrs
        except docker.errors.APIError as err:
            _LOGGER.error("Can't install %s:%s -> %s.", image, tag, err)
            return False
            raise DockerAPIError() from None
        else:
            self._meta = docker_image.attrs

        _LOGGER.info("Tag image %s with version %s as latest", image, tag)
        return True

    def exists(self):
    def exists(self) -> Awaitable[bool]:
        """Return True if Docker image exists in local repository."""
        return self.sys_run_in_executor(self._exists)

    def _exists(self):
    def _exists(self) -> bool:
        """Return True if Docker image exists in local repository.

        Need run inside executor.
@ -106,14 +108,14 @@ class DockerInterface(CoreSysAttributes):

        return True

    def is_running(self):
    def is_running(self) -> Awaitable[bool]:
        """Return True if Docker is running.

        Return a Future.
        """
        return self.sys_run_in_executor(self._is_running)

    def _is_running(self):
    def _is_running(self) -> bool:
        """Return True if Docker is running.

        Need run inside executor.
@ -139,7 +141,7 @@ class DockerInterface(CoreSysAttributes):
        """Attach to running Docker container."""
        return self.sys_run_in_executor(self._attach)

    def _attach(self):
    def _attach(self) -> None:
        """Attach to running docker container.

        Need run inside executor.
@ -147,21 +149,21 @@ class DockerInterface(CoreSysAttributes):
        try:
            if self.image:
                self._meta = self.sys_docker.images.get(self.image).attrs
            else:
                self._meta = self.sys_docker.containers.get(self.name).attrs
            self._meta = self.sys_docker.containers.get(self.name).attrs
        except docker.errors.DockerException:
            return False
            pass

        _LOGGER.info("Attach to image %s with version %s", self.image, self.version)

        return True
        # Successfull?
        if not self._meta:
            raise DockerAPIError() from None
        _LOGGER.info("Attach to %s with version %s", self.image, self.version)

    @process_lock
    def run(self):
    def run(self) -> Awaitable[None]:
        """Run Docker image."""
        return self.sys_run_in_executor(self._run)

    def _run(self):
    def _run(self) -> None:
        """Run Docker image.

        Need run inside executor.
@ -169,11 +171,11 @@ class DockerInterface(CoreSysAttributes):
        raise NotImplementedError()

    @process_lock
    def stop(self, remove_container=True):
    def stop(self, remove_container=True) -> Awaitable[None]:
        """Stop/remove Docker container."""
        return self.sys_run_in_executor(self._stop, remove_container)

    def _stop(self, remove_container=True):
    def _stop(self, remove_container=True) -> None:
        """Stop/remove Docker container.

        Need run inside executor.
@ -181,26 +183,24 @@ class DockerInterface(CoreSysAttributes):
        try:
            docker_container = self.sys_docker.containers.get(self.name)
        except docker.errors.DockerException:
            return False
            raise DockerAPIError() from None

        if docker_container.status == "running":
            _LOGGER.info("Stop %s Docker application", self.image)
            _LOGGER.info("Stop %s application", self.name)
            with suppress(docker.errors.DockerException):
                docker_container.stop(timeout=self.timeout)

        if remove_container:
            with suppress(docker.errors.DockerException):
                _LOGGER.info("Clean %s Docker application", self.image)
                _LOGGER.info("Clean %s application", self.name)
                docker_container.remove(force=True)

        return True

    @process_lock
    def start(self):
    def start(self) -> Awaitable[None]:
        """Start Docker container."""
        return self.sys_run_in_executor(self._start)

    def _start(self):
    def _start(self) -> None:
        """Start docker container.

        Need run inside executor.
@ -208,31 +208,30 @@ class DockerInterface(CoreSysAttributes):
        try:
            docker_container = self.sys_docker.containers.get(self.name)
        except docker.errors.DockerException:
            return False
            raise DockerAPIError() from None

        _LOGGER.info("Start %s", self.image)
        try:
            docker_container.start()
        except docker.errors.DockerException as err:
            _LOGGER.error("Can't start %s: %s", self.image, err)
            return False

        return True
            raise DockerAPIError() from None

    @process_lock
    def remove(self):
    def remove(self) -> Awaitable[None]:
        """Remove Docker images."""
        return self.sys_run_in_executor(self._remove)

    def _remove(self):
    def _remove(self) -> None:
        """remove docker images.

        Need run inside executor.
        """
        # Cleanup container
        self._stop()
        with suppress(DockerAPIError):
            self._stop()

        _LOGGER.info("Remove Docker %s with latest and %s", self.image, self.version)
        _LOGGER.info("Remove image %s with latest and %s", self.image, self.version)

        try:
            with suppress(docker.errors.ImageNotFound):
@ -245,17 +244,16 @@ class DockerInterface(CoreSysAttributes):

        except docker.errors.DockerException as err:
            _LOGGER.warning("Can't remove image %s: %s", self.image, err)
            return False
            raise DockerAPIError() from None

        self._meta = None
        return True

    @process_lock
    def update(self, tag, image=None):
    def update(self, tag: str, image: Optional[str] = None) -> Awaitable[None]:
        """Update a Docker image."""
        return self.sys_run_in_executor(self._update, tag, image)

    def _update(self, tag, image=None):
    def _update(self, tag: str, image: Optional[str] = None) -> None:
        """Update a docker image.

        Need run inside executor.
@ -263,27 +261,27 @@ class DockerInterface(CoreSysAttributes):
|
||||
image = image or self.image
|
||||
|
||||
_LOGGER.info(
|
||||
"Update Docker %s:%s to %s:%s", self.image, self.version, image, tag
|
||||
"Update image %s:%s to %s:%s", self.image, self.version, image, tag
|
||||
)
|
||||
|
||||
# Update docker image
|
||||
if not self._install(tag, image):
|
||||
return False
|
||||
self._install(tag, image)
|
||||
|
||||
# Stop container & cleanup
|
||||
self._stop()
|
||||
self._cleanup()
|
||||
with suppress(DockerAPIError):
|
||||
try:
|
||||
self._stop()
|
||||
finally:
|
||||
self._cleanup()
|
||||
|
||||
return True
|
||||
|
||||
def logs(self):
|
||||
def logs(self) -> Awaitable[bytes]:
|
||||
"""Return Docker logs of container.
|
||||
|
||||
Return a Future.
|
||||
"""
|
||||
return self.sys_run_in_executor(self._logs)
|
||||
|
||||
def _logs(self):
|
||||
def _logs(self) -> bytes:
|
||||
"""Return Docker logs of container.
|
||||
|
||||
Need run inside executor.
|
||||
@ -299,11 +297,11 @@ class DockerInterface(CoreSysAttributes):
|
||||
_LOGGER.warning("Can't grep logs from %s: %s", self.image, err)
|
||||
|
||||
@process_lock
|
||||
def cleanup(self):
|
||||
def cleanup(self) -> Awaitable[None]:
|
||||
"""Check if old version exists and cleanup."""
|
||||
return self.sys_run_in_executor(self._cleanup)
|
||||
|
||||
def _cleanup(self):
|
||||
def _cleanup(self) -> None:
|
||||
"""Check if old version exists and cleanup.
|
||||
|
||||
Need run inside executor.
|
||||
@ -312,24 +310,22 @@ class DockerInterface(CoreSysAttributes):
|
||||
latest = self.sys_docker.images.get(self.image)
|
||||
except docker.errors.DockerException:
|
||||
_LOGGER.warning("Can't find %s for cleanup", self.image)
|
||||
return False
|
||||
raise DockerAPIError() from None
|
||||
|
||||
for image in self.sys_docker.images.list(name=self.image):
|
||||
if latest.id == image.id:
|
||||
continue
|
||||
|
||||
with suppress(docker.errors.DockerException):
|
||||
_LOGGER.info("Cleanup Docker images: %s", image.tags)
|
||||
_LOGGER.info("Cleanup images: %s", image.tags)
|
||||
self.sys_docker.images.remove(image.id, force=True)
|
||||
|
||||
return True
|
||||
|
||||
@process_lock
|
||||
def restart(self):
|
||||
def restart(self) -> Awaitable[None]:
|
||||
"""Restart docker container."""
|
||||
return self.sys_loop.run_in_executor(None, self._restart)
|
||||
|
||||
def _restart(self):
|
||||
def _restart(self) -> None:
|
||||
"""Restart docker container.
|
||||
|
||||
Need run inside executor.
|
||||
@ -337,33 +333,32 @@ class DockerInterface(CoreSysAttributes):
|
||||
try:
|
||||
container = self.sys_docker.containers.get(self.name)
|
||||
except docker.errors.DockerException:
|
||||
return False
|
||||
raise DockerAPIError() from None
|
||||
|
||||
_LOGGER.info("Restart %s", self.image)
|
||||
try:
|
||||
container.restart(timeout=self.timeout)
|
||||
except docker.errors.DockerException as err:
|
||||
_LOGGER.warning("Can't restart %s: %s", self.image, err)
|
||||
return False
|
||||
return True
|
||||
raise DockerAPIError() from None
|
||||
|
||||
@process_lock
|
||||
def execute_command(self, command):
|
||||
def execute_command(self, command: str) -> Awaitable[CommandReturn]:
|
||||
"""Create a temporary container and run command."""
|
||||
return self.sys_run_in_executor(self._execute_command, command)
|
||||
|
||||
def _execute_command(self, command):
|
||||
def _execute_command(self, command: str) -> CommandReturn:
|
||||
"""Create a temporary container and run command.
|
||||
|
||||
Need run inside executor.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def stats(self):
|
||||
def stats(self) -> Awaitable[DockerStats]:
|
||||
"""Read and return stats from container."""
|
||||
return self.sys_run_in_executor(self._stats)
|
||||
|
||||
def _stats(self):
|
||||
def _stats(self) -> DockerStats:
|
||||
"""Create a temporary container and run command.
|
||||
|
||||
Need run inside executor.
|
||||
@ -371,23 +366,23 @@ class DockerInterface(CoreSysAttributes):
|
||||
try:
|
||||
docker_container = self.sys_docker.containers.get(self.name)
|
||||
except docker.errors.DockerException:
|
||||
return None
|
||||
raise DockerAPIError() from None
|
||||
|
||||
try:
|
||||
stats = docker_container.stats(stream=False)
|
||||
return DockerStats(stats)
|
||||
except docker.errors.DockerException as err:
|
||||
_LOGGER.error("Can't read stats from %s: %s", self.name, err)
|
||||
return None
|
||||
raise DockerAPIError() from None
|
||||
|
||||
def is_fails(self):
|
||||
def is_fails(self) -> Awaitable[bool]:
|
||||
"""Return True if Docker is failing state.
|
||||
|
||||
Return a Future.
|
||||
"""
|
||||
return self.sys_run_in_executor(self._is_fails)
|
||||
|
||||
def _is_fails(self):
|
||||
def _is_fails(self) -> bool:
|
||||
"""Return True if Docker is failing state.
|
||||
|
||||
Need run inside executor.
|
||||
|
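The pattern throughout this `DockerInterface` refactor: the executor methods no longer signal failure with `return False`/`return None`; they raise `DockerAPIError`, and `raise ... from None` drops the chained docker-py traceback from the logs. A minimal sketch of the calling pattern, outside the supervisor (the `stop_container` helper is illustrative; only the docker-py calls are real API):

```python
from contextlib import suppress

import docker


class DockerAPIError(Exception):
    """Stand-in for hassio.exceptions.DockerAPIError."""


def stop_container(client: docker.DockerClient, name: str, timeout: int = 10) -> None:
    """Stop a container; raise instead of returning False."""
    try:
        container = client.containers.get(name)
    except docker.errors.DockerException:
        # `from None` hides the docker-py traceback behind our own error
        raise DockerAPIError() from None

    with suppress(docker.errors.DockerException):
        container.stop(timeout=timeout)


# Callers that can tolerate failure suppress it instead of checking a bool:
# with suppress(DockerAPIError):
#     stop_container(docker.from_env(), "homeassistant")
```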
@ -1,9 +1,12 @@
"""Internal network manager for Hass.io."""
from ipaddress import IPv4Address
import logging
from typing import List, Optional

import docker

from ..const import DOCKER_NETWORK_MASK, DOCKER_NETWORK, DOCKER_NETWORK_RANGE
from ..const import DOCKER_NETWORK, DOCKER_NETWORK_MASK, DOCKER_NETWORK_RANGE
from ..exceptions import DockerAPIError

_LOGGER = logging.getLogger(__name__)

@ -14,32 +17,32 @@ class DockerNetwork:
This class is not AsyncIO safe!
"""

def __init__(self, dock):
def __init__(self, docker_client: docker.DockerClient):
"""Initialize internal Hass.io network."""
self.docker = dock
self.network = self._get_network()
self.docker: docker.DockerClient = docker_client
self.network: docker.models.networks.Network = self._get_network()

@property
def name(self):
def name(self) -> str:
"""Return name of network."""
return DOCKER_NETWORK

@property
def containers(self):
def containers(self) -> List[docker.models.containers.Container]:
"""Return of connected containers from network."""
return self.network.containers

@property
def gateway(self):
def gateway(self) -> IPv4Address:
"""Return gateway of the network."""
return DOCKER_NETWORK_MASK[1]

@property
def supervisor(self):
def supervisor(self) -> IPv4Address:
"""Return supervisor of the network."""
return DOCKER_NETWORK_MASK[2]

def _get_network(self):
def _get_network(self) -> docker.models.networks.Network:
"""Get HassIO network."""
try:
return self.docker.networks.get(DOCKER_NETWORK)
@ -49,18 +52,25 @@
ipam_pool = docker.types.IPAMPool(
subnet=str(DOCKER_NETWORK_MASK),
gateway=str(self.gateway),
iprange=str(DOCKER_NETWORK_RANGE)
iprange=str(DOCKER_NETWORK_RANGE),
)

ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])

return self.docker.networks.create(
DOCKER_NETWORK, driver='bridge', ipam=ipam_config,
enable_ipv6=False, options={
"com.docker.network.bridge.name": DOCKER_NETWORK,
})
DOCKER_NETWORK,
driver="bridge",
ipam=ipam_config,
enable_ipv6=False,
options={"com.docker.network.bridge.name": DOCKER_NETWORK},
)

def attach_container(self, container, alias=None, ipv4=None):
def attach_container(
self,
container: docker.models.containers.Container,
alias: Optional[List[str]] = None,
ipv4: Optional[IPv4Address] = None,
) -> None:
"""Attach container to Hass.io network.

Need run inside executor.
@ -71,23 +81,24 @@ class DockerNetwork:
self.network.connect(container, aliases=alias, ipv4_address=ipv4)
except docker.errors.APIError as err:
_LOGGER.error("Can't link container to hassio-net: %s", err)
return False
raise DockerAPIError() from None

self.network.reload()
return True

def detach_default_bridge(self, container):
def detach_default_bridge(
self, container: docker.models.containers.Container
) -> None:
"""Detach default Docker bridge.

Need run inside executor.
"""
try:
default_network = self.docker.networks.get('bridge')
default_network = self.docker.networks.get("bridge")
default_network.disconnect(container)

except docker.errors.NotFound:
return

except docker.errors.APIError as err:
_LOGGER.warning(
"Can't disconnect container from default: %s", err)
_LOGGER.warning("Can't disconnect container from default: %s", err)
raise DockerAPIError() from None
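For reference, the docker-py calls behind `attach_container` look like this in isolation. The network and container names below are assumptions (the real network name comes from `DOCKER_NETWORK`, and the fixed address from the IPAM range shown above):

```python
import docker

client = docker.from_env()

# assumes the internal network and the add-on container already exist
network = client.networks.get("hassio")
container = client.containers.get("addon_example")

# connect with DNS aliases and a fixed address from the IPAM range
network.connect(container, aliases=["example"], ipv4_address="172.30.32.5")
network.reload()  # refresh cached state so .containers includes the new member
```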
@ -1,11 +1,13 @@
"""Init file for Hass.io Docker object."""
from ipaddress import IPv4Address
import logging
import os

import docker

from .interface import DockerInterface
from ..coresys import CoreSysAttributes
from ..exceptions import DockerAPIError
from .interface import DockerInterface

_LOGGER = logging.getLogger(__name__)

@ -14,29 +16,36 @@ class DockerSupervisor(DockerInterface, CoreSysAttributes):
"""Docker Hass.io wrapper for Supervisor."""

@property
def name(self):
def name(self) -> str:
"""Return name of Docker container."""
return os.environ['SUPERVISOR_NAME']
return os.environ["SUPERVISOR_NAME"]

def _attach(self):
@property
def ip_address(self) -> IPv4Address:
"""Return IP address of this container."""
return self.sys_docker.network.supervisor

def _attach(self) -> None:
"""Attach to running docker container.

Need run inside executor.
"""
try:
container = self.sys_docker.containers.get(self.name)
docker_container = self.sys_docker.containers.get(self.name)
except docker.errors.DockerException:
return False
raise DockerAPIError() from None

self._meta = container.attrs
_LOGGER.info("Attach to Supervisor %s with version %s",
self.image, self.version)
self._meta = docker_container.attrs
_LOGGER.info(
"Attach to Supervisor %s with version %s", self.image, self.version
)

# If already attach
if container in self.sys_docker.network.containers:
return True
if docker_container in self.sys_docker.network.containers:
return

# Attach to network
return self.sys_docker.network.attach_container(
container, alias=['hassio'],
ipv4=self.sys_docker.network.supervisor)
_LOGGER.info("Connect Supervisor to Hass.io Network")
self.sys_docker.network.attach_container(
docker_container, alias=["hassio"], ipv4=self.sys_docker.network.supervisor
)
@ -28,6 +28,17 @@ class HomeAssistantAuthError(HomeAssistantAPIError):
"""Home Assistant Auth API exception."""


# Supervisor


class SupervisorError(HassioError):
"""Supervisor error."""


class SupervisorUpdateError(SupervisorError):
"""Supervisor update error."""


# HassOS


@ -43,6 +54,17 @@ class HassOSNotSupportedError(HassioNotSupportedError):
"""Function not supported by HassOS."""


# Addons


class AddonsError(HassioError):
"""Addons exception."""


class AddonsNotSupportedError(HassioNotSupportedError):
"""Addons don't support a function."""


# Arch


@ -144,3 +166,10 @@ class AppArmorInvalidError(AppArmorError):

class JsonFileError(HassioError):
"""Invalid json file."""


# docker/api


class DockerAPIError(HassioError):
"""Docker API error."""
@ -1,15 +1,22 @@
"""HassOS support on supervisor."""
import asyncio
from contextlib import suppress
import logging
from pathlib import Path
from typing import Awaitable, Optional

import aiohttp
from cpe import CPE

from .coresys import CoreSysAttributes
from .const import URL_HASSOS_OTA
from .coresys import CoreSysAttributes, CoreSys
from .docker.hassos_cli import DockerHassOSCli
from .exceptions import HassOSNotSupportedError, HassOSUpdateError, DBusError
from .exceptions import (
DBusError,
HassOSNotSupportedError,
HassOSUpdateError,
DockerAPIError,
)

_LOGGER = logging.getLogger(__name__)

@ -17,61 +24,61 @@ _LOGGER = logging.getLogger(__name__)
class HassOS(CoreSysAttributes):
"""HassOS interface inside HassIO."""

def __init__(self, coresys):
def __init__(self, coresys: CoreSys):
"""Initialize HassOS handler."""
self.coresys = coresys
self.instance = DockerHassOSCli(coresys)
self._available = False
self._version = None
self._board = None
self.coresys: CoreSys = coresys
self.instance: DockerHassOSCli = DockerHassOSCli(coresys)
self._available: bool = False
self._version: Optional[str] = None
self._board: Optional[str] = None

@property
def available(self):
def available(self) -> bool:
"""Return True, if HassOS on host."""
return self._available

@property
def version(self):
def version(self) -> Optional[str]:
"""Return version of HassOS."""
return self._version

@property
def version_cli(self):
def version_cli(self) -> Optional[str]:
"""Return version of HassOS cli."""
return self.instance.version

@property
def version_latest(self):
def version_latest(self) -> str:
"""Return version of HassOS."""
return self.sys_updater.version_hassos

@property
def version_cli_latest(self):
def version_cli_latest(self) -> str:
"""Return version of HassOS."""
return self.sys_updater.version_hassos_cli

@property
def need_update(self):
def need_update(self) -> bool:
"""Return true if a HassOS update is available."""
return self.version != self.version_latest

@property
def need_cli_update(self):
def need_cli_update(self) -> bool:
"""Return true if a HassOS cli update is available."""
return self.version_cli != self.version_cli_latest

@property
def board(self):
def board(self) -> Optional[str]:
"""Return board name."""
return self._board

def _check_host(self):
def _check_host(self) -> None:
"""Check if HassOS is available."""
if not self.available:
_LOGGER.error("No HassOS available")
raise HassOSNotSupportedError()

async def _download_raucb(self, version):
async def _download_raucb(self, version: str) -> None:
"""Download rauc bundle (OTA) from github."""
url = URL_HASSOS_OTA.format(version=version, board=self.board)
raucb = Path(self.sys_config.path_tmp, f"hassos-{version}.raucb")
@ -83,9 +90,9 @@ class HassOS(CoreSysAttributes):
raise HassOSUpdateError()

# Download RAUCB file
with raucb.open('wb') as ota_file:
with raucb.open("wb") as ota_file:
while True:
chunk = await request.content.read(1048576)
chunk = await request.content.read(1_048_576)
if not chunk:
break
ota_file.write(chunk)
@ -101,7 +108,7 @@ class HassOS(CoreSysAttributes):

raise HassOSUpdateError()

async def load(self):
async def load(self) -> None:
"""Load HassOS data."""
try:
# Check needed host functions
@ -111,7 +118,7 @@ class HassOS(CoreSysAttributes):

assert self.sys_host.info.cpe is not None
cpe = CPE(self.sys_host.info.cpe)
assert cpe.get_product()[0] == 'hassos'
assert cpe.get_product()[0] == "hassos"
except (AssertionError, NotImplementedError):
_LOGGER.debug("Found no HassOS")
return
@ -122,9 +129,10 @@ class HassOS(CoreSysAttributes):
self._board = cpe.get_target_hardware()[0]

_LOGGER.info("Detect HassOS %s on host system", self.version)
await self.instance.attach()
with suppress(DockerAPIError):
await self.instance.attach()

def config_sync(self):
def config_sync(self) -> Awaitable[None]:
"""Trigger a host config reload from usb.

Return a coroutine.
@ -132,9 +140,9 @@ class HassOS(CoreSysAttributes):
self._check_host()

_LOGGER.info("Syncing configuration from USB with HassOS.")
return self.sys_host.services.restart('hassos-config.service')
return self.sys_host.services.restart("hassos-config.service")

async def update(self, version=None):
async def update(self, version: Optional[str] = None) -> None:
"""Update HassOS system."""
version = version or self.version_latest

@ -167,20 +175,19 @@ class HassOS(CoreSysAttributes):

# Update fails
rauc_status = await self.sys_dbus.get_properties()
_LOGGER.error(
"HassOS update fails with: %s", rauc_status.get('LastError'))
_LOGGER.error("HassOS update fails with: %s", rauc_status.get("LastError"))
raise HassOSUpdateError()

async def update_cli(self, version=None):
async def update_cli(self, version: Optional[str] = None) -> None:
"""Update local HassOS cli."""
version = version or self.version_cli_latest

if version == self.version_cli:
_LOGGER.warning("Version %s is already installed for CLI", version)
raise HassOSUpdateError()

if await self.instance.update(version):
return

_LOGGER.error("HassOS CLI update fails")
raise HassOSUpdateError()
try:
await self.instance.update(version)
except DockerAPIError:
_LOGGER.error("HassOS CLI update fails")
raise HassOSUpdateError() from None
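The OTA download loop above streams the RAUC bundle to disk in 1 MiB chunks (`1_048_576` is just `1048576` with PEP 515 digit separators). Stripped of the supervisor plumbing, the same aiohttp pattern is (URL and path are placeholders):

```python
import asyncio

import aiohttp


async def download(url: str, dest: str) -> None:
    """Stream a large file to disk in 1 MiB chunks."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as request:
            with open(dest, "wb") as ota_file:
                while True:
                    chunk = await request.content.read(1_048_576)
                    if not chunk:
                        break
                    ota_file.write(chunk)


# asyncio.run(download("https://example.com/hassos.raucb", "/tmp/hassos.raucb"))
```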
@ -7,9 +7,10 @@ import logging
import os
from pathlib import Path
import re
import secrets
import socket
import time
from typing import Any, AsyncContextManager, Coroutine, Dict, Optional
from typing import Any, AsyncContextManager, Awaitable, Dict, Optional
from uuid import UUID

import aiohttp
@ -33,13 +34,15 @@ from .const import (
)
from .coresys import CoreSys, CoreSysAttributes
from .docker.homeassistant import DockerHomeAssistant
from .docker.stats import DockerStats
from .exceptions import (
DockerAPIError,
HomeAssistantAPIError,
HomeAssistantAuthError,
HomeAssistantError,
HomeAssistantUpdateError,
)
from .utils import convert_to_ascii, create_token, process_lock
from .utils import convert_to_ascii, process_lock
from .utils.json import JsonConfig
from .validate import SCHEMA_HASS_CONFIG

@ -72,7 +75,8 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):

async def load(self) -> None:
"""Prepare Home Assistant object."""
if await self.instance.attach():
with suppress(DockerAPIError):
await self.instance.attach()
return

_LOGGER.info("No Home Assistant Docker image %s found.", self.image)
@ -94,9 +98,9 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):
return self._error_state

@property
def api_ip(self) -> IPv4Address:
def ip_address(self) -> IPv4Address:
"""Return IP of Home Assistant instance."""
return self.sys_docker.network.gateway
return self.instance.ip_address

@property
def api_port(self) -> int:
@ -132,7 +136,7 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):
def api_url(self) -> str:
"""Return API url to Home Assistant."""
return "{}://{}:{}".format('https' if self.api_ssl else 'http',
self.api_ip, self.api_port)
self.ip_address, self.api_port)

@property
def watchdog(self) -> bool:
@ -230,8 +234,9 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):
"""Install a landing page."""
_LOGGER.info("Setup HomeAssistant landingpage")
while True:
if await self.instance.install('landingpage'):
break
with suppress(DockerAPIError):
await self.instance.install('landingpage')
return
_LOGGER.warning("Fails install landingpage, retry after 30sec")
await asyncio.sleep(30)

@ -245,8 +250,10 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):
await self.sys_updater.reload()

tag = self.last_version
if tag and await self.instance.install(tag):
break
if tag:
with suppress(DockerAPIError):
await self.instance.install(tag)
break
_LOGGER.warning("Error on install Home Assistant. Retry in 30sec")
await asyncio.sleep(30)

@ -260,7 +267,8 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):
except HomeAssistantError:
_LOGGER.error("Can't start Home Assistant!")
finally:
await self.instance.cleanup()
with suppress(DockerAPIError):
await self.instance.cleanup()

@process_lock
async def update(self, version=None) -> None:
@ -272,14 +280,17 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):

if exists and version == self.instance.version:
_LOGGER.warning("Version %s is already installed", version)
return HomeAssistantUpdateError()
return

# process an update
async def _update(to_version):
"""Run Home Assistant update."""
_LOGGER.info("Update Home Assistant to version %s", to_version)
if not await self.instance.update(to_version):
raise HomeAssistantUpdateError()
try:
await self.instance.update(to_version)
except DockerAPIError:
_LOGGER.warning("Update Home Assistant image fails")
raise HomeAssistantUpdateError() from None

if running:
await self._start()
@ -304,70 +315,84 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):
return

# Create new API token
self._data[ATTR_ACCESS_TOKEN] = create_token()
self._data[ATTR_ACCESS_TOKEN] = secrets.token_hex(56)
self.save_data()

if not await self.instance.run():
raise HomeAssistantError()
try:
await self.instance.run()
except DockerAPIError:
raise HomeAssistantError() from None
await self._block_till_run()

@process_lock
async def start(self) -> None:
"""Run Home Assistant docker."""
if await self.instance.is_running():
await self.instance.restart()
elif await self.instance.is_initialize():
await self.instance.start()
else:
await self._start()
return
try:
if await self.instance.is_running():
await self.instance.restart()
elif await self.instance.is_initialize():
await self.instance.start()
else:
await self._start()
return

await self._block_till_run()
await self._block_till_run()
except DockerAPIError:
raise HomeAssistantError() from None

@process_lock
def stop(self) -> Coroutine:
async def stop(self) -> None:
"""Stop Home Assistant Docker.

Return a coroutine.
"""
return self.instance.stop(remove_container=False)
try:
return await self.instance.stop(remove_container=False)
except DockerAPIError:
raise HomeAssistantError() from None

@process_lock
async def restart(self) -> None:
"""Restart Home Assistant Docker."""
if not await self.instance.restart():
raise HomeAssistantError()
try:
await self.instance.restart()
except DockerAPIError:
raise HomeAssistantError() from None

await self._block_till_run()

@process_lock
async def rebuild(self) -> None:
"""Rebuild Home Assistant Docker container."""
await self.instance.stop()
with suppress(DockerAPIError):
await self.instance.stop()
await self._start()

def logs(self) -> Coroutine:
def logs(self) -> Awaitable[bytes]:
"""Get HomeAssistant docker logs.

Return a coroutine.
"""
return self.instance.logs()

def stats(self) -> Coroutine:
async def stats(self) -> DockerStats:
"""Return stats of Home Assistant.

Return a coroutine.
"""
return self.instance.stats()
try:
return await self.instance.stats()
except DockerAPIError:
raise HomeAssistantError() from None

def is_running(self) -> Coroutine:
def is_running(self) -> Awaitable[bool]:
"""Return True if Docker container is running.

Return a coroutine.
"""
return self.instance.is_running()

def is_fails(self) -> Coroutine:
def is_fails(self) -> Awaitable[bool]:
"""Return True if a Docker container is fails state.

Return a coroutine.
@ -485,7 +510,7 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):
"""Check if port is mapped."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
result = sock.connect_ex((str(self.api_ip), self.api_port))
result = sock.connect_ex((str(self.ip_address), self.api_port))
sock.close()

# Check if the port is available
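The hand-rolled `create_token()` (a SHA-256 of a random UUID, always 64 hex characters) gives way to the stdlib `secrets` module here, and the relaxed `TOKEN` matcher added in validate.py accepts both lengths. A quick check of that claim:

```python
import re
import secrets

access_token = secrets.token_hex(56)  # 112 hex characters, the new format
legacy_token = secrets.token_hex(32)  # 64 hex characters, same shape as before

# the new TOKEN matcher from validate.py accepts 32-256 hex characters
TOKEN = re.compile(r"^[0-9a-f]{32,256}$")
assert TOKEN.match(access_token) and TOKEN.match(legacy_token)
```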
103
hassio/ingress.py
Normal file
103
hassio/ingress.py
Normal file
@ -0,0 +1,103 @@
"""Fetch last versions from webserver."""
from datetime import timedelta
import logging
from typing import Dict, Optional
import secrets

from .addons.addon import Addon
from .const import ATTR_SESSION, FILE_HASSIO_INGRESS
from .coresys import CoreSys, CoreSysAttributes
from .utils.json import JsonConfig
from .utils.dt import utcnow, utc_from_timestamp
from .validate import SCHEMA_INGRESS_CONFIG

_LOGGER = logging.getLogger(__name__)


class Ingress(JsonConfig, CoreSysAttributes):
"""Fetch last versions from version.json."""

def __init__(self, coresys: CoreSys):
"""Initialize updater."""
super().__init__(FILE_HASSIO_INGRESS, SCHEMA_INGRESS_CONFIG)
self.coresys: CoreSys = coresys
self.tokens: Dict[str, str] = {}

def get(self, token: str) -> Optional[Addon]:
"""Return addon they have this ingress token."""
if token not in self.tokens:
self._update_token_list()
return self.sys_addons.get(self.tokens.get(token))

@property
def sessions(self) -> Dict[str, float]:
"""Return sessions."""
return self._data[ATTR_SESSION]

async def load(self) -> None:
"""Update internal data."""
self._update_token_list()
self._cleanup_sessions()

_LOGGER.info("Load %d ingress session", len(self.sessions))

async def reload(self) -> None:
"""Reload/Validate sessions."""
self._cleanup_sessions()

async def unload(self) -> None:
"""Shutdown sessions."""
self.save_data()

def _cleanup_sessions(self) -> None:
"""Remove not used sessions."""
now = utcnow()

sessions = {}
for session, valid in self.sessions.items():
valid_dt = utc_from_timestamp(valid)
if valid_dt < now:
continue

# Is valid
sessions[session] = valid

# Write back
self.sessions.clear()
self.sessions.update(sessions)

def _update_token_list(self) -> None:
"""Regenerate token <-> Add-on map."""
self.tokens.clear()

# Read all ingress token and build a map
for addon in self.sys_addons.list_installed:
if not addon.with_ingress:
continue
self.tokens[addon.ingress_token] = addon.slug

def create_session(self) -> str:
"""Create new session."""
session = secrets.token_hex(64)
valid = utcnow() + timedelta(minutes=15)

self.sessions[session] = valid.timestamp()
self.save_data()

return session

def validate_session(self, session: str) -> bool:
"""Return True if session valid and make it longer valid."""
if session not in self.sessions:
return False
valid_until = utc_from_timestamp(self.sessions[session])

# Is still valid?
if valid_until < utcnow():
return False

# Update time
valid_until = valid_until + timedelta(minutes=15)
self.sessions[session] = valid_until.timestamp()

return True
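Condensed to its essentials, the ingress session logic is a token-to-expiry map with a 15-minute sliding window: each successful validation pushes the expiry out. A framework-free sketch of the same behavior (stdlib `timezone` stands in for the pytz-based `utcnow`/`utc_from_timestamp` helpers):

```python
from datetime import datetime, timedelta, timezone
import secrets

sessions = {}  # token -> expiry timestamp, like Ingress.sessions


def create_session() -> str:
    token = secrets.token_hex(64)
    expires = datetime.now(timezone.utc) + timedelta(minutes=15)
    sessions[token] = expires.timestamp()
    return token


def validate_session(token: str) -> bool:
    expires = sessions.get(token)
    if expires is None or expires < datetime.now(timezone.utc).timestamp():
        return False
    # every successful check pushes the expiry another 15 minutes out
    sessions[token] = expires + timedelta(minutes=15).total_seconds()
    return True


token = create_session()
assert validate_session(token)
assert not validate_session("not a token")
```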
@ -39,6 +39,7 @@ from ..const import (
CRYPTO_AES128,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import AddonsError
from ..utils.json import write_json_file
from ..utils.tar import SecureTarFile
from .utils import key_to_iv, password_for_validating, password_to_key, remove_folder
@ -289,7 +290,9 @@ class Snapshot(CoreSysAttributes):
'w', key=self._key)

# Take snapshot
if not await addon.snapshot(addon_file):
try:
await addon.snapshot(addon_file)
except AddonsError:
_LOGGER.error("Can't make snapshot from %s", addon.slug)
return

@ -326,10 +329,11 @@ class Snapshot(CoreSysAttributes):
_LOGGER.error("Can't find snapshot for %s", addon.slug)
return

# Performe a restore
if not await addon.restore(addon_file):
# Perform a restore
try:
await addon.restore(addon_file)
except AddonsError:
_LOGGER.error("Can't restore snapshot for %s", addon.slug)
return

# Run tasks
tasks = [_addon_restore(addon) for addon in addon_list]
@ -1,15 +1,24 @@
"""Home Assistant control object."""
import asyncio
from contextlib import suppress
from ipaddress import IPv4Address
import logging
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Awaitable, Optional

import aiohttp

from .coresys import CoreSysAttributes
from .docker.supervisor import DockerSupervisor
from .const import URL_HASSIO_APPARMOR
from .exceptions import HostAppArmorError
from .coresys import CoreSys, CoreSysAttributes
from .docker.stats import DockerStats
from .docker.supervisor import DockerSupervisor
from .exceptions import (
DockerAPIError,
HostAppArmorError,
SupervisorError,
SupervisorUpdateError,
)

_LOGGER = logging.getLogger(__name__)

@ -17,43 +26,52 @@ _LOGGER = logging.getLogger(__name__)
class Supervisor(CoreSysAttributes):
"""Home Assistant core object for handle it."""

def __init__(self, coresys):
def __init__(self, coresys: CoreSys):
"""Initialize hass object."""
self.coresys = coresys
self.instance = DockerSupervisor(coresys)
self.coresys: CoreSys = coresys
self.instance: DockerSupervisor = DockerSupervisor(coresys)

async def load(self):
async def load(self) -> None:
"""Prepare Home Assistant object."""
if not await self.instance.attach():
try:
await self.instance.attach()
except DockerAPIError:
_LOGGER.fatal("Can't setup Supervisor Docker container!")
await self.instance.cleanup()

with suppress(DockerAPIError):
await self.instance.cleanup()

@property
def need_update(self):
def ip_address(self) -> IPv4Address:
"""Return IP of Supervisor instance."""
return self.instance.ip_address

@property
def need_update(self) -> bool:
"""Return True if an update is available."""
return self.version != self.last_version

@property
def version(self):
def version(self) -> str:
"""Return version of running Home Assistant."""
return self.instance.version

@property
def last_version(self):
def last_version(self) -> str:
"""Return last available version of Home Assistant."""
return self.sys_updater.version_hassio

@property
def image(self):
def image(self) -> str:
"""Return image name of Home Assistant container."""
return self.instance.image

@property
def arch(self):
def arch(self) -> str:
"""Return arch of the Hass.io container."""
return self.instance.arch

async def update_apparmor(self):
async def update_apparmor(self) -> None:
"""Fetch last version and update profile."""
url = URL_HASSIO_APPARMOR
try:
@ -63,22 +81,25 @@ class Supervisor(CoreSysAttributes):

except (aiohttp.ClientError, asyncio.TimeoutError) as err:
_LOGGER.warning("Can't fetch AppArmor profile: %s", err)
return
raise SupervisorError() from None

with TemporaryDirectory(dir=self.sys_config.path_tmp) as tmp_dir:
profile_file = Path(tmp_dir, 'apparmor.txt')
profile_file = Path(tmp_dir, "apparmor.txt")
try:
profile_file.write_text(data)
except OSError as err:
_LOGGER.error("Can't write temporary profile: %s", err)
return
raise SupervisorError() from None

try:
await self.sys_host.apparmor.load_profile(
"hassio-supervisor", profile_file)
"hassio-supervisor", profile_file
)
except HostAppArmorError:
_LOGGER.error("Can't update AppArmor profile!")
raise SupervisorError() from None

async def update(self, version=None):
async def update(self, version: Optional[str] = None) -> None:
"""Update Home Assistant version."""
version = version or self.last_version

@ -87,29 +108,31 @@ class Supervisor(CoreSysAttributes):
return

_LOGGER.info("Update Supervisor to version %s", version)
if await self.instance.install(version):
await self.update_apparmor()
self.sys_loop.call_later(1, self.sys_loop.stop)
return True
try:
await self.instance.install(version)
except DockerAPIError:
_LOGGER.error("Update of Hass.io fails!")
raise SupervisorUpdateError() from None

_LOGGER.error("Update of Hass.io fails!")
return False
with suppress(SupervisorError):
await self.update_apparmor()
self.sys_loop.call_later(1, self.sys_loop.stop)

@property
def in_progress(self):
def in_progress(self) -> bool:
"""Return True if a task is in progress."""
return self.instance.in_progress

def logs(self):
def logs(self) -> Awaitable[bytes]:
"""Get Supervisor docker logs.

Return a coroutine.
Return Coroutine.
"""
return self.instance.logs()

def stats(self):
"""Return stats of Supervisor.

Return a coroutine.
"""
return self.instance.stats()
async def stats(self) -> DockerStats:
"""Return stats of Supervisor."""
try:
return await self.instance.stats()
except DockerAPIError:
raise SupervisorError() from None
101
hassio/tasks.py
101
hassio/tasks.py
@ -7,7 +7,7 @@ from .exceptions import HomeAssistantError

_LOGGER = logging.getLogger(__name__)

HASS_WATCHDOG_API = 'HASS_WATCHDOG_API'
HASS_WATCHDOG_API = "HASS_WATCHDOG_API"

RUN_UPDATE_SUPERVISOR = 29100
RUN_UPDATE_ADDONS = 57600
@ -17,6 +17,7 @@ RUN_RELOAD_ADDONS = 21600
RUN_RELOAD_SNAPSHOTS = 72000
RUN_RELOAD_HOST = 72000
RUN_RELOAD_UPDATER = 21600
RUN_RELOAD_INGRESS = 930

RUN_WATCHDOG_HOMEASSISTANT_DOCKER = 15
RUN_WATCHDOG_HOMEASSISTANT_API = 300
@ -33,28 +34,55 @@ class Tasks(CoreSysAttributes):

async def load(self):
"""Add Tasks to scheduler."""
self.jobs.add(self.sys_scheduler.register_task(
self._update_addons, RUN_UPDATE_ADDONS))
self.jobs.add(self.sys_scheduler.register_task(
self._update_supervisor, RUN_UPDATE_SUPERVISOR))
self.jobs.add(self.sys_scheduler.register_task(
self._update_hassos_cli, RUN_UPDATE_HASSOSCLI))
# Update
self.jobs.add(
self.sys_scheduler.register_task(self._update_addons, RUN_UPDATE_ADDONS)
)
self.jobs.add(
self.sys_scheduler.register_task(
self._update_supervisor, RUN_UPDATE_SUPERVISOR
)
)
self.jobs.add(
self.sys_scheduler.register_task(
self._update_hassos_cli, RUN_UPDATE_HASSOSCLI
)
)

self.jobs.add(self.sys_scheduler.register_task(
self.sys_addons.reload, RUN_RELOAD_ADDONS))
self.jobs.add(self.sys_scheduler.register_task(
self.sys_updater.reload, RUN_RELOAD_UPDATER))
self.jobs.add(self.sys_scheduler.register_task(
self.sys_snapshots.reload, RUN_RELOAD_SNAPSHOTS))
self.jobs.add(self.sys_scheduler.register_task(
self.sys_host.reload, RUN_RELOAD_HOST))
# Reload
self.jobs.add(
self.sys_scheduler.register_task(self.sys_addons.reload, RUN_RELOAD_ADDONS)
)
self.jobs.add(
self.sys_scheduler.register_task(
self.sys_updater.reload, RUN_RELOAD_UPDATER
)
)
self.jobs.add(
self.sys_scheduler.register_task(
self.sys_snapshots.reload, RUN_RELOAD_SNAPSHOTS
)
)
self.jobs.add(
self.sys_scheduler.register_task(self.sys_host.reload, RUN_RELOAD_HOST)
)
self.jobs.add(
self.sys_scheduler.register_task(
self.sys_ingress.reload, RUN_RELOAD_INGRESS
)
)

self.jobs.add(self.sys_scheduler.register_task(
self._watchdog_homeassistant_docker,
RUN_WATCHDOG_HOMEASSISTANT_DOCKER))
self.jobs.add(self.sys_scheduler.register_task(
self._watchdog_homeassistant_api,
RUN_WATCHDOG_HOMEASSISTANT_API))
# Watchdog
self.jobs.add(
self.sys_scheduler.register_task(
self._watchdog_homeassistant_docker, RUN_WATCHDOG_HOMEASSISTANT_DOCKER
)
)
self.jobs.add(
self.sys_scheduler.register_task(
self._watchdog_homeassistant_api, RUN_WATCHDOG_HOMEASSISTANT_API
)
)

_LOGGER.info("All core tasks are scheduled")

@ -72,7 +100,8 @@ class Tasks(CoreSysAttributes):
tasks.append(addon.update())
else:
_LOGGER.warning(
"Add-on %s will be ignored, schema tests fails", addon.slug)
"Add-on %s will be ignored, schema tests fails", addon.slug
)

if tasks:
_LOGGER.info("Add-on auto update process %d tasks", len(tasks))
@ -94,14 +123,18 @@ class Tasks(CoreSysAttributes):
async def _watchdog_homeassistant_docker(self):
"""Check running state of Docker and start if they is close."""
# if Home Assistant is active
if not await self.sys_homeassistant.is_fails() or \
not self.sys_homeassistant.watchdog or \
self.sys_homeassistant.error_state:
if (
not await self.sys_homeassistant.is_fails()
or not self.sys_homeassistant.watchdog
or self.sys_homeassistant.error_state
):
return

# if Home Assistant is running
if self.sys_homeassistant.in_progress or \
await self.sys_homeassistant.is_running():
if (
self.sys_homeassistant.in_progress
or await self.sys_homeassistant.is_running()
):
return

_LOGGER.warning("Watchdog found a problem with Home Assistant Docker!")
@ -117,17 +150,21 @@ class Tasks(CoreSysAttributes):
a delay in our system.
"""
# If Home-Assistant is active
if not await self.sys_homeassistant.is_fails() or \
not self.sys_homeassistant.watchdog or \
self.sys_homeassistant.error_state:
if (
not await self.sys_homeassistant.is_fails()
or not self.sys_homeassistant.watchdog
or self.sys_homeassistant.error_state
):
return

# Init cache data
retry_scan = self._cache.get(HASS_WATCHDOG_API, 0)

# If Home-Assistant API is up
if self.sys_homeassistant.in_progress or \
await self.sys_homeassistant.check_api_state():
if (
self.sys_homeassistant.in_progress
or await self.sys_homeassistant.check_api_state()
):
return

# Look like we run into a problem
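The interval constants above are seconds between runs, so `RUN_RELOAD_INGRESS = 930` revalidates ingress sessions roughly every 15 minutes, just under the session window. `sys_scheduler.register_task` itself is not part of this diff; purely as a sketch of a plausible minimal equivalent:

```python
import asyncio


def register_task(coro_factory, interval: float) -> asyncio.Task:
    """Re-run a coroutine function forever on a fixed interval (sketch)."""

    async def _runner() -> None:
        while True:
            await asyncio.sleep(interval)
            await coro_factory()

    return asyncio.ensure_future(_runner())


async def main() -> None:
    async def reload_ingress() -> None:
        print("reload ingress sessions")

    register_task(reload_ingress, 930)  # RUN_RELOAD_INGRESS
    await asyncio.sleep(0)  # yield so the runner can start


# asyncio.run(main())
```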
@ -1,32 +1,26 @@
"""Tools file for Hass.io."""
from datetime import datetime
import hashlib
import logging
import re
import uuid
from datetime import datetime

_LOGGER = logging.getLogger(__name__)
RE_STRING = re.compile(r"\x1b(\[.*?[@-~]|\].*?(\x07|\x1b\\))")


def convert_to_ascii(raw):
def convert_to_ascii(raw) -> str:
"""Convert binary to ascii and remove colors."""
return RE_STRING.sub("", raw.decode())


def create_token():
"""Create token for API access."""
return hashlib.sha256(uuid.uuid4().bytes).hexdigest()


def process_lock(method):
"""Wrap function with only run once."""

async def wrap_api(api, *args, **kwargs):
"""Return api wrapper."""
if api.lock.locked():
_LOGGER.error(
"Can't execute %s while a task is in progress",
method.__name__)
"Can't execute %s while a task is in progress", method.__name__
)
return False

async with api.lock:
@ -40,6 +34,7 @@ class AsyncThrottle:
Decorator that prevents a function from being called more than once every
time period.
"""

def __init__(self, delta):
"""Initialize async throttle."""
self.throttle_period = delta
@ -47,6 +42,7 @@ class AsyncThrottle:

def __call__(self, method):
"""Throttle function"""

async def wrapper(*args, **kwargs):
"""Throttle function wrapper"""
now = datetime.now()
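The diff truncates `process_lock` at the `async with api.lock:` line. Completing it for context, the decorator serializes calls per object through an `asyncio.Lock` stored on the instance; the body inside the lock and the `functools.wraps` are assumptions, the rest mirrors the source above:

```python
import asyncio
import functools
import logging

_LOGGER = logging.getLogger(__name__)


def process_lock(method):
    """Wrap function with only run once."""

    @functools.wraps(method)
    async def wrap_api(api, *args, **kwargs):
        """Return api wrapper."""
        if api.lock.locked():
            _LOGGER.error(
                "Can't execute %s while a task is in progress", method.__name__
            )
            return False

        async with api.lock:
            # assumed completion: run the wrapped coroutine under the lock
            return await method(api, *args, **kwargs)

    return wrap_api


class Updater:
    """Example consumer: any object with a .lock attribute works."""

    def __init__(self) -> None:
        self.lock = asyncio.Lock()

    @process_lock
    async def update(self) -> None:
        await asyncio.sleep(1)
```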
@ -58,6 +58,11 @@ def parse_datetime(dt_str):
return datetime(**kws)


def utcnow():
def utcnow() -> datetime:
"""Return the current timestamp including timezone."""
return datetime.now(UTC)


def utc_from_timestamp(timestamp: float) -> datetime:
"""Return a UTC time from a timestamp."""
return UTC.localize(datetime.utcfromtimestamp(timestamp))
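`UTC` here is the pytz UTC timezone (pytz is pinned in requirements.txt), so the new helper returns timezone-aware datetimes that round-trip cleanly through `timestamp()`:

```python
from datetime import datetime

import pytz

UTC = pytz.utc


def utc_from_timestamp(timestamp: float) -> datetime:
    """Return a UTC time from a timestamp."""
    return UTC.localize(datetime.utcfromtimestamp(timestamp))


ts = 1_554_000_000.0  # arbitrary epoch timestamp
dt = utc_from_timestamp(ts)
assert dt.tzinfo is not None
assert dt.timestamp() == ts
```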
@ -1,36 +1,36 @@
"""Validate functions."""
import uuid
import re
import uuid

import voluptuous as vol

from .const import (
ATTR_IMAGE,
ATTR_LAST_VERSION,
ATTR_CHANNEL,
ATTR_TIMEZONE,
ATTR_HASSOS,
ATTR_ADDONS_CUSTOM_LIST,
ATTR_PASSWORD,
ATTR_HOMEASSISTANT,
ATTR_HASSIO,
ATTR_BOOT,
ATTR_LAST_BOOT,
ATTR_SSL,
ATTR_PORT,
ATTR_WATCHDOG,
ATTR_WAIT_BOOT,
ATTR_UUID,
ATTR_REFRESH_TOKEN,
ATTR_HASSOS_CLI,
ATTR_ACCESS_TOKEN,
CHANNEL_STABLE,
ATTR_ADDONS_CUSTOM_LIST,
ATTR_BOOT,
ATTR_CHANNEL,
ATTR_HASSIO,
ATTR_HASSOS,
ATTR_HASSOS_CLI,
ATTR_HOMEASSISTANT,
ATTR_IMAGE,
ATTR_LAST_BOOT,
ATTR_LAST_VERSION,
ATTR_PASSWORD,
ATTR_PORT,
ATTR_REFRESH_TOKEN,
ATTR_SESSION,
ATTR_SSL,
ATTR_TIMEZONE,
ATTR_UUID,
ATTR_WAIT_BOOT,
ATTR_WATCHDOG,
CHANNEL_BETA,
CHANNEL_DEV,
CHANNEL_STABLE,
)
from .utils.validate import validate_timezone


RE_REPOSITORY = re.compile(r"^(?P<url>[^#]+)(?:#(?P<branch>[\w\-]+))?$")

NETWORK_PORT = vol.All(vol.Coerce(int), vol.Range(min=1, max=65535))
@ -40,6 +40,7 @@ ALSA_DEVICE = vol.Maybe(vol.Match(r"\d+,\d+"))
CHANNELS = vol.In([CHANNEL_STABLE, CHANNEL_BETA, CHANNEL_DEV])
UUID_MATCH = vol.Match(r"^[0-9a-f]{32}$")
SHA256 = vol.Match(r"^[0-9a-f]{64}$")
TOKEN = vol.Match(r"^[0-9a-f]{32,256}$")


def validate_repository(repository):
@ -94,7 +95,7 @@ DOCKER_PORTS = vol.Schema(
SCHEMA_HASS_CONFIG = vol.Schema(
{
vol.Optional(ATTR_UUID, default=lambda: uuid.uuid4().hex): UUID_MATCH,
vol.Optional(ATTR_ACCESS_TOKEN): SHA256,
vol.Optional(ATTR_ACCESS_TOKEN): TOKEN,
vol.Optional(ATTR_BOOT, default=True): vol.Boolean(),
vol.Inclusive(ATTR_IMAGE, "custom_hass"): DOCKER_IMAGE,
vol.Inclusive(ATTR_LAST_VERSION, "custom_hass"): vol.Coerce(str),
@ -139,3 +140,9 @@ SCHEMA_HASSIO_CONFIG = vol.Schema(


SCHEMA_AUTH_CONFIG = vol.Schema({SHA256: SHA256})


SCHEMA_INGRESS_CONFIG = vol.Schema(
{vol.Required(ATTR_SESSION, default=dict): vol.Schema({TOKEN: vol.Coerce(float)})},
extra=vol.REMOVE_EXTRA,
)
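`SCHEMA_INGRESS_CONFIG` persists the session map as hex-token keys mapped to float expiry timestamps. A standalone check of the shape it accepts, assuming `ATTR_SESSION == "session"`:

```python
import voluptuous as vol

TOKEN = vol.Match(r"^[0-9a-f]{32,256}$")

SCHEMA_INGRESS_CONFIG = vol.Schema(
    {vol.Required("session", default=dict): vol.Schema({TOKEN: vol.Coerce(float)})},
    extra=vol.REMOVE_EXTRA,
)

# a 128-character hex session token mapped to an expiry timestamp
data = SCHEMA_INGRESS_CONFIG({"session": {"ab" * 64: 1554000000.0}})
assert data["session"]

# a missing key falls back to the default empty dict
assert SCHEMA_INGRESS_CONFIG({})["session"] == {}
```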
@ -1 +1 @@
Subproject commit 549db23ff5f2e7e967cd16f6807b9efa19dce1fe
Subproject commit cadcd845cc03f62b1de7fc02f5ce349dcecfe730
4
pylintrc
4
pylintrc
@ -45,3 +45,7 @@ disable=

[EXCEPTIONS]
overgeneral-exceptions=Exception


[TYPECHECK]
ignored-modules = distutils
@ -5,9 +5,9 @@ cchardet==2.1.4
colorlog==4.0.2
cpe==1.2.1
cryptography==2.6.1
docker==3.7.0
docker==3.7.2
gitpython==2.1.11
pytz==2018.9
pyudev==0.21.0
uvloop==0.11.3
uvloop==0.12.2
voluptuous==0.11.5
@ -14,4 +14,4 @@ use_parentheses = true

[flake8]
max-line-length = 88
ignore = E501
ignore = E501, W503
@ -11,32 +11,30 @@ from hassio.bootstrap import initialize_coresys
@pytest.fixture
def docker():
"""Mock Docker API."""
with patch('hassio.coresys.DockerAPI') as mock:
with patch("hassio.coresys.DockerAPI") as mock:
yield mock


@pytest.fixture
async def coresys(loop, docker):
"""Create a CoreSys Mock."""
with patch('hassio.bootstrap.initialize_system_data'):
with patch("hassio.bootstrap.initialize_system_data"):
coresys_obj = await initialize_coresys()

coresys_obj.ingress.save_data = MagicMock()

yield coresys_obj


@pytest.fixture
def sys_machine():
"""Mock sys_machine."""
with patch(
'hassio.coresys.CoreSys.machine',
new_callable=PropertyMock) as mock:
with patch("hassio.coresys.CoreSys.machine", new_callable=PropertyMock) as mock:
yield mock


@pytest.fixture
def sys_supervisor():
with patch(
'hassio.coresys.CoreSys.supervisor',
new_callable=PropertyMock) as mock:
with patch("hassio.coresys.CoreSys.supervisor", new_callable=PropertyMock) as mock:
mock.return_value = MagicMock()
yield MagicMock
@ -9,11 +9,14 @@ from hassio.discovery.validate import valid_discovery_config
def test_good_config():
"""Test good deconz config."""

valid_discovery_config("deconz", {"host": "test", "port": 3812})
valid_discovery_config(
"deconz",
{"host": "test", "port": 3812, "api_key": "MY_api_KEY99", "serial": "xyz"},
)


def test_bad_config():
"""Test good deconz config."""

with pytest.raises(vol.Invalid):
valid_discovery_config("deconz", {"host": "test"})
valid_discovery_config("deconz", {"host": "test", "port": 8080})
22
tests/test_ingress.py
Normal file
22
tests/test_ingress.py
Normal file
@ -0,0 +1,22 @@
"""Test ingress."""
from datetime import timedelta

from hassio.utils.dt import utc_from_timestamp


def test_session_handling(coresys):
"""Create and test session."""
session = coresys.ingress.create_session()
validate = coresys.ingress.sessions[session]

assert coresys.ingress.save_data.called
assert session
assert validate

assert coresys.ingress.validate_session(session)
assert coresys.ingress.sessions[session] != validate

not_valid = utc_from_timestamp(validate) - timedelta(minutes=20)
coresys.ingress.sessions[session] = not_valid.timestamp()
assert not coresys.ingress.validate_session(session)
assert not coresys.ingress.validate_session("invalid session")