Merge pull request #1209 from home-assistant/dev

Release 174
Pascal Vizeli 2019-08-14 15:38:57 +02:00 committed by GitHub
commit 3cf189ad94
47 changed files with 1107 additions and 199 deletions


@ -1,8 +1,8 @@
FROM python:3.7
WORKDIR /workspace
WORKDIR /workspaces
# install Node/Yarn for Frontend
# Install Node/Yarn for Frontend
RUN apt-get update && apt-get install -y --no-install-recommends \
curl \
git \
@ -17,8 +17,24 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
&& rm -rf /var/lib/apt/lists/*
ENV NVM_DIR /root/.nvm
# Install docker
# https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/
RUN apt-get update && apt-get install -y --no-install-recommends \
apt-transport-https \
ca-certificates \
curl \
software-properties-common \
gpg-agent \
&& curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \
&& add-apt-repository "deb https://download.docker.com/linux/debian $(lsb_release -cs) stable" \
&& apt-get update && apt-get install -y --no-install-recommends \
docker-ce \
docker-ce-cli \
containerd.io \
&& rm -rf /var/lib/apt/lists/*
# Install Python dependencies from requirements.txt if it exists
COPY requirements.txt requirements_tests.txt /workspace/
COPY requirements.txt requirements_tests.txt /workspaces/
RUN pip install -r requirements.txt \
&& pip3 install -r requirements_tests.txt \
&& pip install black tox


@ -3,9 +3,11 @@
"name": "Hass.io dev",
"context": "..",
"dockerFile": "Dockerfile",
"appPort": "9123:8123",
"runArgs": [
"-e",
"GIT_EDTIOR='code --wait'"
"GIT_EDITOR='code --wait'",
"--privileged"
],
"extensions": [
"ms-python.python"
@ -24,4 +26,4 @@
"editor.formatOnType": true,
"files.trimTrailingWhitespace": true
}
}
}


@ -18,3 +18,6 @@ venv/
home-assistant-polymer/*
misc/*
script/*
# Test ENV
data/

75
.vscode/tasks.json vendored

@ -1,10 +1,38 @@
{
"version": "2.0.0",
"tasks": [
{
"label": "Run Testenv",
"type": "shell",
"command": "./scripts/test_env.sh",
"group": {
"kind": "test",
"isDefault": true,
},
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": []
},
{
"label": "Run Testenv CLI",
"type": "shell",
"command": "docker run --rm -ti -v /etc/machine-id:/etc/machine-id --network=hassio --add-host hassio:172.30.32.2 homeassistant/amd64-hassio-cli:dev",
"group": {
"kind": "test",
"isDefault": true,
},
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": []
},
{
"label": "Update UI",
"type": "shell",
"command": "./script/update-frontend.sh",
"command": "./scripts/update-frontend.sh",
"group": {
"kind": "build",
"isDefault": true
@ -14,6 +42,51 @@
"panel": "new"
},
"problemMatcher": []
},
{
"label": "Pytest",
"type": "shell",
"command": "pytest --timeout=10 tests",
"group": {
"kind": "test",
"isDefault": true,
},
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": []
},
{
"label": "Flake8",
"type": "shell",
"command": "flake8 hassio tests",
"group": {
"kind": "test",
"isDefault": true,
},
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": []
},
{
"label": "Pylint",
"type": "shell",
"command": "pylint hassio",
"dependsOn": [
"Install all Requirements"
],
"group": {
"kind": "test",
"isDefault": true,
},
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": []
}
]
}

48
API.md

@ -105,6 +105,7 @@ Output is the raw docker log.
"cpu_percent": 0.0,
"memory_usage": 283123,
"memory_limit": 329392,
"memory_percent": 1.4,
"network_tx": 0,
"network_rx": 0,
"blk_read": 0,
@ -421,6 +422,7 @@ Proxy to real websocket instance.
"cpu_percent": 0.0,
"memory_usage": 283123,
"memory_limit": 329392,
"memory_percent": 1.4,
"network_tx": 0,
"network_rx": 0,
"blk_read": 0,
@ -473,6 +475,8 @@ Get all available addons.
{
"name": "xy bla",
"slug": "xdssd_xybla",
"hostname": "xdssd-xybla",
"dns": [],
"description": "description",
"long_description": "null|markdown",
"auto_update": "bool",
@ -498,6 +502,7 @@ Get all available addons.
"privileged": ["NET_ADMIN", "SYS_ADMIN"],
"apparmor": "disable|default|profile",
"devices": ["/dev/xy"],
"udev": "bool",
"auto_uart": "bool",
"icon": "bool",
"logo": "bool",
@ -593,6 +598,7 @@ Write data to add-on stdin
"cpu_percent": 0.0,
"memory_usage": 283123,
"memory_limit": 329392,
"memory_percent": 1.4,
"network_tx": 0,
"network_rx": 0,
"blk_read": 0,
@ -739,6 +745,48 @@ return:
}
```
### DNS
- GET `/dns/info`
```json
{
"host": "ip-address",
"version": "1",
"latest_version": "2",
"servers": ["dns://8.8.8.8"]
}
```
- POST `/dns/options`
```json
{
"servers": ["dns://8.8.8.8"]
}
```
- POST `/dns/update`
```json
{
"version": "VERSION"
}
```
- GET `/dns/logs`
- GET `/dns/stats`
```json
{
"cpu_percent": 0.0,
"memory_usage": 283123,
"memory_limit": 329392,
"memory_percent": 1.4,
"network_tx": 0,
"network_rx": 0,
"blk_read": 0,
"blk_write": 0
}
```
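For a quick smoke test of these endpoints, a hypothetical Python client is sketched below. It assumes the usual add-on environment: a `HASSIO_TOKEN` is injected, the API is reachable at `http://hassio`, and the token is passed via the `X-Hassio-Key` header used elsewhere in this API; adjust as needed.
```python
# Hypothetical client for the /dns endpoints (assumes HASSIO_TOKEN is set and
# http://hassio resolves to the Supervisor API from inside an add-on).
import os

import requests

HEADERS = {"X-Hassio-Key": os.environ.get("HASSIO_TOKEN", "")}

# Read DNS info: version, host IP and configured upstream servers
info = requests.get("http://hassio/dns/info", headers=HEADERS).json()
print(info)

# Replace the upstream server list (validated against DNS_SERVER_LIST)
requests.post(
    "http://hassio/dns/options",
    headers=HEADERS,
    json={"servers": ["dns://8.8.8.8"]},
)
```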
### Auth / SSO API
You can use the user system on homeassistant. We handle this auth system on


@ -9,7 +9,7 @@ import secrets
import shutil
import tarfile
from tempfile import TemporaryDirectory
from typing import Any, Awaitable, Dict, Optional
from typing import Any, Awaitable, Dict, List, Optional
import voluptuous as vol
from voluptuous.humanize import humanize_error
@ -35,6 +35,7 @@ from ..const import (
ATTR_USER,
ATTR_UUID,
ATTR_VERSION,
DNS_SUFFIX,
STATE_NONE,
STATE_STARTED,
STATE_STOPPED,
@ -119,6 +120,11 @@ class Addon(AddonModel):
"""Return installed version."""
return self.persist[ATTR_VERSION]
@property
def dns(self) -> List[str]:
"""Return list of DNS name for that add-on."""
return [f"{self.hostname}.{DNS_SUFFIX}"]
@property
def options(self) -> Dict[str, Any]:
"""Return options with local changes."""


@ -51,6 +51,7 @@ from ..const import (
ATTR_STDIN,
ATTR_TIMEOUT,
ATTR_TMPFS,
ATTR_UDEV,
ATTR_URL,
ATTR_VERSION,
ATTR_WEBUI,
@ -109,6 +110,16 @@ class AddonModel(CoreSysAttributes):
"""Return name of add-on."""
return self.data[ATTR_NAME]
@property
def hostname(self) -> str:
"""Return slug/id of add-on."""
return self.slug.replace("_", "-")
@property
def dns(self) -> List[str]:
"""Return list of DNS name for that add-on."""
return []
@property
def timeout(self) -> int:
"""Return timeout of addon for docker stop."""
@ -333,6 +344,11 @@ class AddonModel(CoreSysAttributes):
"""Return True if the add-on access to GPIO interface."""
return self.data[ATTR_GPIO]
@property
def with_udev(self) -> bool:
"""Return True if the add-on have his own udev."""
return self.data[ATTR_UDEV]
@property
def with_kernel_modules(self) -> bool:
"""Return True if the add-on access to kernel modules."""


@ -68,6 +68,7 @@ from ..const import (
ATTR_SYSTEM,
ATTR_TIMEOUT,
ATTR_TMPFS,
ATTR_UDEV,
ATTR_URL,
ATTR_USER,
ATTR_UUID,
@ -186,6 +187,7 @@ SCHEMA_ADDON_CONFIG = vol.Schema(
vol.Optional(ATTR_HOST_DBUS, default=False): vol.Boolean(),
vol.Optional(ATTR_DEVICES): [vol.Match(r"^(.*):(.*):([rwm]{1,3})$")],
vol.Optional(ATTR_AUTO_UART, default=False): vol.Boolean(),
vol.Optional(ATTR_UDEV, default=False): vol.Boolean(),
vol.Optional(ATTR_TMPFS): vol.Match(r"^size=(\d)*[kmg](,uid=\d{1,4})?(,rw)?$"),
vol.Optional(ATTR_MAP, default=list): [vol.Match(RE_VOLUME)],
vol.Optional(ATTR_ENVIRONMENT): {vol.Match(r"\w*"): vol.Coerce(str)},


@ -9,6 +9,7 @@ from ..coresys import CoreSys, CoreSysAttributes
from .addons import APIAddons
from .auth import APIAuth
from .discovery import APIDiscovery
from .dns import APICoreDNS
from .hardware import APIHardware
from .hassos import APIHassOS
from .homeassistant import APIHomeAssistant
@ -55,6 +56,7 @@ class RestAPI(CoreSysAttributes):
self._register_services()
self._register_info()
self._register_auth()
self._register_dns()
def _register_host(self) -> None:
"""Register hostcontrol functions."""
@ -264,6 +266,21 @@ class RestAPI(CoreSysAttributes):
]
)
def _register_dns(self) -> None:
"""Register DNS functions."""
api_dns = APICoreDNS()
api_dns.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/dns/info", api_dns.info),
web.get("/dns/stats", api_dns.stats),
web.get("/dns/logs", api_dns.logs),
web.post("/dns/update", api_dns.update),
web.post("/dns/options", api_dns.options),
]
)
def _register_panel(self) -> None:
"""Register panel for Home Assistant."""
panel_dir = Path(__file__).parent.joinpath("panel")


@ -8,6 +8,7 @@ import voluptuous as vol
from voluptuous.humanize import humanize_error
from ..addons import AnyAddon
from ..docker.stats import DockerStats
from ..addons.utils import rating_security
from ..const import (
ATTR_ADDONS,
@ -30,6 +31,7 @@ from ..const import (
ATTR_DEVICES,
ATTR_DEVICETREE,
ATTR_DISCOVERY,
ATTR_DNS,
ATTR_DOCKER_API,
ATTR_FULL_ACCESS,
ATTR_GPIO,
@ -41,6 +43,7 @@ from ..const import (
ATTR_HOST_IPC,
ATTR_HOST_NETWORK,
ATTR_HOST_PID,
ATTR_HOSTNAME,
ATTR_ICON,
ATTR_INGRESS,
ATTR_INGRESS_ENTRY,
@ -56,6 +59,7 @@ from ..const import (
ATTR_MACHINE,
ATTR_MAINTAINER,
ATTR_MEMORY_LIMIT,
ATTR_MEMORY_PERCENT,
ATTR_MEMORY_USAGE,
ATTR_NAME,
ATTR_NETWORK,
@ -73,6 +77,7 @@ from ..const import (
ATTR_SOURCE,
ATTR_STATE,
ATTR_STDIN,
ATTR_UDEV,
ATTR_URL,
ATTR_VERSION,
ATTR_WEBUI,
@ -116,7 +121,7 @@ class APIAddons(CoreSysAttributes):
self, request: web.Request, check_installed: bool = True
) -> AnyAddon:
"""Return addon, throw an exception it it doesn't exist."""
addon_slug = request.match_info.get("addon")
addon_slug: str = request.match_info.get("addon")
# Lookup itself
if addon_slug == "self":
@ -175,11 +180,13 @@ class APIAddons(CoreSysAttributes):
@api_process
async def info(self, request: web.Request) -> Dict[str, Any]:
"""Return add-on information."""
addon = self._extract_addon(request, check_installed=False)
addon: AnyAddon = self._extract_addon(request, check_installed=False)
data = {
ATTR_NAME: addon.name,
ATTR_SLUG: addon.slug,
ATTR_HOSTNAME: addon.hostname,
ATTR_DNS: addon.dns,
ATTR_DESCRIPTON: addon.description,
ATTR_LONG_DESCRIPTION: addon.long_description,
ATTR_AUTO_UPDATE: None,
@ -220,6 +227,7 @@ class APIAddons(CoreSysAttributes):
ATTR_GPIO: addon.with_gpio,
ATTR_KERNEL_MODULES: addon.with_kernel_modules,
ATTR_DEVICETREE: addon.with_devicetree,
ATTR_UDEV: addon.with_udev,
ATTR_DOCKER_API: addon.access_docker_api,
ATTR_AUDIO: addon.with_audio,
ATTR_AUDIO_INPUT: None,
@ -256,12 +264,12 @@ class APIAddons(CoreSysAttributes):
@api_process
async def options(self, request: web.Request) -> None:
"""Store user options for add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
addon_schema = SCHEMA_OPTIONS.extend(
{vol.Optional(ATTR_OPTIONS): vol.Any(None, addon.schema)}
)
body = await api_validate(addon_schema, request)
body: Dict[str, Any] = await api_validate(addon_schema, request)
if ATTR_OPTIONS in body:
addon.options = body[ATTR_OPTIONS]
@ -284,8 +292,8 @@ class APIAddons(CoreSysAttributes):
@api_process
async def security(self, request: web.Request) -> None:
"""Store security options for add-on."""
addon = self._extract_addon(request)
body = await api_validate(SCHEMA_SECURITY, request)
addon: AnyAddon = self._extract_addon(request)
body: Dict[str, Any] = await api_validate(SCHEMA_SECURITY, request)
if ATTR_PROTECTED in body:
_LOGGER.warning("Protected flag changing for %s!", addon.slug)
@ -296,13 +304,14 @@ class APIAddons(CoreSysAttributes):
@api_process
async def stats(self, request: web.Request) -> Dict[str, Any]:
"""Return resource information."""
addon = self._extract_addon(request)
stats = await addon.stats()
addon: AnyAddon = self._extract_addon(request)
stats: DockerStats = await addon.stats()
return {
ATTR_CPU_PERCENT: stats.cpu_percent,
ATTR_MEMORY_USAGE: stats.memory_usage,
ATTR_MEMORY_LIMIT: stats.memory_limit,
ATTR_MEMORY_PERCENT: stats.memory_percent,
ATTR_NETWORK_RX: stats.network_rx,
ATTR_NETWORK_TX: stats.network_tx,
ATTR_BLK_READ: stats.blk_read,
@ -312,19 +321,19 @@ class APIAddons(CoreSysAttributes):
@api_process
def install(self, request: web.Request) -> Awaitable[None]:
"""Install add-on."""
addon = self._extract_addon(request, check_installed=False)
addon: AnyAddon = self._extract_addon(request, check_installed=False)
return asyncio.shield(addon.install())
@api_process
def uninstall(self, request: web.Request) -> Awaitable[None]:
"""Uninstall add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
return asyncio.shield(addon.uninstall())
@api_process
def start(self, request: web.Request) -> Awaitable[None]:
"""Start add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
# check options
options = addon.options
@ -338,13 +347,13 @@ class APIAddons(CoreSysAttributes):
@api_process
def stop(self, request: web.Request) -> Awaitable[None]:
"""Stop add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
return asyncio.shield(addon.stop())
@api_process
def update(self, request: web.Request) -> Awaitable[None]:
"""Update add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
if addon.latest_version == addon.version:
raise APIError("No update available!")
@ -354,13 +363,13 @@ class APIAddons(CoreSysAttributes):
@api_process
def restart(self, request: web.Request) -> Awaitable[None]:
"""Restart add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
return asyncio.shield(addon.restart())
@api_process
def rebuild(self, request: web.Request) -> Awaitable[None]:
"""Rebuild local build add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
if not addon.need_build:
raise APIError("Only local build addons are supported")
@ -369,13 +378,13 @@ class APIAddons(CoreSysAttributes):
@api_process_raw(CONTENT_TYPE_BINARY)
def logs(self, request: web.Request) -> Awaitable[bytes]:
"""Return logs from add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
return addon.logs()
@api_process_raw(CONTENT_TYPE_PNG)
async def icon(self, request: web.Request) -> bytes:
"""Return icon from add-on."""
addon = self._extract_addon(request, check_installed=False)
addon: AnyAddon = self._extract_addon(request, check_installed=False)
if not addon.with_icon:
raise APIError("No icon found!")
@ -385,7 +394,7 @@ class APIAddons(CoreSysAttributes):
@api_process_raw(CONTENT_TYPE_PNG)
async def logo(self, request: web.Request) -> bytes:
"""Return logo from add-on."""
addon = self._extract_addon(request, check_installed=False)
addon: AnyAddon = self._extract_addon(request, check_installed=False)
if not addon.with_logo:
raise APIError("No logo found!")
@ -395,7 +404,7 @@ class APIAddons(CoreSysAttributes):
@api_process_raw(CONTENT_TYPE_TEXT)
async def changelog(self, request: web.Request) -> str:
"""Return changelog from add-on."""
addon = self._extract_addon(request, check_installed=False)
addon: AnyAddon = self._extract_addon(request, check_installed=False)
if not addon.with_changelog:
raise APIError("No changelog found!")
@ -405,7 +414,7 @@ class APIAddons(CoreSysAttributes):
@api_process
async def stdin(self, request: web.Request) -> None:
"""Write to stdin of add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
if not addon.with_stdin:
raise APIError("STDIN not supported by add-on")

89
hassio/api/dns.py Normal file

@ -0,0 +1,89 @@
"""Init file for Hass.io DNS RESTful API."""
import asyncio
import logging
from typing import Any, Awaitable, Dict
from aiohttp import web
import voluptuous as vol
from ..const import (
ATTR_BLK_READ,
ATTR_BLK_WRITE,
ATTR_CPU_PERCENT,
ATTR_HOST,
ATTR_LATEST_VERSION,
ATTR_MEMORY_LIMIT,
ATTR_MEMORY_USAGE,
ATTR_MEMORY_PERCENT,
ATTR_NETWORK_RX,
ATTR_NETWORK_TX,
ATTR_SERVERS,
ATTR_VERSION,
CONTENT_TYPE_BINARY,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIError
from ..validate import DNS_SERVER_LIST
from .utils import api_process, api_process_raw, api_validate
_LOGGER = logging.getLogger(__name__)
# pylint: disable=no-value-for-parameter
SCHEMA_OPTIONS = vol.Schema({vol.Optional(ATTR_SERVERS): DNS_SERVER_LIST})
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): vol.Coerce(str)})
class APICoreDNS(CoreSysAttributes):
"""Handle RESTful API for DNS functions."""
@api_process
async def info(self, request: web.Request) -> Dict[str, Any]:
"""Return DNS information."""
return {
ATTR_VERSION: self.sys_dns.version,
ATTR_LATEST_VERSION: self.sys_dns.latest_version,
ATTR_HOST: str(self.sys_docker.network.dns),
ATTR_SERVERS: self.sys_dns.servers,
}
@api_process
async def options(self, request: web.Request) -> None:
"""Set DNS options."""
body = await api_validate(SCHEMA_OPTIONS, request)
if ATTR_SERVERS in body:
self.sys_dns.servers = body[ATTR_SERVERS]
self.sys_dns.save_data()
@api_process
async def stats(self, request: web.Request) -> Dict[str, Any]:
"""Return resource information."""
stats = await self.sys_dns.stats()
return {
ATTR_CPU_PERCENT: stats.cpu_percent,
ATTR_MEMORY_USAGE: stats.memory_usage,
ATTR_MEMORY_LIMIT: stats.memory_limit,
ATTR_MEMORY_PERCENT: stats.memory_percent,
ATTR_NETWORK_RX: stats.network_rx,
ATTR_NETWORK_TX: stats.network_tx,
ATTR_BLK_READ: stats.blk_read,
ATTR_BLK_WRITE: stats.blk_write,
}
@api_process
async def update(self, request: web.Request) -> None:
"""Update DNS plugin."""
body = await api_validate(SCHEMA_VERSION, request)
version = body.get(ATTR_VERSION, self.sys_dns.latest_version)
if version == self.sys_dns.version:
raise APIError("Version {} is already in use".format(version))
await asyncio.shield(self.sys_dns.update(version))
@api_process_raw(CONTENT_TYPE_BINARY)
def logs(self, request: web.Request) -> Awaitable[bytes]:
"""Return DNS Docker logs."""
return self.sys_dns.logs()


@ -22,7 +22,9 @@ class APIHardware(CoreSysAttributes):
async def info(self, request):
"""Show hardware info."""
return {
ATTR_SERIAL: list(self.sys_hardware.serial_devices),
ATTR_SERIAL: list(
self.sys_hardware.serial_devices | self.sys_hardware.serial_by_id
),
ATTR_INPUT: list(self.sys_hardware.input_devices),
ATTR_DISK: list(self.sys_hardware.disk_devices),
ATTR_GPIO: list(self.sys_hardware.gpio_devices),


@ -18,6 +18,7 @@ from ..const import (
ATTR_MACHINE,
ATTR_MEMORY_LIMIT,
ATTR_MEMORY_USAGE,
ATTR_MEMORY_PERCENT,
ATTR_NETWORK_RX,
ATTR_NETWORK_TX,
ATTR_PASSWORD,
@ -121,6 +122,7 @@ class APIHomeAssistant(CoreSysAttributes):
ATTR_CPU_PERCENT: stats.cpu_percent,
ATTR_MEMORY_USAGE: stats.memory_usage,
ATTR_MEMORY_LIMIT: stats.memory_limit,
ATTR_MEMORY_PERCENT: stats.memory_percent,
ATTR_NETWORK_RX: stats.network_rx,
ATTR_NETWORK_TX: stats.network_tx,
ATTR_BLK_READ: stats.blk_read,


@ -25,6 +25,7 @@ from ..const import (
ATTR_LOGO,
ATTR_MEMORY_LIMIT,
ATTR_MEMORY_USAGE,
ATTR_MEMORY_PERCENT,
ATTR_NAME,
ATTR_NETWORK_RX,
ATTR_NETWORK_TX,
@ -140,6 +141,7 @@ class APISupervisor(CoreSysAttributes):
ATTR_CPU_PERCENT: stats.cpu_percent,
ATTR_MEMORY_USAGE: stats.memory_usage,
ATTR_MEMORY_LIMIT: stats.memory_limit,
ATTR_MEMORY_PERCENT: stats.memory_percent,
ATTR_NETWORK_RX: stats.network_rx,
ATTR_NETWORK_TX: stats.network_tx,
ATTR_BLK_READ: stats.blk_read,


@ -10,6 +10,8 @@ from .utils.json import read_json_file
_LOGGER = logging.getLogger(__name__)
ARCH_JSON: Path = Path(__file__).parent.joinpath("data/arch.json")
MAP_CPU = {
"armv7": "armv7",
"armv6": "armhf",
@ -47,7 +49,7 @@ class CpuArch(CoreSysAttributes):
async def load(self) -> None:
"""Load data and initialize default arch."""
try:
arch_data = read_json_file(Path(__file__).parent.joinpath("arch.json"))
arch_data = read_json_file(ARCH_JSON)
except JsonFileError:
_LOGGER.warning("Can't read arch json")
return


@ -11,11 +11,12 @@ from .addons import AddonManager
from .api import RestAPI
from .arch import CpuArch
from .auth import Auth
from .const import SOCKET_DOCKER
from .const import CHANNEL_DEV, SOCKET_DOCKER
from .core import HassIO
from .coresys import CoreSys
from .dbus import DBusManager
from .discovery import Discovery
from .dns import CoreDNS
from .hassos import HassOS
from .homeassistant import HomeAssistant
from .host import HostManager
@ -43,6 +44,7 @@ async def initialize_coresys():
# Initialize core objects
coresys.core = HassIO(coresys)
coresys.dns = CoreDNS(coresys)
coresys.arch = CpuArch(coresys)
coresys.auth = Auth(coresys)
coresys.updater = Updater(coresys)
@ -127,9 +129,21 @@ def initialize_system_data(coresys: CoreSys):
_LOGGER.info("Create Hass.io Apparmor folder %s", config.path_apparmor)
config.path_apparmor.mkdir()
# dns folder
if not config.path_dns.is_dir():
_LOGGER.info("Create Hass.io DNS folder %s", config.path_dns)
config.path_dns.mkdir()
# Update log level
coresys.config.modify_log_level()
# Check if ENV is in development mode
if bool(os.environ.get("SUPERVISOR_DEV", 0)):
_LOGGER.warning("SUPERVISOR_DEV is set")
coresys.updater.channel = CHANNEL_DEV
coresys.config.logging = "debug"
coresys.config.debug = True
def migrate_system_env(coresys: CoreSys):
"""Cleanup some stuff after update."""


@ -34,6 +34,7 @@ BACKUP_DATA = PurePath("backup")
SHARE_DATA = PurePath("share")
TMP_DATA = PurePath("tmp")
APPARMOR_DATA = PurePath("apparmor")
DNS_DATA = PurePath("dns")
DEFAULT_BOOT_TIME = datetime.utcfromtimestamp(0).isoformat()
@ -99,7 +100,7 @@ class CoreConfig(JsonConfig):
def modify_log_level(self) -> None:
"""Change log level."""
lvl = getattr(logging, self.logging.upper())
logging.basicConfig(level=lvl)
logging.getLogger("hassio").setLevel(lvl)
@property
def last_boot(self):
@ -211,6 +212,16 @@ class CoreConfig(JsonConfig):
"""Return root share data folder external for Docker."""
return PurePath(self.path_extern_hassio, SHARE_DATA)
@property
def path_extern_dns(self):
"""Return dns path external for Docker."""
return str(PurePath(self.path_extern_hassio, DNS_DATA))
@property
def path_dns(self):
"""Return dns path inside supervisor."""
return Path(HASSIO_DATA, DNS_DATA)
@property
def addons_repositories(self):
"""Return list of custom Add-on repositories."""


@ -3,7 +3,7 @@ from pathlib import Path
from ipaddress import ip_network
HASSIO_VERSION = "173"
HASSIO_VERSION = "174"
URL_HASSIO_ADDONS = "https://github.com/home-assistant/hassio-addons"
URL_HASSIO_VERSION = "https://version.home-assistant.io/{channel}.json"
@ -24,6 +24,7 @@ FILE_HASSIO_UPDATER = Path(HASSIO_DATA, "updater.json")
FILE_HASSIO_SERVICES = Path(HASSIO_DATA, "services.json")
FILE_HASSIO_DISCOVERY = Path(HASSIO_DATA, "discovery.json")
FILE_HASSIO_INGRESS = Path(HASSIO_DATA, "ingress.json")
FILE_HASSIO_DNS = Path(HASSIO_DATA, "dns.json")
SOCKET_DOCKER = Path("/var/run/docker.sock")
@ -31,6 +32,9 @@ DOCKER_NETWORK = "hassio"
DOCKER_NETWORK_MASK = ip_network("172.30.32.0/23")
DOCKER_NETWORK_RANGE = ip_network("172.30.33.0/24")
DNS_SERVERS = ["dns://8.8.8.8", "dns://1.1.1.1"]
DNS_SUFFIX = "local.hass.io"
LABEL_VERSION = "io.hass.version"
LABEL_ARCH = "io.hass.arch"
LABEL_TYPE = "io.hass.type"
@ -86,6 +90,7 @@ ATTR_VERSION_LATEST = "version_latest"
ATTR_AUTO_UART = "auto_uart"
ATTR_LAST_BOOT = "last_boot"
ATTR_LAST_VERSION = "last_version"
ATTR_LATEST_VERSION = "latest_version"
ATTR_CHANNEL = "channel"
ATTR_NAME = "name"
ATTR_SLUG = "slug"
@ -159,6 +164,7 @@ ATTR_NETWORK_RX = "network_rx"
ATTR_NETWORK_TX = "network_tx"
ATTR_MEMORY_LIMIT = "memory_limit"
ATTR_MEMORY_USAGE = "memory_usage"
ATTR_MEMORY_PERCENT = "memory_percent"
ATTR_BLK_READ = "blk_read"
ATTR_BLK_WRITE = "blk_write"
ATTR_ADDON = "addon"
@ -210,6 +216,9 @@ ATTR_ADMIN = "admin"
ATTR_PANELS = "panels"
ATTR_DEBUG = "debug"
ATTR_DEBUG_BLOCK = "debug_block"
ATTR_DNS = "dns"
ATTR_SERVERS = "servers"
ATTR_UDEV = "udev"
PROVIDE_SERVICE = "provide"
NEED_SERVICE = "need"


@ -30,6 +30,9 @@ class HassIO(CoreSysAttributes):
async def setup(self):
"""Setup HassIO orchestration."""
# Load CoreDNS
await self.sys_dns.load()
# Load DBus
await self.sys_dbus.load()
@ -69,9 +72,6 @@ class HassIO(CoreSysAttributes):
# Load ingress
await self.sys_ingress.load()
# start dns forwarding
self.sys_create_task(self.sys_dns.start())
async def start(self):
"""Start Hass.io orchestration."""
await self.sys_api.start()
@ -142,10 +142,10 @@ class HassIO(CoreSysAttributes):
await asyncio.wait(
[
self.sys_api.stop(),
self.sys_dns.stop(),
self.sys_websession.close(),
self.sys_websession_ssl.close(),
self.sys_ingress.unload(),
self.sys_dns.unload(),
]
)
except asyncio.TimeoutError:
@ -176,6 +176,7 @@ class HassIO(CoreSysAttributes):
await self.sys_run_in_executor(self.sys_docker.repair)
# Restore core functionality
await self.sys_dns.repair()
await self.sys_addons.repair()
await self.sys_homeassistant.repair()


@ -1,14 +1,13 @@
"""Handle core shared data."""
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Optional
import aiohttp
from .config import CoreConfig
from .const import CHANNEL_DEV
from .docker import DockerAPI
from .misc.dns import DNSForward
from .misc.hardware import Hardware
from .misc.scheduler import Scheduler
@ -20,6 +19,7 @@ if TYPE_CHECKING:
from .core import HassIO
from .dbus import DBusManager
from .discovery import Discovery
from .dns import CoreDNS
from .hassos import HassOS
from .homeassistant import HomeAssistant
from .host import HostManager
@ -52,26 +52,26 @@ class CoreSys:
self._hardware: Hardware = Hardware()
self._docker: DockerAPI = DockerAPI()
self._scheduler: Scheduler = Scheduler()
self._dns: DNSForward = DNSForward()
# Internal objects pointers
self._core: HassIO = None
self._arch: CpuArch = None
self._auth: Auth = None
self._homeassistant: HomeAssistant = None
self._supervisor: Supervisor = None
self._addons: AddonManager = None
self._api: RestAPI = None
self._updater: Updater = None
self._snapshots: SnapshotManager = None
self._tasks: Tasks = None
self._host: HostManager = None
self._ingress: Ingress = None
self._dbus: DBusManager = None
self._hassos: HassOS = None
self._services: ServiceManager = None
self._store: StoreManager = None
self._discovery: Discovery = None
self._core: Optional[HassIO] = None
self._arch: Optional[CpuArch] = None
self._auth: Optional[Auth] = None
self._dns: Optional[CoreDNS] = None
self._homeassistant: Optional[HomeAssistant] = None
self._supervisor: Optional[Supervisor] = None
self._addons: Optional[AddonManager] = None
self._api: Optional[RestAPI] = None
self._updater: Optional[Updater] = None
self._snapshots: Optional[SnapshotManager] = None
self._tasks: Optional[Tasks] = None
self._host: Optional[HostManager] = None
self._ingress: Optional[Ingress] = None
self._dbus: Optional[DBusManager] = None
self._hassos: Optional[HassOS] = None
self._services: Optional[ServiceManager] = None
self._store: Optional[StoreManager] = None
self._discovery: Optional[Discovery] = None
@property
def machine(self) -> str:
@ -125,11 +125,6 @@ class CoreSys:
"""Return Scheduler object."""
return self._scheduler
@property
def dns(self) -> DNSForward:
"""Return DNSForward object."""
return self._dns
@property
def core(self) -> HassIO:
"""Return HassIO object."""
@ -298,6 +293,18 @@ class CoreSys:
raise RuntimeError("DBusManager already set!")
self._dbus = value
@property
def dns(self) -> CoreDNS:
"""Return CoreDNS object."""
return self._dns
@dns.setter
def dns(self, value: CoreDNS):
"""Set a CoreDNS object."""
if self._dns:
raise RuntimeError("CoreDNS already set!")
self._dns = value
@property
def host(self) -> HostManager:
"""Return HostManager object."""
@ -395,11 +402,6 @@ class CoreSysAttributes:
"""Return Scheduler object."""
return self.coresys.scheduler
@property
def sys_dns(self) -> DNSForward:
"""Return DNSForward object."""
return self.coresys.dns
@property
def sys_core(self) -> HassIO:
"""Return HassIO object."""
@ -470,6 +472,11 @@ class CoreSysAttributes:
"""Return DBusManager object."""
return self.coresys.dbus
@property
def sys_dns(self) -> CoreDNS:
"""Return CoreDNS object."""
return self.coresys.dns
@property
def sys_host(self) -> HostManager:
"""Return HostManager object."""

9
hassio/data/coredns.tmpl Normal file

@ -0,0 +1,9 @@
.:53 {
log
hosts /config/hosts {
fallthrough
}
forward . $servers {
health_check 10s
}
}
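The `$servers` placeholder is filled by `CoreDNS._write_corefile()` in `hassio/dns.py` (shown further below) via `string.Template`; a minimal sketch of that substitution, with an illustrative template path:
```python
# Minimal sketch of the corefile rendering done by CoreDNS._write_corefile().
from pathlib import Path
from string import Template

servers = ["dns://8.8.8.8", "dns://1.1.1.1"]
corefile_template = Template(Path("hassio/data/coredns.tmpl").read_text())
print(corefile_template.safe_substitute(servers=" ".join(servers)))
# forward . dns://8.8.8.8 dns://1.1.1.1 { ... }
```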

2
hassio/data/hosts.tmpl Normal file

@ -0,0 +1,2 @@
$supervisor hassio supervisor.local.hass.io hassio.local.hass.io
$homeassistant homeassistant homeassistant.local.hass.io home-assistant.local.hass.io

305
hassio/dns.py Normal file

@ -0,0 +1,305 @@
"""Home Assistant control object."""
import asyncio
import logging
from contextlib import suppress
from ipaddress import IPv4Address, AddressValueError
from pathlib import Path
from string import Template
from typing import Awaitable, Dict, List, Optional
from .const import ATTR_SERVERS, ATTR_VERSION, DNS_SERVERS, FILE_HASSIO_DNS, DNS_SUFFIX
from .coresys import CoreSys, CoreSysAttributes
from .docker.dns import DockerDNS
from .docker.stats import DockerStats
from .exceptions import CoreDNSError, CoreDNSUpdateError, DockerAPIError
from .misc.forwarder import DNSForward
from .utils.json import JsonConfig
from .validate import SCHEMA_DNS_CONFIG
_LOGGER = logging.getLogger(__name__)
COREDNS_TMPL: Path = Path(__file__).parents[0].joinpath("data/coredns.tmpl")
class CoreDNS(JsonConfig, CoreSysAttributes):
"""Home Assistant core object for handle it."""
def __init__(self, coresys: CoreSys):
"""Initialize hass object."""
super().__init__(FILE_HASSIO_DNS, SCHEMA_DNS_CONFIG)
self.coresys: CoreSys = coresys
self.instance: DockerDNS = DockerDNS(coresys)
self.forwarder: DNSForward = DNSForward()
self._hosts: Dict[IPv4Address, List[str]] = {}
@property
def corefile(self) -> Path:
"""Return Path to corefile."""
return Path(self.sys_config.path_dns, "corefile")
@property
def hosts(self) -> Path:
"""Return Path to corefile."""
return Path(self.sys_config.path_dns, "hosts")
@property
def servers(self) -> List[str]:
"""Return list of DNS servers."""
return self._data[ATTR_SERVERS]
@servers.setter
def servers(self, value: List[str]) -> None:
"""Return list of DNS servers."""
self._data[ATTR_SERVERS] = value
@property
def version(self) -> Optional[str]:
"""Return current version of DNS."""
return self._data.get(ATTR_VERSION)
@version.setter
def version(self, value: str) -> None:
"""Return current version of DNS."""
self._data[ATTR_VERSION] = value
@property
def latest_version(self) -> Optional[str]:
"""Return latest version of CoreDNS."""
return self.sys_updater.version_dns
@property
def in_progress(self) -> bool:
"""Return True if a task is in progress."""
return self.instance.in_progress
async def load(self) -> None:
"""Load DNS setup."""
with suppress(CoreDNSError):
self._import_hosts()
# Check CoreDNS state
try:
# Evaluate Version if we lost this information
if not self.version:
self.version = await self.instance.get_latest_version(key=int)
await self.instance.attach(tag=self.version)
except DockerAPIError:
_LOGGER.info(
"No CoreDNS plugin Docker image %s found.", self.instance.image
)
# Install CoreDNS
with suppress(CoreDNSError):
await self.install()
else:
self.version = self.instance.version
self.save_data()
# Start DNS forwarder
self.sys_create_task(self.forwarder.start(self.sys_docker.network.dns))
# Start if not running
if await self.instance.is_running():
return
await self.start()
async def unload(self) -> None:
"""Unload DNS forwarder."""
await self.forwarder.stop()
async def install(self) -> None:
"""Install CoreDNS."""
_LOGGER.info("Setup CoreDNS plugin")
while True:
# read homeassistant tag and install it
if not self.latest_version:
await self.sys_updater.reload()
if self.latest_version:
with suppress(DockerAPIError):
await self.instance.install(self.latest_version)
break
_LOGGER.warning("Error on install CoreDNS plugin. Retry in 30sec")
await asyncio.sleep(30)
_LOGGER.info("CoreDNS plugin now installed")
self.version = self.instance.version
self.save_data()
await self.start()
async def update(self, version: Optional[str] = None) -> None:
"""Update CoreDNS plugin."""
version = version or self.latest_version
if version == self.version:
_LOGGER.warning("Version %s is already installed for CoreDNS", version)
return
try:
await self.instance.update(version)
except DockerAPIError:
_LOGGER.error("CoreDNS update fails")
raise CoreDNSUpdateError() from None
else:
# Cleanup
with suppress(DockerAPIError):
await self.instance.cleanup()
self.version = version
self.save_data()
# Start CoreDNS
await self.start()
async def restart(self) -> None:
"""Restart CoreDNS plugin."""
with suppress(DockerAPIError):
await self.instance.stop()
await self.start()
async def start(self) -> None:
"""Run CoreDNS."""
self._write_corefile()
# Start Instance
_LOGGER.info("Start CoreDNS plugin")
try:
await self.instance.run()
except DockerAPIError:
_LOGGER.error("Can't start CoreDNS plugin")
raise CoreDNSError() from None
def reset(self) -> None:
"""Reset Config / Hosts."""
self.servers = DNS_SERVERS
with suppress(OSError):
self.hosts.unlink()
self._import_hosts()
def _write_corefile(self) -> None:
"""Write CoreDNS config."""
try:
corefile_template: Template = Template(COREDNS_TMPL.read_text())
except OSError as err:
_LOGGER.error("Can't read coredns template file: %s", err)
raise CoreDNSError() from None
# Generate config file
dns_servers = self.servers + list(set(DNS_SERVERS) - set(self.servers))
data = corefile_template.safe_substitute(servers=" ".join(dns_servers))
try:
self.corefile.write_text(data)
except OSError as err:
_LOGGER.error("Can't update corefile: %s", err)
raise CoreDNSError() from None
def _import_hosts(self) -> None:
"""Import hosts entry."""
# Generate Default
if not self.hosts.exists():
self.add_host(self.sys_docker.network.supervisor, ["hassio", "supervisor"])
self.add_host(
self.sys_docker.network.gateway, ["homeassistant", "home-assistant"]
)
return
# Import existing hosts table
try:
with self.hosts.open("r") as hosts:
for line in hosts.readlines():
try:
data = line.split(" ")
self._hosts[IPv4Address(data[0])] = data[1:]
except AddressValueError:
_LOGGER.warning("Fails to read %s", line)
except OSError as err:
_LOGGER.error("Can't read hosts file: %s", err)
raise CoreDNSError() from None
def _write_hosts(self) -> None:
"""Write hosts from memory to file."""
try:
with self.hosts.open("w") as hosts:
for address, hostnames in self._hosts.items():
host = " ".join(hostnames)
hosts.write(f"{address!s} {host}\n")
except OSError as err:
_LOGGER.error("Can't write hosts file: %s", err)
raise CoreDNSError() from None
def add_host(self, ipv4: IPv4Address, names: List[str]) -> None:
"""Add a new host entry."""
hostnames: List[str] = []
for name in names:
hostnames.append(name)
hostnames.append(f"{name}.{DNS_SUFFIX}")
self._hosts[ipv4] = hostnames
_LOGGER.debug("Add Host entry %s -> %s", ipv4, hostnames)
self._write_hosts()
def delete_host(
self, ipv4: Optional[IPv4Address] = None, host: Optional[str] = None
) -> None:
"""Remove a entry from hosts."""
if host:
for address, hostnames in self._hosts.items():
if host not in hostnames:
continue
ipv4 = address
break
# Remove entry
if ipv4:
_LOGGER.debug("Remove Host entry %s", ipv4)
self._hosts.pop(ipv4, None)
self._write_hosts()
else:
_LOGGER.warning("Can't remove Host entry: %s/%s", ipv4, host)
def logs(self) -> Awaitable[bytes]:
"""Get CoreDNS docker logs.
Return Coroutine.
"""
return self.instance.logs()
async def stats(self) -> DockerStats:
"""Return stats of CoreDNS."""
try:
return await self.instance.stats()
except DockerAPIError:
raise CoreDNSError() from None
def is_running(self) -> Awaitable[bool]:
"""Return True if Docker container is running.
Return a coroutine.
"""
return self.instance.is_running()
def is_fails(self) -> Awaitable[bool]:
"""Return True if a Docker container is fails state.
Return a coroutine.
"""
return self.instance.is_fails()
async def repair(self):
"""Repair CoreDNS plugin."""
if await self.instance.exists():
return
_LOGGER.info("Repair CoreDNS %s", self.version)
try:
await self.instance.install(self.version)
except DockerAPIError:
_LOGGER.error("Repairing of CoreDNS fails")


@ -1,12 +1,13 @@
"""Init file for Hass.io Docker object."""
import logging
from contextlib import suppress
from ipaddress import IPv4Address
import logging
from typing import Any, Dict, Optional
import attr
import docker
from ..const import SOCKET_DOCKER
from ..const import SOCKET_DOCKER, DNS_SUFFIX
from ..exceptions import DockerAPIError
from .network import DockerNetwork
@ -50,7 +51,11 @@ class DockerAPI:
return self.docker.api
def run(
self, image: str, version: str = "latest", **kwargs: Dict[str, Any]
self,
image: str,
version: str = "latest",
ipv4: Optional[IPv4Address] = None,
**kwargs: Dict[str, Any],
) -> docker.models.containers.Container:
""""Create a Docker container and run it.
@ -60,12 +65,13 @@ class DockerAPI:
network_mode: str = kwargs.get("network_mode")
hostname: str = kwargs.get("hostname")
# Setup DNS
kwargs["dns"] = [str(self.network.dns)]
kwargs["dns_search"] = [DNS_SUFFIX]
kwargs["domainname"] = DNS_SUFFIX
# Setup network
kwargs["dns_search"] = ["."]
if network_mode:
kwargs["dns"] = [str(self.network.supervisor)]
kwargs["dns_opt"] = ["ndots:0"]
else:
if not network_mode:
kwargs["network"] = None
# Create container
@ -81,7 +87,7 @@ class DockerAPI:
if not network_mode:
alias = [hostname] if hostname else None
try:
self.network.attach_container(container, alias=alias)
self.network.attach_container(container, alias=alias, ipv4=ipv4)
except DockerAPIError:
_LOGGER.warning("Can't attach %s to hassio-net!", name)
else:
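For clarity, the DNS-related kwargs that `DockerAPI.run()` now injects for every managed container boil down to the following (values mirror the constants added in `hassio/const.py` and the new `DockerNetwork.dns`; purely illustrative):
```python
# Sketch of the container DNS kwargs set up by DockerAPI.run().
from ipaddress import ip_network

DOCKER_NETWORK_MASK = ip_network("172.30.32.0/23")
DNS_SUFFIX = "local.hass.io"

kwargs = {
    "dns": [str(DOCKER_NETWORK_MASK[3])],  # DockerNetwork.dns -> 172.30.32.3
    "dns_search": [DNS_SUFFIX],
    "domainname": DNS_SUFFIX,
}
print(kwargs)
# {'dns': ['172.30.32.3'], 'dns_search': ['local.hass.io'], 'domainname': 'local.hass.io'}
```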


@ -35,6 +35,7 @@ if TYPE_CHECKING:
_LOGGER = logging.getLogger(__name__)
AUDIO_DEVICE = "/dev/snd:/dev/snd:rwm"
NO_ADDDRESS = ip_address("0.0.0.0")
class DockerAddon(DockerInterface):
@ -62,7 +63,7 @@ class DockerAddon(DockerInterface):
self._meta["NetworkSettings"]["Networks"]["hassio"]["IPAddress"]
)
except (KeyError, TypeError, ValueError):
return ip_address("0.0.0.0")
return NO_ADDDRESS
@property
def timeout(self) -> int:
@ -100,11 +101,6 @@ class DockerAddon(DockerInterface):
"""Return True if full access is enabled."""
return not self.addon.protected and self.addon.with_full_access
@property
def hostname(self) -> str:
"""Return slug/id of add-on."""
return self.addon.slug.replace("_", "-")
@property
def environment(self) -> Dict[str, str]:
"""Return environment for Docker add-on."""
@ -139,7 +135,14 @@ class DockerAddon(DockerInterface):
# Auto mapping UART devices
if self.addon.auto_uart:
for device in self.sys_hardware.serial_devices:
if self.addon.with_udev:
serial_devs = self.sys_hardware.serial_devices
else:
serial_devs = (
self.sys_hardware.serial_devices | self.sys_hardware.serial_by_id
)
for device in serial_devs:
devices.append(f"{device}:{device}:rwm")
# Return None if no devices is present
@ -186,10 +189,7 @@ class DockerAddon(DockerInterface):
@property
def network_mapping(self) -> Dict[str, str]:
"""Return hosts mapping."""
return {
"homeassistant": self.sys_docker.network.gateway,
"hassio": self.sys_docker.network.supervisor,
}
return {"hassio": self.sys_docker.network.supervisor}
@property
def network_mode(self) -> Optional[str]:
@ -329,7 +329,7 @@ class DockerAddon(DockerInterface):
self.image,
version=self.addon.version,
name=self.name,
hostname=self.hostname,
hostname=self.addon.hostname,
detach=True,
init=True,
privileged=self.full_access,
@ -350,6 +350,9 @@ class DockerAddon(DockerInterface):
self._meta = docker_container.attrs
_LOGGER.info("Start Docker add-on %s with version %s", self.image, self.version)
# Write data to DNS server
self.sys_dns.add_host(ipv4=self.ip_address, names=[self.addon.hostname])
def _install(
self, tag: str, image: Optional[str] = None, latest: bool = False
) -> None:
@ -467,3 +470,12 @@ class DockerAddon(DockerInterface):
except OSError as err:
_LOGGER.error("Can't write to %s stdin: %s", self.name, err)
raise DockerAPIError() from None
def _stop(self, remove_container=True) -> None:
"""Stop/remove Docker container.
Need run inside executor.
"""
if self.ip_address != NO_ADDDRESS:
self.sys_dns.delete_host(ipv4=self.ip_address)
super()._stop(remove_container)

56
hassio/docker/dns.py Normal file

@ -0,0 +1,56 @@
"""HassOS Cli docker object."""
from contextlib import suppress
import logging
from ..const import ENV_TIME
from ..coresys import CoreSysAttributes
from ..exceptions import DockerAPIError
from .interface import DockerInterface
_LOGGER = logging.getLogger(__name__)
DNS_DOCKER_NAME: str = "hassio_dns"
class DockerDNS(DockerInterface, CoreSysAttributes):
"""Docker Hass.io wrapper for Hass.io DNS."""
@property
def image(self) -> str:
"""Return name of Hass.io DNS image."""
return f"homeassistant/{self.sys_arch.supervisor}-hassio-dns"
@property
def name(self) -> str:
"""Return name of Docker container."""
return DNS_DOCKER_NAME
def _run(self) -> None:
"""Run Docker image.
Need run inside executor.
"""
if self._is_running():
return
# Cleanup
with suppress(DockerAPIError):
self._stop()
# Create & Run container
docker_container = self.sys_docker.run(
self.image,
version=self.sys_dns.version,
ipv4=self.sys_docker.network.dns,
name=self.name,
hostname=self.name.replace("_", "-"),
detach=True,
init=True,
environment={ENV_TIME: self.sys_timezone},
volumes={
str(self.sys_config.path_extern_dns): {"bind": "/config", "mode": "ro"}
},
)
self._meta = docker_container.attrs
_LOGGER.info("Start DNS %s with version %s", self.image, self.version)


@ -1,10 +1,8 @@
"""Init file for Hass.io Docker object."""
from distutils.version import StrictVersion
from contextlib import suppress
from ipaddress import IPv4Address
import logging
import re
from typing import Awaitable, List, Optional
from typing import Awaitable, Optional
import docker
@ -15,7 +13,6 @@ from .interface import CommandReturn, DockerInterface
_LOGGER = logging.getLogger(__name__)
HASS_DOCKER_NAME = "homeassistant"
RE_VERSION = re.compile(r"(?P<version>\d+\.\d+\.\d+(?:b\d+|d\d+)?)")
class DockerHomeAssistant(DockerInterface):
@ -139,33 +136,3 @@ class DockerHomeAssistant(DockerInterface):
return False
return True
def get_latest_version(self) -> Awaitable[str]:
"""Return latest version of local Home Asssistant image."""
return self.sys_run_in_executor(self._get_latest_version)
def _get_latest_version(self) -> str:
"""Return latest version of local Home Asssistant image.
Need run inside executor.
"""
available_version: List[str] = []
try:
for image in self.sys_docker.images.list(self.image):
for tag in image.tags:
match = RE_VERSION.search(tag)
if not match:
continue
available_version.append(match.group("version"))
assert available_version
except (docker.errors.DockerException, AssertionError):
_LOGGER.warning("No local HA version found")
raise DockerAPIError()
else:
_LOGGER.debug("Found HA versions: %s", available_version)
# Sort version and return latest version
available_version.sort(key=StrictVersion, reverse=True)
return available_version[0]


@ -2,16 +2,16 @@
import asyncio
from contextlib import suppress
import logging
from typing import Any, Dict, Optional, Awaitable
from typing import Any, Awaitable, Dict, List, Optional
import docker
from . import CommandReturn
from ..const import LABEL_ARCH, LABEL_VERSION
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import DockerAPIError
from ..utils import process_lock
from .stats import DockerStats
from . import CommandReturn
_LOGGER = logging.getLogger(__name__)
@ -50,7 +50,10 @@ class DockerInterface(CoreSysAttributes):
@property
def image(self) -> Optional[str]:
"""Return name of Docker image."""
return self.meta_config.get("Image")
try:
return self.meta_config["Image"].partition(":")[0]
except KeyError:
return None
@property
def version(self) -> Optional[str]:
@ -80,7 +83,6 @@ class DockerInterface(CoreSysAttributes):
Need run inside executor.
"""
image = image or self.image
image = image.partition(":")[0] # remove potential tag
_LOGGER.info("Pull image %s tag %s.", image, tag)
try:
@ -397,3 +399,35 @@ class DockerInterface(CoreSysAttributes):
return True
return False
def get_latest_version(self, key: Any = int) -> Awaitable[str]:
"""Return latest version of local Home Asssistant image."""
return self.sys_run_in_executor(self._get_latest_version, key)
def _get_latest_version(self, key: Any = int) -> str:
"""Return latest version of local Home Asssistant image.
Need run inside executor.
"""
available_version: List[str] = []
try:
for image in self.sys_docker.images.list(self.image):
for tag in image.tags:
version = tag.partition(":")[2]
try:
key(version)
except (AttributeError, ValueError):
continue
available_version.append(version)
assert available_version
except (docker.errors.DockerException, AssertionError):
_LOGGER.debug("No version found for %s", self.image)
raise DockerAPIError()
else:
_LOGGER.debug("Found HA versions: %s", available_version)
# Sort version and return latest version
available_version.sort(key=key, reverse=True)
return available_version[0]
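The `key` argument decides how tags are compared: CoreDNS tags are plain integers, while Home Assistant passes `StrictVersion` (see `hassio/homeassistant.py` below). A small sketch of the sort:
```python
# Sketch of the key-based tag sorting used by _get_latest_version().
from distutils.version import StrictVersion

dns_tags = ["1", "3", "2"]
dns_tags.sort(key=int, reverse=True)
print(dns_tags[0])  # "3"

ha_tags = ["0.96.5", "0.97.1", "0.97.0"]
ha_tags.sort(key=StrictVersion, reverse=True)
print(ha_tags[0])  # "0.97.1"
```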


@ -42,6 +42,11 @@ class DockerNetwork:
"""Return supervisor of the network."""
return DOCKER_NETWORK_MASK[2]
@property
def dns(self) -> IPv4Address:
"""Return dns of the network."""
return DOCKER_NETWORK_MASK[3]
def _get_network(self) -> docker.models.networks.Network:
"""Get HassIO network."""
try:


@ -20,6 +20,12 @@ class DockerStats:
self._memory_usage = 0
self._memory_limit = 0
# Calculate percent usage
if self._memory_limit != 0:
self._memory_percent = self._memory_usage / self._memory_limit * 100.0
else:
self._memory_percent = 0
with suppress(KeyError):
self._calc_cpu_percent(stats)
@ -39,13 +45,12 @@ class DockerStats:
stats["cpu_stats"]["system_cpu_usage"]
- stats["precpu_stats"]["system_cpu_usage"]
)
online_cpu = stats["cpu_stats"]["online_cpus"]
if online_cpu == 0.0:
online_cpu = len(stats["cpu_stats"]["cpu_usage"]["percpu_usage"])
if system_delta > 0.0 and cpu_delta > 0.0:
self._cpu = (
(cpu_delta / system_delta)
* len(stats["cpu_stats"]["cpu_usage"]["percpu_usage"])
* 100.0
)
self._cpu = (cpu_delta / system_delta) * online_cpu * 100.0
def _calc_network(self, networks):
"""Calculate Network IO stats."""
@ -64,7 +69,7 @@ class DockerStats:
@property
def cpu_percent(self):
"""Return CPU percent."""
return self._cpu
return round(self._cpu, 2)
@property
def memory_usage(self):
@ -76,6 +81,11 @@ class DockerStats:
"""Return memory limit."""
return self._memory_limit
@property
def memory_percent(self):
"""Return memory usage in percent."""
return round(self._memory_percent, 2)
@property
def network_rx(self):
"""Return network rx stats."""


@ -38,7 +38,9 @@ class DockerSupervisor(DockerInterface, CoreSysAttributes):
self._meta = docker_container.attrs
_LOGGER.info(
"Attach to Supervisor %s with version %s", self.image, self.version
"Attach to Supervisor %s with version %s",
self.image,
self.sys_supervisor.version,
)
# If already attach


@ -54,6 +54,17 @@ class HassOSNotSupportedError(HassioNotSupportedError):
"""Function not supported by HassOS."""
# DNS
class CoreDNSError(HassioError):
"""CoreDNS exception."""
class CoreDNSUpdateError(CoreDNSError):
"""Error on update of a CoreDNS."""
# Addons


@ -188,13 +188,13 @@ class HassOS(CoreSysAttributes):
try:
await self.instance.update(version, latest=True)
# Cleanup
with suppress(DockerAPIError):
await self.instance.cleanup()
except DockerAPIError:
_LOGGER.error("HassOS CLI update fails")
raise HassOSUpdateError() from None
else:
# Cleanup
with suppress(DockerAPIError):
await self.instance.cleanup()
async def repair_cli(self) -> None:
"""Repair CLI container."""


@ -2,6 +2,7 @@
import asyncio
from contextlib import asynccontextmanager, suppress
from datetime import datetime, timedelta
from distutils.version import StrictVersion
from ipaddress import IPv4Address
import logging
import os
@ -79,7 +80,7 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):
try:
# Evaluate Version if we lost this information
if not self.version:
self.version = await self.instance.get_latest_version()
self.version = await self.instance.get_latest_version(key=StrictVersion)
await self.instance.attach(tag=self.version)
except DockerAPIError:


@ -15,6 +15,10 @@ _LOGGER = logging.getLogger(__name__)
DefaultConfig = attr.make_class("DefaultConfig", ["input", "output"])
AUDIODB_JSON: Path = Path(__file__).parents[1].joinpath("data/audiodb.json")
ASOUND_TMPL: Path = Path(__file__).parents[1].joinpath("data/asound.tmpl")
class AlsaAudio(CoreSysAttributes):
"""Handle Audio ALSA host data."""
@ -82,12 +86,8 @@ class AlsaAudio(CoreSysAttributes):
@staticmethod
def _audio_database():
"""Read local json audio data into dict."""
json_file = Path(__file__).parent.joinpath("data/audiodb.json")
try:
# pylint: disable=no-member
with json_file.open("r") as database:
return json.loads(database.read())
return json.loads(AUDIODB_JSON.read_text())
except (ValueError, OSError) as err:
_LOGGER.warning("Can't read audio DB: %s", err)
@ -122,11 +122,8 @@ class AlsaAudio(CoreSysAttributes):
alsa_output = alsa_output or self.default.output
# Read Template
asound_file = Path(__file__).parent.joinpath("data/asound.tmpl")
try:
# pylint: disable=no-member
with asound_file.open("r") as asound:
asound_data = asound.read()
asound_data = ASOUND_TMPL.read_text()
except OSError as err:
_LOGGER.error("Can't read asound.tmpl: %s", err)
return ""


@ -2,12 +2,14 @@
import asyncio
import logging
import shlex
from ipaddress import IPv4Address
from typing import Optional
import async_timeout
_LOGGER = logging.getLogger(__name__)
COMMAND = "socat UDP-RECVFROM:53,fork UDP-SENDTO:127.0.0.11:53"
COMMAND = "socat UDP-RECVFROM:53,fork UDP-SENDTO:{!s}"
class DNSForward:
@ -15,13 +17,13 @@ class DNSForward:
def __init__(self):
"""Initialize DNS forwarding."""
self.proc = None
self.proc: Optional[asyncio.Process] = None
async def start(self):
async def start(self, dns_server: IPv4Address) -> None:
"""Start DNS forwarding."""
try:
self.proc = await asyncio.create_subprocess_exec(
*shlex.split(COMMAND),
*shlex.split(COMMAND.format(dns_server)),
stdin=asyncio.subprocess.DEVNULL,
stdout=asyncio.subprocess.DEVNULL,
stderr=asyncio.subprocess.DEVNULL,
@ -29,9 +31,9 @@ class DNSForward:
except OSError as err:
_LOGGER.error("Can't start DNS forwarding: %s", err)
else:
_LOGGER.info("Start DNS port forwarding for host add-ons")
_LOGGER.info("Start DNS port forwarding to %s", dns_server)
async def stop(self):
async def stop(self) -> None:
"""Stop DNS forwarding."""
if not self.proc:
_LOGGER.warning("DNS forwarding is not running!")


@ -3,25 +3,26 @@ from datetime import datetime
import logging
from pathlib import Path
import re
from typing import Any, Dict, Optional, Set
import pyudev
from ..const import ATTR_NAME, ATTR_TYPE, ATTR_DEVICES, CHAN_ID, CHAN_TYPE
from ..const import ATTR_DEVICES, ATTR_NAME, ATTR_TYPE, CHAN_ID, CHAN_TYPE
_LOGGER = logging.getLogger(__name__)
ASOUND_CARDS = Path("/proc/asound/cards")
RE_CARDS = re.compile(r"(\d+) \[(\w*) *\]: (.*\w)")
ASOUND_CARDS: Path = Path("/proc/asound/cards")
RE_CARDS: re.Pattern = re.compile(r"(\d+) \[(\w*) *\]: (.*\w)")
ASOUND_DEVICES = Path("/proc/asound/devices")
RE_DEVICES = re.compile(r"\[.*(\d+)- (\d+).*\]: ([\w ]*)")
ASOUND_DEVICES: Path = Path("/proc/asound/devices")
RE_DEVICES: re.Pattern = re.compile(r"\[.*(\d+)- (\d+).*\]: ([\w ]*)")
PROC_STAT = Path("/proc/stat")
RE_BOOT_TIME = re.compile(r"btime (\d+)")
PROC_STAT: Path = Path("/proc/stat")
RE_BOOT_TIME: re.Pattern = re.compile(r"btime (\d+)")
GPIO_DEVICES = Path("/sys/class/gpio")
SOC_DEVICES = Path("/sys/devices/platform/soc")
RE_TTY = re.compile(r"tty[A-Z]+")
GPIO_DEVICES: Path = Path("/sys/class/gpio")
SOC_DEVICES: Path = Path("/sys/devices/platform/soc")
RE_TTY: re.Pattern = re.compile(r"tty[A-Z]+")
class Hardware:
@ -32,13 +33,21 @@ class Hardware:
self.context = pyudev.Context()
@property
def serial_devices(self):
def serial_devices(self) -> Set[str]:
"""Return all serial and connected devices."""
dev_list = set()
dev_list: Set[str] = set()
for device in self.context.list_devices(subsystem="tty"):
if "ID_VENDOR" in device.properties or RE_TTY.search(device.device_node):
dev_list.add(device.device_node)
return dev_list
@property
def serial_by_id(self) -> Set[str]:
"""Return all /dev/serial/by-id for serial devices."""
dev_list: Set[str] = set()
for device in self.context.list_devices(subsystem="tty"):
if "ID_VENDOR" in device.properties or RE_TTY.search(device.device_node):
# Add /dev/serial/by-id devlink for current device
for dev_link in device.device_links:
if not dev_link.startswith("/dev/serial/by-id"):
@ -48,9 +57,9 @@ class Hardware:
return dev_list
@property
def input_devices(self):
def input_devices(self) -> Set[str]:
"""Return all input devices."""
dev_list = set()
dev_list: Set[str] = set()
for device in self.context.list_devices(subsystem="input"):
if "NAME" in device.properties:
dev_list.add(device.properties["NAME"].replace('"', ""))
@ -58,9 +67,9 @@ class Hardware:
return dev_list
@property
def disk_devices(self):
def disk_devices(self) -> Set[str]:
"""Return all disk devices."""
dev_list = set()
dev_list: Set[str] = set()
for device in self.context.list_devices(subsystem="block"):
if "ID_NAME" in device.properties:
dev_list.add(device.device_node)
@ -68,15 +77,15 @@ class Hardware:
return dev_list
@property
def support_audio(self):
def support_audio(self) -> bool:
"""Return True if the system have audio support."""
return bool(self.audio_devices)
@property
def audio_devices(self):
def audio_devices(self) -> Dict[str, Any]:
"""Return all available audio interfaces."""
if not ASOUND_CARDS.exists():
_LOGGER.debug("No audio devices found")
_LOGGER.info("No audio devices found")
return {}
try:
@ -86,7 +95,7 @@ class Hardware:
_LOGGER.error("Can't read asound data: %s", err)
return {}
audio_list = {}
audio_list: Dict[str, Any] = {}
# parse cards
for match in RE_CARDS.finditer(cards):
@ -109,31 +118,31 @@ class Hardware:
return audio_list
@property
def support_gpio(self):
def support_gpio(self) -> bool:
"""Return True if device support GPIOs."""
return SOC_DEVICES.exists() and GPIO_DEVICES.exists()
@property
def gpio_devices(self):
def gpio_devices(self) -> Set[str]:
"""Return list of GPIO interface on device."""
dev_list = set()
dev_list: Set[str] = set()
for interface in GPIO_DEVICES.glob("gpio*"):
dev_list.add(interface.name)
return dev_list
@property
def last_boot(self):
def last_boot(self) -> Optional[str]:
"""Return last boot time."""
try:
with PROC_STAT.open("r") as stat_file:
stats = stat_file.read()
stats: str = stat_file.read()
except OSError as err:
_LOGGER.error("Can't read stat data: %s", err)
return None
# parse stat file
found = RE_BOOT_TIME.search(stats)
found: Optional[re.Match] = RE_BOOT_TIME.search(stats)
if not found:
_LOGGER.error("Can't found last boot time!")
return None
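The new `serial_by_id` lookup can be reproduced with a few lines of pyudev; a hedged, slightly simplified sketch (udev must be available, and device names depend on the host):
```python
# Sketch of the /dev/serial/by-id discovery added to Hardware.serial_by_id.
import pyudev

context = pyudev.Context()
for device in context.list_devices(subsystem="tty"):
    if "ID_VENDOR" not in device.properties:
        continue
    for dev_link in device.device_links:
        if dev_link.startswith("/dev/serial/by-id"):
            print(dev_link)
```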


@ -3,7 +3,7 @@ import asyncio
import logging
from .coresys import CoreSysAttributes
from .exceptions import HomeAssistantError
from .exceptions import HomeAssistantError, CoreDNSError
_LOGGER = logging.getLogger(__name__)
@ -22,6 +22,8 @@ RUN_RELOAD_INGRESS = 930
RUN_WATCHDOG_HOMEASSISTANT_DOCKER = 15
RUN_WATCHDOG_HOMEASSISTANT_API = 300
RUN_WATCHDOG_DNS_DOCKER = 20
class Tasks(CoreSysAttributes):
"""Handle Tasks inside Hass.io."""
@ -83,6 +85,11 @@ class Tasks(CoreSysAttributes):
self._watchdog_homeassistant_api, RUN_WATCHDOG_HOMEASSISTANT_API
)
)
self.jobs.add(
self.sys_scheduler.register_task(
self._watchdog_dns_docker, RUN_WATCHDOG_DNS_DOCKER
)
)
_LOGGER.info("All core tasks are scheduled")
@ -194,3 +201,19 @@ class Tasks(CoreSysAttributes):
_LOGGER.info("Found new HassOS CLI version")
await self.sys_hassos.update_cli()
async def _watchdog_dns_docker(self):
"""Check running state of Docker and start if they is close."""
# if Home Assistant is active
if await self.sys_dns.is_running():
return
_LOGGER.warning("Watchdog found a problem with CoreDNS plugin!")
if await self.sys_dns.is_fails():
_LOGGER.warning("CoreDNS plugin is in fails state / Reset config")
self.sys_dns.reset()
try:
await self.sys_dns.start()
except CoreDNSError:
_LOGGER.error("Watchdog CoreDNS reanimation fails!")


@ -4,23 +4,25 @@ from contextlib import suppress
from datetime import timedelta
import json
import logging
from typing import Optional
import aiohttp
from .const import (
URL_HASSIO_VERSION,
FILE_HASSIO_UPDATER,
ATTR_HOMEASSISTANT,
ATTR_HASSIO,
ATTR_CHANNEL,
ATTR_DNS,
ATTR_HASSIO,
ATTR_HASSOS,
ATTR_HASSOS_CLI,
ATTR_HOMEASSISTANT,
FILE_HASSIO_UPDATER,
URL_HASSIO_VERSION,
)
from .coresys import CoreSysAttributes
from .exceptions import HassioUpdaterError
from .utils import AsyncThrottle
from .utils.json import JsonConfig
from .validate import SCHEMA_UPDATER_CONFIG
from .exceptions import HassioUpdaterError
_LOGGER = logging.getLogger(__name__)
@ -33,43 +35,48 @@ class Updater(JsonConfig, CoreSysAttributes):
super().__init__(FILE_HASSIO_UPDATER, SCHEMA_UPDATER_CONFIG)
self.coresys = coresys
async def load(self):
async def load(self) -> None:
"""Update internal data."""
with suppress(HassioUpdaterError):
await self.fetch_data()
async def reload(self):
async def reload(self) -> None:
"""Update internal data."""
with suppress(HassioUpdaterError):
await self.fetch_data()
@property
def version_homeassistant(self):
"""Return last version of Home Assistant."""
def version_homeassistant(self) -> Optional[str]:
"""Return latest version of Home Assistant."""
return self._data.get(ATTR_HOMEASSISTANT)
@property
def version_hassio(self):
"""Return last version of Hass.io."""
def version_hassio(self) -> Optional[str]:
"""Return latest version of Hass.io."""
return self._data.get(ATTR_HASSIO)
@property
def version_hassos(self):
"""Return last version of HassOS."""
def version_hassos(self) -> Optional[str]:
"""Return latest version of HassOS."""
return self._data.get(ATTR_HASSOS)
@property
def version_hassos_cli(self):
"""Return last version of HassOS cli."""
def version_hassos_cli(self) -> Optional[str]:
"""Return latest version of HassOS cli."""
return self._data.get(ATTR_HASSOS_CLI)
@property
def channel(self):
def version_dns(self) -> Optional[str]:
"""Return latest version of Hass.io DNS."""
return self._data.get(ATTR_DNS)
@property
def channel(self) -> str:
"""Return upstream channel of Hass.io instance."""
return self._data[ATTR_CHANNEL]
@channel.setter
def channel(self, value):
def channel(self, value: str):
"""Set upstream mode."""
self._data[ATTR_CHANNEL] = value
@ -104,6 +111,7 @@ class Updater(JsonConfig, CoreSysAttributes):
try:
# update supervisor version
self._data[ATTR_HASSIO] = data["supervisor"]
self._data[ATTR_DNS] = data["dns"]
# update Home Assistant version
self._data[ATTR_HOMEASSISTANT] = data["homeassistant"][machine]


@ -142,6 +142,7 @@ class DBus:
data = await self._send(command)
# Parse and return data
_LOGGER.debug("Receive from %s: %s", method, data)
return self.parse_gvariant(data)
async def get_properties(self, interface):


@ -11,6 +11,7 @@ from .const import (
ATTR_CHANNEL,
ATTR_DEBUG,
ATTR_DEBUG_BLOCK,
ATTR_DNS,
ATTR_HASSIO,
ATTR_HASSOS,
ATTR_HASSOS_CLI,
@ -23,6 +24,7 @@ from .const import (
ATTR_PORT,
ATTR_PORTS,
ATTR_REFRESH_TOKEN,
ATTR_SERVERS,
ATTR_SESSION,
ATTR_SSL,
ATTR_TIMEZONE,
@ -33,11 +35,13 @@ from .const import (
CHANNEL_BETA,
CHANNEL_DEV,
CHANNEL_STABLE,
DNS_SERVERS,
)
from .utils.validate import validate_timezone
RE_REPOSITORY = re.compile(r"^(?P<url>[^#]+)(?:#(?P<branch>[\w\-]+))?$")
# pylint: disable=no-value-for-parameter
NETWORK_PORT = vol.All(vol.Coerce(int), vol.Range(min=1, max=65535))
WAIT_BOOT = vol.All(vol.Coerce(int), vol.Range(min=1, max=60))
DOCKER_IMAGE = vol.Match(r"^[\w{}]+/[\-\w{}]+$")
@ -47,6 +51,7 @@ UUID_MATCH = vol.Match(r"^[0-9a-f]{32}$")
SHA256 = vol.Match(r"^[0-9a-f]{64}$")
TOKEN = vol.Match(r"^[0-9a-f]{32,256}$")
LOG_LEVEL = vol.In(["debug", "info", "warning", "error", "critical"])
DNS_SERVER_LIST = vol.All([vol.Url()], vol.Length(max=8))
def validate_repository(repository):
@ -108,6 +113,7 @@ SCHEMA_UPDATER_CONFIG = vol.Schema(
vol.Optional(ATTR_HASSIO): vol.Coerce(str),
vol.Optional(ATTR_HASSOS): vol.Coerce(str),
vol.Optional(ATTR_HASSOS_CLI): vol.Coerce(str),
vol.Optional(ATTR_DNS): vol.Coerce(str),
},
extra=vol.REMOVE_EXTRA,
)
@ -145,3 +151,12 @@ SCHEMA_INGRESS_CONFIG = vol.Schema(
},
extra=vol.REMOVE_EXTRA,
)
SCHEMA_DNS_CONFIG = vol.Schema(
{
vol.Optional(ATTR_VERSION): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_SERVERS, default=DNS_SERVERS): DNS_SERVER_LIST,
},
extra=vol.REMOVE_EXTRA,
)
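A minimal sketch of the new DNS validation in use (assumes `voluptuous` is installed; errors follow voluptuous' `Invalid` hierarchy):
```python
# Sketch of DNS_SERVER_LIST validation: a list of URLs, at most 8 entries.
import voluptuous as vol

DNS_SERVER_LIST = vol.All([vol.Url()], vol.Length(max=8))

print(DNS_SERVER_LIST(["dns://8.8.8.8", "dns://1.1.1.1"]))  # passes, returns list
try:
    DNS_SERVER_LIST(["not a url"])
except vol.Invalid as err:
    print(f"rejected: {err}")
```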


@ -6,9 +6,9 @@ colorlog==4.0.2
cpe==1.2.1
cryptography==2.7
docker==4.0.2
gitpython==2.1.13
gitpython==3.0.0
pytz==2019.2
pyudev==0.21.0
uvloop==0.12.2
voluptuous==0.11.5
ptvsd==4.3.0
voluptuous==0.11.7
ptvsd==4.3.2

102
scripts/test_env.sh Executable file

@ -0,0 +1,102 @@
#!/bin/bash
set -eE
DOCKER_TIMEOUT=30
DOCKER_PID=0
function start_docker() {
local starttime
local endtime
echo "Starting docker."
dockerd 2> /dev/null &
DOCKER_PID=$!
echo "Waiting for docker to initialize..."
starttime="$(date +%s)"
endtime="$(date +%s)"
until docker info >/dev/null 2>&1; do
if [ $((endtime - starttime)) -le $DOCKER_TIMEOUT ]; then
sleep 1
endtime=$(date +%s)
else
echo "Timeout while waiting for docker to come up"
exit 1
fi
done
echo "Docker was initialized"
}
function stop_docker() {
local starttime
local endtime
echo "Stopping in container docker..."
if [ "$DOCKER_PID" -gt 0 ] && kill -0 "$DOCKER_PID" 2> /dev/null; then
starttime="$(date +%s)"
endtime="$(date +%s)"
# Now wait for it to die
kill "$DOCKER_PID"
while kill -0 "$DOCKER_PID" 2> /dev/null; do
if [ $((endtime - starttime)) -le $DOCKER_TIMEOUT ]; then
sleep 1
endtime=$(date +%s)
else
echo "Timeout while waiting for container docker to die"
exit 1
fi
done
else
echo "Your host might have been left with unreleased resources"
fi
}
function build_supervisor() {
docker pull homeassistant/amd64-builder:dev
docker run --rm --privileged \
-v /run/docker.sock:/run/docker.sock -v "$(pwd):/data" \
homeassistant/amd64-builder:dev \
--supervisor 3.7-alpine3.10 --version dev \
-t /data --test --amd64 \
--no-cache --docker-hub homeassistant
}
function install_cli() {
docker pull homeassistant/amd64-hassio-cli:dev
}
function setup_test_env() {
mkdir -p /workspaces/test_hassio
docker run --rm --privileged \
--name hassio_supervisor \
--security-opt seccomp=unconfined \
--security-opt apparmor:unconfined \
-v /run/docker.sock:/run/docker.sock \
-v /run/dbus:/run/dbus \
-v "/workspaces/test_hassio":/data \
-v /etc/machine-id:/etc/machine-id:ro \
-e SUPERVISOR_SHARE="/workspaces/test_hassio" \
-e SUPERVISOR_NAME=hassio_supervisor \
-e SUPERVISOR_DEV=1 \
-e HOMEASSISTANT_REPOSITORY="homeassistant/qemux86-64-homeassistant" \
homeassistant/amd64-hassio-supervisor:latest
}
echo "Start Test-Env"
start_docker
trap "stop_docker" ERR
build_supervisor
install_cli
setup_test_env
stop_docker
