Merge pull request #1421 from home-assistant/dev

Release 194
Pascal Vizeli 2020-01-07 21:09:50 +01:00 committed by GitHub
commit d1b4521290
GPG Key ID: 4AEE18F83AFDEB23
30 changed files with 662 additions and 533 deletions

API.md

@ -8,8 +8,8 @@ On error / Code 400:
```json
{
"result": "error",
"message": ""
"result": "error",
"message": ""
}
```
@ -17,8 +17,8 @@ On success / Code 200:
```json
{
"result": "ok",
"data": { }
"result": "ok",
"data": {}
}
```
@ -36,32 +36,30 @@ The add-ons listed in `addons` are only the installed ones.
```json
{
"version": "INSTALL_VERSION",
"last_version": "LAST_VERSION",
"arch": "armhf|aarch64|i386|amd64",
"channel": "stable|beta|dev",
"timezone": "TIMEZONE",
"logging": "debug|info|warning|error|critical",
"ip_address": "ip address",
"wait_boot": "int",
"debug": "bool",
"debug_block": "bool",
"addons": [
{
"name": "xy bla",
"slug": "xy",
"description": "description",
"repository": "12345678|null",
"version": "LAST_VERSION",
"installed": "INSTALL_VERSION",
"icon": "bool",
"logo": "bool",
"state": "started|stopped",
}
],
"addons_repositories": [
"REPO_URL"
]
"version": "INSTALL_VERSION",
"last_version": "LAST_VERSION",
"arch": "armhf|aarch64|i386|amd64",
"channel": "stable|beta|dev",
"timezone": "TIMEZONE",
"logging": "debug|info|warning|error|critical",
"ip_address": "ip address",
"wait_boot": "int",
"debug": "bool",
"debug_block": "bool",
"addons": [
{
"name": "xy bla",
"slug": "xy",
"description": "description",
"repository": "12345678|null",
"version": "LAST_VERSION",
"installed": "INSTALL_VERSION",
"icon": "bool",
"logo": "bool",
"state": "started|stopped"
}
],
"addons_repositories": ["REPO_URL"]
}
```
@ -71,7 +69,7 @@ Optional:
```json
{
"version": "VERSION"
"version": "VERSION"
}
```
@ -79,15 +77,13 @@ Optional:
```json
{
"channel": "stable|beta|dev",
"timezone": "TIMEZONE",
"wait_boot": "int",
"debug": "bool",
"debug_block": "bool",
"logging": "debug|info|warning|error|critical",
"addons_repositories": [
"REPO_URL"
]
"channel": "stable|beta|dev",
"timezone": "TIMEZONE",
"wait_boot": "int",
"debug": "bool",
"debug_block": "bool",
"logging": "debug|info|warning|error|critical",
"addons_repositories": ["REPO_URL"]
}
```
@ -100,16 +96,17 @@ Reload addons/version.
Output is the raw Docker log.
- GET `/supervisor/stats`
```json
{
"cpu_percent": 0.0,
"memory_usage": 283123,
"memory_limit": 329392,
"memory_percent": 1.4,
"network_tx": 0,
"network_rx": 0,
"blk_read": 0,
"blk_write": 0
"cpu_percent": 0.0,
"memory_usage": 283123,
"memory_limit": 329392,
"memory_percent": 1.4,
"network_tx": 0,
"network_rx": 0,
"blk_read": 0,
"blk_write": 0
}
```
@ -123,15 +120,15 @@ Repair overlayfs issue and restore lost images
```json
{
"snapshots": [
{
"slug": "SLUG",
"date": "ISO",
"name": "Custom name",
"type": "full|partial",
"protected": "bool"
}
]
"snapshots": [
{
"slug": "SLUG",
"date": "ISO",
"name": "Custom name",
"type": "full|partial",
"protected": "bool"
}
]
}
```
@ -140,9 +137,10 @@ Repair overlayfs issue and restore lost images
- POST `/snapshots/new/upload`
return:
```json
{
"slug": ""
"slug": ""
}
```
@ -150,15 +148,16 @@ return:
```json
{
"name": "Optional",
"password": "Optional"
"name": "Optional",
"password": "Optional"
}
```
return:
```json
{
"slug": ""
"slug": ""
}
```
@ -166,17 +165,18 @@ return:
```json
{
"name": "Optional",
"addons": ["ADDON_SLUG"],
"folders": ["FOLDER_NAME"],
"password": "Optional"
"name": "Optional",
"addons": ["ADDON_SLUG"],
"folders": ["FOLDER_NAME"],
"password": "Optional"
}
```
return:
```json
{
"slug": ""
"slug": ""
}
```
@ -186,23 +186,23 @@ return:
```json
{
"slug": "SNAPSHOT ID",
"type": "full|partial",
"name": "custom snapshot name / description",
"date": "ISO",
"size": "SIZE_IN_MB",
"protected": "bool",
"homeassistant": "version",
"addons": [
{
"slug": "ADDON_SLUG",
"name": "NAME",
"version": "INSTALLED_VERSION",
"size": "SIZE_IN_MB"
}
],
"repositories": ["URL"],
"folders": ["NAME"]
"slug": "SNAPSHOT ID",
"type": "full|partial",
"name": "custom snapshot name / description",
"date": "ISO",
"size": "SIZE_IN_MB",
"protected": "bool",
"homeassistant": "version",
"addons": [
{
"slug": "ADDON_SLUG",
"name": "NAME",
"version": "INSTALLED_VERSION",
"size": "SIZE_IN_MB"
}
],
"repositories": ["URL"],
"folders": ["NAME"]
}
```
@ -214,7 +214,7 @@ return:
```json
{
"password": "Optional"
"password": "Optional"
}
```
@ -222,10 +222,10 @@ return:
```json
{
"homeassistant": "bool",
"addons": ["ADDON_SLUG"],
"folders": ["FOLDER_NAME"],
"password": "Optional"
"homeassistant": "bool",
"addons": ["ADDON_SLUG"],
"folders": ["FOLDER_NAME"],
"password": "Optional"
}
```
@ -241,13 +241,13 @@ return:
```json
{
"hostname": "hostname|null",
"features": ["shutdown", "reboot", "hostname", "services", "hassos"],
"operating_system": "HassOS XY|Ubuntu 16.4|null",
"kernel": "4.15.7|null",
"chassis": "specific|null",
"deployment": "stable|beta|dev|null",
"cpe": "xy|null",
"hostname": "hostname|null",
"features": ["shutdown", "reboot", "hostname", "services", "hassos"],
"operating_system": "HassOS XY|Ubuntu 16.4|null",
"kernel": "4.15.7|null",
"chassis": "specific|null",
"deployment": "stable|beta|dev|null",
"cpe": "xy|null"
}
```
@ -255,7 +255,7 @@ return:
```json
{
"hostname": "",
"hostname": ""
}
```
@ -264,15 +264,16 @@ return:
#### Services
- GET `/host/services`
```json
{
"services": [
{
"name": "xy.service",
"description": "XY ...",
"state": "active|"
}
]
"services": [
{
"name": "xy.service",
"description": "XY ...",
"state": "active|"
}
]
}
```
@ -285,27 +286,31 @@ return:
### HassOS
- GET `/hassos/info`
```json
{
"version": "2.3",
"version_cli": "7",
"version_latest": "2.4",
"version_cli_latest": "8",
"board": "ova|rpi"
"version": "2.3",
"version_cli": "7",
"version_latest": "2.4",
"version_cli_latest": "8",
"board": "ova|rpi",
"boot": "rauc boot slot"
}
```
- POST `/hassos/update`
```json
{
"version": "optional"
"version": "optional"
}
```
- POST `/hassos/update/cli`
```json
{
"version": "optional"
"version": "optional"
}
```
@ -316,6 +321,7 @@ Load host configs from a USB stick.
### Hardware
- GET `/hardware/info`
```json
{
"serial": ["/dev/xy"],
@ -336,17 +342,18 @@ Load host configs from a USB stick.
```
- GET `/hardware/audio`
```json
{
"audio": {
"input": {
"0,0": "Mic"
},
"output": {
"1,0": "Jack",
"1,1": "HDMI"
}
"audio": {
"input": {
"0,0": "Mic"
},
"output": {
"1,0": "Jack",
"1,1": "HDMI"
}
}
}
```
@ -360,18 +367,18 @@ Trigger a udev reload
```json
{
"version": "INSTALL_VERSION",
"last_version": "LAST_VERSION",
"arch": "arch",
"machine": "Image machine type",
"ip_address": "ip address",
"image": "str",
"custom": "bool -> if custom image",
"boot": "bool",
"port": 8123,
"ssl": "bool",
"watchdog": "bool",
"wait_boot": 600
"version": "INSTALL_VERSION",
"last_version": "LAST_VERSION",
"arch": "arch",
"machine": "Image machine type",
"ip_address": "ip address",
"image": "str",
"custom": "bool -> if custom image",
"boot": "bool",
"port": 8123,
"ssl": "bool",
"watchdog": "bool",
"wait_boot": 600
}
```
@ -381,7 +388,7 @@ Optional:
```json
{
"version": "VERSION"
"version": "VERSION"
}
```
@ -399,14 +406,14 @@ Output is the raw Docker log.
```json
{
"image": "Optional|null",
"last_version": "Optional for custom image|null",
"port": "port for access hass",
"ssl": "bool",
"password": "",
"refresh_token": "",
"watchdog": "bool",
"wait_boot": 600
"image": "Optional|null",
"last_version": "Optional for custom image|null",
"port": "port for access hass",
"ssl": "bool",
"password": "",
"refresh_token": "",
"watchdog": "bool",
"wait_boot": 600
}
```
@ -421,16 +428,17 @@ Proxy to real home-assistant instance.
Proxy to real websocket instance.
- GET `/homeassistant/stats`
```json
{
"cpu_percent": 0.0,
"memory_usage": 283123,
"memory_limit": 329392,
"memory_percent": 1.4,
"network_tx": 0,
"network_rx": 0,
"blk_read": 0,
"blk_write": 0
"cpu_percent": 0.0,
"memory_usage": 283123,
"memory_limit": 329392,
"memory_percent": 1.4,
"network_tx": 0,
"network_rx": 0,
"blk_read": 0,
"blk_write": 0
}
```
@ -444,31 +452,31 @@ Get all available addons.
```json
{
"addons": [
{
"name": "xy bla",
"slug": "xy",
"description": "description",
"repository": "core|local|REP_ID",
"version": "LAST_VERSION",
"installed": "none|INSTALL_VERSION",
"detached": "bool",
"available": "bool",
"build": "bool",
"url": "null|url",
"icon": "bool",
"logo": "bool"
}
],
"repositories": [
{
"slug": "12345678",
"name": "Repitory Name|unknown",
"source": "URL_OF_REPOSITORY",
"url": "WEBSITE|REPOSITORY",
"maintainer": "BLA BLU <fla@dld.ch>|unknown"
}
]
"addons": [
{
"name": "xy bla",
"slug": "xy",
"description": "description",
"repository": "core|local|REP_ID",
"version": "LAST_VERSION",
"installed": "none|INSTALL_VERSION",
"detached": "bool",
"available": "bool",
"build": "bool",
"url": "null|url",
"icon": "bool",
"logo": "bool"
}
],
"repositories": [
{
"slug": "12345678",
"name": "Repitory Name|unknown",
"source": "URL_OF_REPOSITORY",
"url": "WEBSITE|REPOSITORY",
"maintainer": "BLA BLU <fla@dld.ch>|unknown"
}
]
}
```
@ -477,64 +485,64 @@ Get all available addons.
```json
{
"name": "xy bla",
"slug": "xdssd_xybla",
"hostname": "xdssd-xybla",
"dns": [],
"description": "description",
"long_description": "null|markdown",
"auto_update": "bool",
"url": "null|url of addon",
"detached": "bool",
"available": "bool",
"arch": ["armhf", "aarch64", "i386", "amd64"],
"machine": "[raspberrypi2, tinker]",
"homeassistant": "null|min Home Assistant version",
"repository": "12345678|null",
"version": "null|VERSION_INSTALLED",
"last_version": "LAST_VERSION",
"state": "none|started|stopped",
"boot": "auto|manual",
"build": "bool",
"options": "{}",
"network": "{}|null",
"network_description": "{}|null",
"host_network": "bool",
"host_pid": "bool",
"host_ipc": "bool",
"host_dbus": "bool",
"privileged": ["NET_ADMIN", "SYS_ADMIN"],
"apparmor": "disable|default|profile",
"devices": ["/dev/xy"],
"udev": "bool",
"auto_uart": "bool",
"icon": "bool",
"logo": "bool",
"changelog": "bool",
"hassio_api": "bool",
"hassio_role": "default|homeassistant|manager|admin",
"homeassistant_api": "bool",
"auth_api": "bool",
"full_access": "bool",
"protected": "bool",
"rating": "1-6",
"stdin": "bool",
"webui": "null|http(s)://[HOST]:port/xy/zx",
"gpio": "bool",
"kernel_modules": "bool",
"devicetree": "bool",
"docker_api": "bool",
"audio": "bool",
"audio_input": "null|0,0",
"audio_output": "null|0,0",
"services_role": "['service:access']",
"discovery": "['service']",
"ip_address": "ip address",
"ingress": "bool",
"ingress_entry": "null|/api/hassio_ingress/slug",
"ingress_url": "null|/api/hassio_ingress/slug/entry.html",
"ingress_port": "null|int",
"ingress_panel": "null|bool"
"name": "xy bla",
"slug": "xdssd_xybla",
"hostname": "xdssd-xybla",
"dns": [],
"description": "description",
"long_description": "null|markdown",
"auto_update": "bool",
"url": "null|url of addon",
"detached": "bool",
"available": "bool",
"arch": ["armhf", "aarch64", "i386", "amd64"],
"machine": "[raspberrypi2, tinker]",
"homeassistant": "null|min Home Assistant version",
"repository": "12345678|null",
"version": "null|VERSION_INSTALLED",
"last_version": "LAST_VERSION",
"state": "none|started|stopped",
"boot": "auto|manual",
"build": "bool",
"options": "{}",
"network": "{}|null",
"network_description": "{}|null",
"host_network": "bool",
"host_pid": "bool",
"host_ipc": "bool",
"host_dbus": "bool",
"privileged": ["NET_ADMIN", "SYS_ADMIN"],
"apparmor": "disable|default|profile",
"devices": ["/dev/xy"],
"udev": "bool",
"auto_uart": "bool",
"icon": "bool",
"logo": "bool",
"changelog": "bool",
"hassio_api": "bool",
"hassio_role": "default|homeassistant|manager|admin",
"homeassistant_api": "bool",
"auth_api": "bool",
"full_access": "bool",
"protected": "bool",
"rating": "1-6",
"stdin": "bool",
"webui": "null|http(s)://[HOST]:port/xy/zx",
"gpio": "bool",
"kernel_modules": "bool",
"devicetree": "bool",
"docker_api": "bool",
"audio": "bool",
"audio_input": "null|0,0",
"audio_output": "null|0,0",
"services_role": "['service:access']",
"discovery": "['service']",
"ip_address": "ip address",
"ingress": "bool",
"ingress_entry": "null|/api/hassio_ingress/slug",
"ingress_url": "null|/api/hassio_ingress/slug/entry.html",
"ingress_port": "null|int",
"ingress_panel": "null|bool"
}
```
@ -548,15 +556,15 @@ Get all available addons.
```json
{
"boot": "auto|manual",
"auto_update": "bool",
"network": {
"CONTAINER": "port|[ip, port]"
},
"options": {},
"audio_output": "null|0,0",
"audio_input": "null|0,0",
"ingress_panel": "bool"
"boot": "auto|manual",
"auto_update": "bool",
"network": {
"CONTAINER": "port|[ip, port]"
},
"options": {},
"audio_output": "null|0,0",
"audio_input": "null|0,0",
"ingress_panel": "bool"
}
```
@ -568,7 +576,7 @@ This function is not callable by itself.
```json
{
"protected": "bool",
"protected": "bool"
}
```
@ -597,16 +605,17 @@ Only supported for local build addons
Write data to add-on stdin
- GET `/addons/{addon}/stats`
```json
{
"cpu_percent": 0.0,
"memory_usage": 283123,
"memory_limit": 329392,
"memory_percent": 1.4,
"network_tx": 0,
"network_rx": 0,
"blk_read": 0,
"blk_write": 0
"cpu_percent": 0.0,
"memory_usage": 283123,
"memory_limit": 329392,
"memory_percent": 1.4,
"network_tx": 0,
"network_rx": 0,
"blk_read": 0,
"blk_write": 0
}
```
@ -618,7 +627,7 @@ Create a new Session for access to ingress service.
```json
{
"session": "token"
"session": "token"
}
```
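A hedged sketch of how a client could use this endpoint, shown purely as an illustration: request a session token, then attach it as a cookie on later ingress requests (see "Need ingress session as cookie" below). The base URL, the `X-Hassio-Key` header, the `ingress_session` cookie name, and the ingress path are assumptions that are not spelled out in this diff.

```python
# Illustrative only: create an ingress session and reuse it as a cookie.
# Base URL, auth header, cookie name, and ingress path are assumptions.
import os

import requests

SUPERVISOR = "http://hassio"  # assumed internal hostname for add-ons
HEADERS = {"X-Hassio-Key": os.environ.get("HASSIO_TOKEN", "")}

resp = requests.post(f"{SUPERVISOR}/ingress/session", headers=HEADERS)
session = resp.json()["data"]["session"]  # responses use the ok/data envelope

page = requests.get(
    f"{SUPERVISOR}/ingress/some_addon_token",  # hypothetical ingress path
    headers=HEADERS,
    cookies={"ingress_session": session},  # assumed cookie name
)
print(page.status_code)
```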
@ -628,14 +637,14 @@ Return a list of enabled panels.
```json
{
"panels": {
"addon_slug": {
"enable": "boolean",
"icon": "mdi:...",
"title": "title",
"admin": "boolean"
}
"panels": {
"addon_slug": {
"enable": "boolean",
"icon": "mdi:...",
"title": "title",
"admin": "boolean"
}
}
}
```
@ -647,41 +656,45 @@ Need ingress session as cookie.
### discovery
- GET `/discovery`
```json
{
"discovery": [
{
"addon": "slug",
"service": "name",
"uuid": "uuid",
"config": {}
}
]
"discovery": [
{
"addon": "slug",
"service": "name",
"uuid": "uuid",
"config": {}
}
]
}
```
- GET `/discovery/{UUID}`
```json
{
"addon": "slug",
"service": "name",
"uuid": "uuid",
"config": {}
"addon": "slug",
"service": "name",
"uuid": "uuid",
"config": {}
}
```
- POST `/discovery`
```json
{
"service": "name",
"config": {}
"service": "name",
"config": {}
}
```
return:
```json
{
"uuid": "uuid"
"uuid": "uuid"
}
```
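As a concrete illustration of the discovery flow above, the sketch below posts a discovery message and reads it back by UUID. The `http://hassio` base URL and the `X-Hassio-Key` header are assumptions about how an add-on reaches the Supervisor; the payload shapes follow the JSON bodies shown above.

```python
# Hedged sketch: register a discovery message, then fetch it by its UUID.
# Base URL and auth header are assumptions; payloads mirror the JSON above.
import os

import requests

SUPERVISOR = "http://hassio"  # assumed internal hostname for add-ons
HEADERS = {"X-Hassio-Key": os.environ.get("HASSIO_TOKEN", "")}

payload = {"service": "mqtt", "config": {"host": "core-mosquitto", "port": 1883}}
created = requests.post(f"{SUPERVISOR}/discovery", json=payload, headers=HEADERS)
uuid = created.json()["data"]["uuid"]  # responses use the ok/data envelope

info = requests.get(f"{SUPERVISOR}/discovery/{uuid}", headers=HEADERS)
print(info.json()["data"])  # {"addon": ..., "service": "mqtt", "uuid": ..., "config": {...}}
```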
@ -690,42 +703,45 @@ return:
### Services
- GET `/services`
```json
{
"services": [
{
"slug": "name",
"available": "bool",
"providers": "list"
}
]
"services": [
{
"slug": "name",
"available": "bool",
"providers": "list"
}
]
}
```
#### MQTT
- GET `/services/mqtt`
```json
{
"addon": "name",
"host": "xy",
"port": "8883",
"ssl": "bool",
"username": "optional",
"password": "optional",
"protocol": "3.1.1"
"addon": "name",
"host": "xy",
"port": "8883",
"ssl": "bool",
"username": "optional",
"password": "optional",
"protocol": "3.1.1"
}
```
- POST `/services/mqtt`
```json
{
"host": "xy",
"port": "8883",
"ssl": "bool|optional",
"username": "optional",
"password": "optional",
"protocol": "3.1.1"
"host": "xy",
"port": "8883",
"ssl": "bool|optional",
"username": "optional",
"password": "optional",
"protocol": "3.1.1"
}
```
@ -734,45 +750,49 @@ return:
### Misc
- GET `/info`
```json
{
"supervisor": "version",
"homeassistant": "version",
"hassos": "null|version",
"hostname": "name",
"machine": "type",
"arch": "arch",
"supported_arch": ["arch1", "arch2"],
"channel": "stable|beta|dev",
"logging": "debug|info|warning|error|critical",
"timezone": "Europe/Zurich"
"supervisor": "version",
"homeassistant": "version",
"hassos": "null|version",
"hostname": "name",
"machine": "type",
"arch": "arch",
"supported_arch": ["arch1", "arch2"],
"channel": "stable|beta|dev",
"logging": "debug|info|warning|error|critical",
"timezone": "Europe/Zurich"
}
```
### DNS
- GET `/dns/info`
```json
{
"host": "ip-address",
"version": "1",
"latest_version": "2",
"servers": ["dns://8.8.8.8"],
"locals": ["dns://xy"]
"host": "ip-address",
"version": "1",
"latest_version": "2",
"servers": ["dns://8.8.8.8"],
"locals": ["dns://xy"]
}
```
- POST `/dns/options`
```json
{
"servers": ["dns://8.8.8.8"]
"servers": ["dns://8.8.8.8"]
}
```
- POST `/dns/update`
```json
{
"version": "VERSION"
"version": "VERSION"
}
```
@ -781,16 +801,17 @@ return:
- GET `/dns/logs`
- GET `/dns/stats`
```json
{
"cpu_percent": 0.0,
"memory_usage": 283123,
"memory_limit": 329392,
"memory_percent": 1.4,
"network_tx": 0,
"network_rx": 0,
"blk_read": 0,
"blk_write": 0
"cpu_percent": 0.0,
"memory_usage": 283123,
"memory_limit": 329392,
"memory_percent": 1.4,
"network_tx": 0,
"network_rx": 0,
"blk_read": 0,
"blk_write": 0
}
```
@ -802,6 +823,7 @@ supervisor.
You can call POST `/auth`.
We support:
- Json `{ "user|name": "...", "password": "..." }`
- application/x-www-form-urlencoded `user|name=...&password=...`
- BasicAuth
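The sketch below shows all three request styles against `/auth`. It assumes an add-on reaches the Supervisor at the internal hostname `http://hassio` and authenticates itself with its `HASSIO_TOKEN` in the `X-Hassio-Key` header; those names are conventions of add-on development and are not part of this diff.

```python
# Minimal sketch of the three supported /auth request styles.
# Base URL, header name, and token variable are assumptions.
import os

import requests

SUPERVISOR = "http://hassio"  # assumed internal hostname for add-ons
HEADERS = {"X-Hassio-Key": os.environ.get("HASSIO_TOKEN", "")}
CREDS = {"username": "ha-user", "password": "secret"}

# 1) JSON body
r = requests.post(f"{SUPERVISOR}/auth", json=CREDS, headers=HEADERS)
print(r.status_code)  # 200 on success, 400 on error (see the envelope at the top)

# 2) application/x-www-form-urlencoded body
r = requests.post(f"{SUPERVISOR}/auth", data=CREDS, headers=HEADERS)

# 3) HTTP BasicAuth
r = requests.post(
    f"{SUPERVISOR}/auth",
    auth=(CREDS["username"], CREDS["password"]),
    headers=HEADERS,
)
```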


@ -245,7 +245,7 @@ class AddonManager(CoreSysAttributes):
raise AddonsError() from None
else:
self.data.update(store)
_LOGGER.info("Add-on '%s' successfully rebuilded", slug)
_LOGGER.info("Add-on '%s' successfully rebuilt", slug)
# restore state
if last_state == STATE_STARTED:


@ -90,12 +90,12 @@ from ..const import (
from ..coresys import CoreSys
from ..discovery.validate import valid_discovery_service
from ..validate import (
alsa_device,
DOCKER_PORTS,
DOCKER_PORTS_DESCRIPTION,
network_port,
token,
uuid_match,
)
_LOGGER: logging.Logger = logging.getLogger(__name__)
@ -182,7 +182,7 @@ SCHEMA_ADDON_CONFIG = vol.Schema(
),
vol.Optional(ATTR_INGRESS, default=False): vol.Boolean(),
vol.Optional(ATTR_INGRESS_PORT, default=8099): vol.Any(
network_port, vol.Equal(0)
),
vol.Optional(ATTR_INGRESS_ENTRY): vol.Coerce(str),
vol.Optional(ATTR_PANEL_ICON, default="mdi:puzzle"): vol.Coerce(str),
@ -269,8 +269,8 @@ SCHEMA_ADDON_USER = vol.Schema(
{
vol.Required(ATTR_VERSION): vol.Coerce(str),
vol.Optional(ATTR_IMAGE): vol.Coerce(str),
vol.Optional(ATTR_UUID, default=lambda: uuid.uuid4().hex): uuid_match,
vol.Optional(ATTR_ACCESS_TOKEN): token,
vol.Optional(ATTR_INGRESS_TOKEN, default=secrets.token_urlsafe): vol.Coerce(
str
),
@ -278,8 +278,8 @@ SCHEMA_ADDON_USER = vol.Schema(
vol.Optional(ATTR_AUTO_UPDATE, default=False): vol.Boolean(),
vol.Optional(ATTR_BOOT): vol.In([BOOT_AUTO, BOOT_MANUAL]),
vol.Optional(ATTR_NETWORK): DOCKER_PORTS,
vol.Optional(ATTR_AUDIO_OUTPUT): alsa_device,
vol.Optional(ATTR_AUDIO_INPUT): alsa_device,
vol.Optional(ATTR_PROTECTED, default=True): vol.Boolean(),
vol.Optional(ATTR_INGRESS_PANEL, default=False): vol.Boolean(),
},
@ -386,7 +386,7 @@ def _single_validate(coresys: CoreSys, typ: str, value: Any, key: str):
elif typ.startswith(V_URL):
return vol.Url()(value)
elif typ.startswith(V_PORT):
return network_port(value)
elif typ.startswith(V_MATCH):
return vol.Match(match.group("match"))(str(value))
elif typ.startswith(V_LIST):


@ -90,7 +90,7 @@ from ..const import (
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIError
from ..validate import alsa_device, DOCKER_PORTS
from .utils import api_process, api_process_raw, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
@ -103,8 +103,8 @@ SCHEMA_OPTIONS = vol.Schema(
vol.Optional(ATTR_BOOT): vol.In([BOOT_AUTO, BOOT_MANUAL]),
vol.Optional(ATTR_NETWORK): vol.Any(None, DOCKER_PORTS),
vol.Optional(ATTR_AUTO_UPDATE): vol.Boolean(),
vol.Optional(ATTR_AUDIO_OUTPUT): alsa_device,
vol.Optional(ATTR_AUDIO_INPUT): alsa_device,
vol.Optional(ATTR_INGRESS_PANEL): vol.Boolean(),
}
)


@ -24,13 +24,13 @@ from ..const import (
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIError
from ..validate import dns_server_list
from .utils import api_process, api_process_raw, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
# pylint: disable=no-value-for-parameter
SCHEMA_OPTIONS = vol.Schema({vol.Optional(ATTR_SERVERS): dns_server_list})
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): vol.Coerce(str)})


@ -3,11 +3,12 @@ import asyncio
import logging
from typing import Any, Awaitable, Dict
from aiohttp import web
import voluptuous as vol
from ..const import (
ATTR_BOARD,
ATTR_BOOT,
ATTR_VERSION,
ATTR_VERSION_CLI,
ATTR_VERSION_CLI_LATEST,
@ -33,6 +34,7 @@ class APIHassOS(CoreSysAttributes):
ATTR_VERSION_LATEST: self.sys_hassos.version_latest,
ATTR_VERSION_CLI_LATEST: self.sys_hassos.version_cli_latest,
ATTR_BOARD: self.sys_hassos.board,
ATTR_BOOT: self.sys_dbus.rauc.boot_slot,
}
@api_process


@ -33,7 +33,7 @@ from ..const import (
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIError
from ..validate import docker_image, network_port
from .utils import api_process, api_process_raw, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
@ -42,9 +42,9 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
SCHEMA_OPTIONS = vol.Schema(
{
vol.Optional(ATTR_BOOT): vol.Boolean(),
vol.Inclusive(ATTR_IMAGE, "custom_hass"): vol.Maybe(docker_image),
vol.Inclusive(ATTR_LAST_VERSION, "custom_hass"): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_PORT): network_port,
vol.Optional(ATTR_PASSWORD): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_SSL): vol.Boolean(),
vol.Optional(ATTR_WATCHDOG): vol.Boolean(),


@ -41,7 +41,7 @@ from ..const import (
from ..coresys import CoreSysAttributes
from ..exceptions import APIError
from ..utils.validate import validate_timezone
from ..validate import channels, log_level, repositories, wait_boot
from .utils import api_process, api_process_raw, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
@ -49,11 +49,11 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
# pylint: disable=no-value-for-parameter
SCHEMA_OPTIONS = vol.Schema(
{
vol.Optional(ATTR_CHANNEL): channels,
vol.Optional(ATTR_ADDONS_REPOSITORIES): repositories,
vol.Optional(ATTR_TIMEZONE): validate_timezone,
vol.Optional(ATTR_WAIT_BOOT): wait_boot,
vol.Optional(ATTR_LOGGING): log_level,
vol.Optional(ATTR_DEBUG): vol.Boolean(),
vol.Optional(ATTR_DEBUG_BLOCK): vol.Boolean(),
}


@ -2,7 +2,7 @@
from pathlib import Path
from ipaddress import ip_network
HASSIO_VERSION = "193"
HASSIO_VERSION = "194"
URL_HASSIO_ADDONS = "https://github.com/home-assistant/hassio-addons"


@ -79,7 +79,11 @@ class HassIO(CoreSysAttributes):
"""Start Hass.io orchestration."""
await self.sys_api.start()
# Mark booted partition as healthy
if self.sys_hassos.available:
await self.sys_hassos.mark_healthy()
# On release channel, try update itself
if self.sys_supervisor.need_update:
try:
if self.sys_dev:
@ -92,7 +96,7 @@ class HassIO(CoreSysAttributes):
"future version of Home Assistant!"
)
# Start addon mark as initialize
await self.sys_addons.boot(STARTUP_INITIALIZE)
try:


@ -1,58 +1,17 @@
{
"raspberrypi": [
"armhf"
],
"raspberrypi2": [
"armv7",
"armhf"
],
"raspberrypi3": [
"armv7",
"armhf"
],
"raspberrypi3-64": [
"aarch64",
"armv7",
"armhf"
],
"raspberrypi4": [
"armv7",
"armhf"
],
"raspberrypi4-64": [
"aarch64",
"armv7",
"armhf"
],
"tinker": [
"armv7",
"armhf"
],
"odroid-c2": [
"aarch64"
],
"odroid-xu": [
"armv7",
"armhf"
],
"orangepi-prime": [
"aarch64"
],
"qemux86": [
"i386"
],
"qemux86-64": [
"amd64",
"i386"
],
"qemuarm": [
"armhf"
],
"qemuarm-64": [
"aarch64"
],
"intel-nuc": [
"amd64",
"i386"
]
}
"raspberrypi": ["armhf"],
"raspberrypi2": ["armv7", "armhf"],
"raspberrypi3": ["armv7", "armhf"],
"raspberrypi3-64": ["aarch64", "armv7", "armhf"],
"raspberrypi4": ["armv7", "armhf"],
"raspberrypi4-64": ["aarch64", "armv7", "armhf"],
"tinker": ["armv7", "armhf"],
"odroid-c2": ["aarch64", "armv7", "armhf"],
"odroid-n2": ["aarch64", "armv7", "armhf"],
"odroid-xu": ["armv7", "armhf"],
"qemux86": ["i386"],
"qemux86-64": ["amd64", "i386"],
"qemuarm": ["armhf"],
"qemuarm-64": ["aarch64"],
"intel-nuc": ["amd64", "i386"]
}
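This map pairs each machine type with its supported architectures in preference order; the `test_*_arch` assertions near the end of this diff check exactly these lists. Below is a minimal, hypothetical sketch of consuming such a file, not the Supervisor's actual loader; the file name and function are invented for illustration.

```python
# Hypothetical reader for a machine -> supported-architectures map like the
# JSON above. File name and lookup logic are illustrative only.
import json
from pathlib import Path
from typing import List


def supported_arch(map_file: Path, machine: str) -> List[str]:
    """Return the ordered list of architectures a machine supports."""
    arch_map = json.loads(map_file.read_text())
    return arch_map.get(machine, [])


if __name__ == "__main__":
    archs = supported_arch(Path("arch.json"), "raspberrypi4-64")
    print(archs)     # ["aarch64", "armv7", "armhf"]
    print(archs[0])  # the native/default architecture comes first
```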


@ -1,5 +1,7 @@
"""D-Bus interface for rauc."""
import logging
from typing import Optional
from enum import Enum
from .interface import DBusInterface
from .utils import dbus_connected
@ -12,9 +14,25 @@ DBUS_NAME = "de.pengutronix.rauc"
DBUS_OBJECT = "/"
class RaucState(str, Enum):
"""Rauc slot states."""
GOOD = "good"
BAD = "bad"
ACTIVE = "active"
class Rauc(DBusInterface):
"""Handle D-Bus interface for rauc."""
def __init__(self):
"""Initialize Properties."""
self._operation: Optional[str] = None
self._last_error: Optional[str] = None
self._compatible: Optional[str] = None
self._variant: Optional[str] = None
self._boot_slot: Optional[str] = None
async def connect(self):
"""Connect to D-Bus."""
try:
@ -24,6 +42,31 @@ class Rauc(DBusInterface):
except DBusInterfaceError:
_LOGGER.warning("Host has no rauc support. OTA updates have been disabled.")
@property
def operation(self) -> Optional[str]:
"""Return the current (global) operation."""
return self._operation
@property
def last_error(self) -> Optional[str]:
"""Return the last message of the last error that occurred."""
return self._last_error
@property
def compatible(self) -> Optional[str]:
"""Return the system compatible string."""
return self._compatible
@property
def variant(self) -> Optional[str]:
"""Return the system variant string."""
return self._variant
@property
def boot_slot(self) -> Optional[str]:
"""Return the used boot slot."""
return self._boot_slot
@dbus_connected
def install(self, raucb_file):
"""Install rauc bundle file.
@ -40,14 +83,6 @@ class Rauc(DBusInterface):
"""
return self.dbus.Installer.GetSlotStatus()
@dbus_connected
def get_properties(self):
"""Return rauc informations.
Return a coroutine.
"""
return self.dbus.get_properties(f"{DBUS_NAME}.Installer")
@dbus_connected
def signal_completed(self):
"""Return a signal wrapper for completed signal.
@ -55,3 +90,25 @@ class Rauc(DBusInterface):
Return a coroutine.
"""
return self.dbus.wait_signal(f"{DBUS_NAME}.Installer.Completed")
@dbus_connected
def mark(self, state: RaucState, slot_identifier: str):
"""Get slot status.
Return a coroutine.
"""
return self.dbus.Installer.Mark(state, slot_identifier)
@dbus_connected
async def update(self):
"""Update Properties."""
data = await self.dbus.get_properties(f"{DBUS_NAME}.Installer")
if not data:
_LOGGER.warning("Can't get properties for rauc")
return
self._operation = data.get("Operation")
self._last_error = data.get("LastError")
self._compatible = data.get("Compatible")
self._variant = data.get("Variant")
self._boot_slot = data.get("BootSlot")


@ -1,11 +1,11 @@
"""Discovery service for AdGuard."""
import voluptuous as vol
from hassio.validate import network_port
from ..const import ATTR_HOST, ATTR_PORT
SCHEMA = vol.Schema(
{vol.Required(ATTR_HOST): vol.Coerce(str), vol.Required(ATTR_PORT): network_port}
)


@ -1,11 +1,11 @@
"""Discovery service for Almond."""
import voluptuous as vol
from hassio.validate import network_port
from ..const import ATTR_HOST, ATTR_PORT
SCHEMA = vol.Schema(
{vol.Required(ATTR_HOST): vol.Coerce(str), vol.Required(ATTR_PORT): network_port}
)


@ -1,7 +1,7 @@
"""Discovery service for MQTT."""
import voluptuous as vol
from hassio.validate import network_port
from ..const import ATTR_HOST, ATTR_PORT, ATTR_API_KEY, ATTR_SERIAL
@ -9,7 +9,7 @@ from ..const import ATTR_HOST, ATTR_PORT, ATTR_API_KEY, ATTR_SERIAL
SCHEMA = vol.Schema(
{
vol.Required(ATTR_HOST): vol.Coerce(str),
vol.Required(ATTR_PORT): network_port,
vol.Required(ATTR_SERIAL): vol.Coerce(str),
vol.Required(ATTR_API_KEY): vol.Coerce(str),
}


@ -1,11 +1,11 @@
"""Discovery service for Home Panel."""
import voluptuous as vol
from hassio.validate import network_port
from ..const import ATTR_HOST, ATTR_PORT
SCHEMA = vol.Schema(
{vol.Required(ATTR_HOST): vol.Coerce(str), vol.Required(ATTR_PORT): network_port}
)


@ -1,7 +1,7 @@
"""Discovery service for MQTT."""
import voluptuous as vol
from hassio.validate import network_port
from ..const import (
ATTR_HOST,
@ -16,7 +16,7 @@ from ..const import (
SCHEMA = vol.Schema(
{
vol.Required(ATTR_HOST): vol.Coerce(str),
vol.Required(ATTR_PORT): network_port,
vol.Optional(ATTR_USERNAME): vol.Coerce(str),
vol.Optional(ATTR_PASSWORD): vol.Coerce(str),
vol.Optional(ATTR_SSL, default=False): vol.Boolean(),


@ -6,7 +6,7 @@ import voluptuous as vol
from ..const import ATTR_ADDON, ATTR_CONFIG, ATTR_DISCOVERY, ATTR_SERVICE, ATTR_UUID
from ..utils.validate import schema_or
from ..validate import uuid_match
def valid_discovery_service(service):
@ -31,7 +31,7 @@ SCHEMA_DISCOVERY = vol.Schema(
[
vol.Schema(
{
vol.Required(ATTR_UUID): uuid_match,
vol.Required(ATTR_ADDON): vol.Coerce(str),
vol.Required(ATTR_SERVICE): valid_discovery_service,
vol.Required(ATTR_CONFIG): vol.Maybe(dict),


@ -17,7 +17,7 @@ from .docker.stats import DockerStats
from .exceptions import CoreDNSError, CoreDNSUpdateError, DockerAPIError
from .misc.forwarder import DNSForward
from .utils.json import JsonConfig
from .validate import dns_url, SCHEMA_DNS_CONFIG
_LOGGER: logging.Logger = logging.getLogger(__name__)
@ -113,18 +113,20 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
self.version = self.instance.version
self.save_data()
# Fix dns server handling before 194 / Cleanup with version 200
if DNS_SERVERS == self.servers:
self.servers.clear()
self.save_data()
# Start DNS forwarder
self.sys_create_task(self.forwarder.start(self.sys_docker.network.dns))
self._update_local_resolv()
# Reset container configuration
if await self.instance.is_running():
with suppress(DockerAPIError):
await self.instance.stop()
# Run CoreDNS
with suppress(CoreDNSError):
await self.start()
if await self.instance.is_running():
await self.restart()
else:
await self.start()
async def unload(self) -> None:
"""Unload DNS forwarder."""
@ -195,8 +197,10 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
raise CoreDNSError() from None
async def reset(self) -> None:
"""Reset Config / Hosts."""
self.servers = DNS_SERVERS
"""Reset DNS and hosts."""
# Reset manually defined DNS
self.servers.clear()
self.save_data()
# Resets hosts
with suppress(OSError):
@ -216,11 +220,20 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
_LOGGER.error("Can't read coredns template file: %s", err)
raise CoreDNSError() from None
# Prepare DNS serverlist: Prio 1 Manual, Prio 2 Local, Prio 3 Fallback
local_dns: List[str] = self.sys_host.network.dns_servers or ["dns://127.0.0.11"]
servers: List[str] = self.servers + local_dns + DNS_SERVERS
_LOGGER.debug(
"config-dns = %s, local-dns = %s , backup-dns = %s",
self.servers,
local_dns,
DNS_SERVERS,
)
for server in servers:
try:
dns_url(server)
if server not in dns_servers:
dns_servers.append(server)
except vol.Invalid:
@ -346,33 +359,3 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
await self.instance.install(self.version)
except DockerAPIError:
_LOGGER.error("Repairing of CoreDNS fails")
def _update_local_resolv(self) -> None:
"""Update local resolv file."""
resolv_lines: List[str] = []
nameserver = f"nameserver {self.sys_docker.network.dns!s}"
# Read resolv config
try:
with RESOLV_CONF.open("r") as resolv:
for line in resolv.readlines():
if not line:
continue
resolv_lines.append(line.strip())
except OSError as err:
_LOGGER.warning("Can't read local resolv: %s", err)
return
if nameserver in resolv_lines:
return
_LOGGER.info("Update resolv from Supervisor")
# Write config back to resolv
resolv_lines.append(nameserver)
try:
with RESOLV_CONF.open("w") as resolv:
for line in resolv_lines:
resolv.write(f"{line}\n")
except OSError as err:
_LOGGER.warning("Can't write local resolv: %s", err)
return


@ -54,4 +54,9 @@ class DockerDNS(DockerInterface, CoreSysAttributes):
)
self._meta = docker_container.attrs
_LOGGER.info("Start DNS %s with version %s", self.image, self.version)
_LOGGER.info(
"Start DNS %s with version %s - %s",
self.image,
self.version,
self.sys_docker.network.dns,
)


@ -427,9 +427,10 @@ class DockerInterface(CoreSysAttributes):
continue
available_version.append(version)
if not available_version:
raise ValueError()
except (docker.errors.DockerException, ValueError):
_LOGGER.debug("No version found for %s", self.image)
raise DockerAPIError()
else:


@ -17,6 +17,7 @@ from .exceptions import (
HassOSUpdateError,
DockerAPIError,
)
from .dbus.rauc import RaucState
_LOGGER: logging.Logger = logging.getLogger(__name__)
@ -111,24 +112,27 @@ class HassOS(CoreSysAttributes):
async def load(self) -> None:
"""Load HassOS data."""
try:
# Check needed host functions
assert self.sys_dbus.rauc.is_connected
assert self.sys_dbus.systemd.is_connected
assert self.sys_dbus.hostname.is_connected
if self.sys_host.info.cpe is None:
raise TypeError()
cpe = CPE(self.sys_host.info.cpe)
if cpe.get_product()[0] != "hassos":
raise TypeError()
except TypeError:
_LOGGER.debug("Found no HassOS")
return
else:
# Store meta data
self._available = True
self._version = cpe.get_version()[0]
self._board = cpe.get_target_hardware()[0]
_LOGGER.info("Detect HassOS %s on host system", self.version)
await self.sys_dbus.rauc.update()
_LOGGER.info(
"Detect HassOS %s / BootSlot %s", self.version, self.sys_dbus.rauc.boot_slot
)
with suppress(DockerAPIError):
await self.instance.attach(tag="latest")
@ -174,8 +178,8 @@ class HassOS(CoreSysAttributes):
return
# Update fails
await self.sys_dbus.rauc.update()
_LOGGER.error("HassOS update fails with: %s", self.sys_dbus.rauc.last_error)
raise HassOSUpdateError()
async def update_cli(self, version: Optional[str] = None) -> None:
@ -206,3 +210,12 @@ class HassOS(CoreSysAttributes):
await self.instance.install(self.version_cli, latest=True)
except DockerAPIError:
_LOGGER.error("Repairing of HassOS CLI fails")
async def mark_healthy(self):
"""Set booted partition as good for rauc."""
try:
response = await self.sys_dbus.rauc.mark(RaucState.GOOD, "booted")
except DBusError:
_LOGGER.error("Can't mark booted partition as healty!")
else:
_LOGGER.info("Rauc: %s - %s", self.sys_dbus.rauc.boot_slot, response[1])


@ -4,7 +4,7 @@ from typing import Any, Dict
from hassio.addons.addon import Addon
from hassio.exceptions import ServicesError
from hassio.validate import network_port
import voluptuous as vol
from ..const import (
@ -26,7 +26,7 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
SCHEMA_SERVICE_MQTT = vol.Schema(
{
vol.Required(ATTR_HOST): vol.Coerce(str),
vol.Required(ATTR_PORT): network_port,
vol.Optional(ATTR_USERNAME): vol.Coerce(str),
vol.Optional(ATTR_PASSWORD): vol.Coerce(str),
vol.Optional(ATTR_SSL, default=False): vol.Boolean(),


@ -31,7 +31,7 @@ from ..const import (
SNAPSHOT_FULL,
SNAPSHOT_PARTIAL,
)
from ..validate import docker_image, network_port, repositories
ALL_FOLDERS = [FOLDER_HOMEASSISTANT, FOLDER_SHARE, FOLDER_ADDONS, FOLDER_SSL]
@ -59,11 +59,11 @@ SCHEMA_SNAPSHOT = vol.Schema(
vol.Optional(ATTR_HOMEASSISTANT, default=dict): vol.Schema(
{
vol.Optional(ATTR_VERSION): vol.Coerce(str),
vol.Inclusive(ATTR_IMAGE, "custom_hass"): docker_image,
vol.Inclusive(ATTR_LAST_VERSION, "custom_hass"): vol.Coerce(str),
vol.Optional(ATTR_BOOT, default=True): vol.Boolean(),
vol.Optional(ATTR_SSL, default=False): vol.Boolean(),
vol.Optional(ATTR_PORT, default=8123): network_port,
vol.Optional(ATTR_PASSWORD): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_REFRESH_TOKEN): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_WATCHDOG, default=True): vol.Boolean(),
@ -90,7 +90,7 @@ SCHEMA_SNAPSHOT = vol.Schema(
],
unique_addons,
),
vol.Optional(ATTR_REPOSITORIES, default=list): repositories,
},
extra=vol.ALLOW_EXTRA,
)


@ -124,9 +124,10 @@ def secure_path(tar: tarfile.TarFile) -> Generator[tarfile.TarInfo, None, None]:
for member in tar:
file_path = Path(member.name)
try:
if file_path.is_absolute():
raise ValueError()
Path("/fake", file_path).resolve().relative_to("/fake")
except (ValueError, RuntimeError):
_LOGGER.warning("Issue with file %s", file_path)
continue
else:


@ -1,6 +1,7 @@
"""Validate functions."""
import re
import uuid
import ipaddress
import voluptuous as vol
@ -35,27 +36,41 @@ from .const import (
CHANNEL_BETA,
CHANNEL_DEV,
CHANNEL_STABLE,
DNS_SERVERS,
)
from .utils.validate import validate_timezone
RE_REPOSITORY = re.compile(r"^(?P<url>[^#]+)(?:#(?P<branch>[\w\-]+))?$")
# pylint: disable=no-value-for-parameter
# pylint: disable=invalid-name
network_port = vol.All(vol.Coerce(int), vol.Range(min=1, max=65535))
wait_boot = vol.All(vol.Coerce(int), vol.Range(min=1, max=60))
docker_image = vol.Match(r"^[\w{}]+/[\-\w{}]+$")
alsa_device = vol.Maybe(vol.Match(r"\d+,\d+"))
channels = vol.In([CHANNEL_STABLE, CHANNEL_BETA, CHANNEL_DEV])
uuid_match = vol.Match(r"^[0-9a-f]{32}$")
sha256 = vol.Match(r"^[0-9a-f]{64}$")
token = vol.Match(r"^[0-9a-f]{32,256}$")
log_level = vol.In(["debug", "info", "warning", "error", "critical"])
def dns_url(url: str) -> str:
""" takes a DNS url (str) and validates that it matches the scheme dns://<ip address>."""
if not url.lower().startswith("dns://"):
raise vol.Invalid("Doesn't start with dns://")
address: str = url[6:] # strip the dns:// off
try:
ipaddress.ip_address(address) # matches ipv4 or ipv6 addresses
except ValueError:
raise vol.Invalid("Invalid DNS URL: {}".format(url))
return url
dns_server_list = vol.All(vol.Length(max=8), [dns_url])
def validate_repository(repository: str) -> str:
"""Validate a valid repository."""
data = RE_REPOSITORY.match(repository)
if not data:
@ -69,13 +84,13 @@ def validate_repository(repository):
# pylint: disable=no-value-for-parameter
repositories = vol.All([validate_repository], vol.Unique())
DOCKER_PORTS = vol.Schema(
{
vol.All(vol.Coerce(str), vol.Match(r"^\d+(?:/tcp|/udp)?$")): vol.Maybe(
network_port
)
}
)
@ -88,13 +103,13 @@ DOCKER_PORTS_DESCRIPTION = vol.Schema(
# pylint: disable=no-value-for-parameter
SCHEMA_HASS_CONFIG = vol.Schema(
{
vol.Optional(ATTR_UUID, default=lambda: uuid.uuid4().hex): uuid_match,
vol.Optional(ATTR_VERSION): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_ACCESS_TOKEN): token,
vol.Optional(ATTR_BOOT, default=True): vol.Boolean(),
vol.Inclusive(ATTR_IMAGE, "custom_hass"): docker_image,
vol.Inclusive(ATTR_LAST_VERSION, "custom_hass"): vol.Coerce(str),
vol.Optional(ATTR_PORT, default=8123): network_port,
vol.Optional(ATTR_PASSWORD): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_REFRESH_TOKEN): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_SSL, default=False): vol.Boolean(),
@ -109,7 +124,7 @@ SCHEMA_HASS_CONFIG = vol.Schema(
SCHEMA_UPDATER_CONFIG = vol.Schema(
{
vol.Optional(ATTR_CHANNEL, default=CHANNEL_STABLE): channels,
vol.Optional(ATTR_HOMEASSISTANT): vol.Coerce(str),
vol.Optional(ATTR_HASSIO): vol.Coerce(str),
vol.Optional(ATTR_HASSOS): vol.Coerce(str),
@ -128,9 +143,9 @@ SCHEMA_HASSIO_CONFIG = vol.Schema(
vol.Optional(
ATTR_ADDONS_CUSTOM_LIST,
default=["https://github.com/hassio-addons/repository"],
): repositories,
vol.Optional(ATTR_WAIT_BOOT, default=5): wait_boot,
vol.Optional(ATTR_LOGGING, default="info"): log_level,
vol.Optional(ATTR_DEBUG, default=False): vol.Boolean(),
vol.Optional(ATTR_DEBUG_BLOCK, default=False): vol.Boolean(),
},
@ -138,16 +153,16 @@ SCHEMA_HASSIO_CONFIG = vol.Schema(
)
SCHEMA_AUTH_CONFIG = vol.Schema({sha256: sha256})
SCHEMA_INGRESS_CONFIG = vol.Schema(
{
vol.Required(ATTR_SESSION, default=dict): vol.Schema(
{token: vol.Coerce(float)}
),
vol.Required(ATTR_PORTS, default=dict): vol.Schema(
{vol.Coerce(str): network_port}
),
},
extra=vol.REMOVE_EXTRA,
@ -157,7 +172,7 @@ SCHEMA_INGRESS_CONFIG = vol.Schema(
SCHEMA_DNS_CONFIG = vol.Schema(
{
vol.Optional(ATTR_VERSION): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_SERVERS, default=list): dns_server_list,
},
extra=vol.REMOVE_EXTRA,
)


@ -1,16 +1,16 @@
aiohttp==3.6.1
async_timeout==3.0.1
attrs==19.3.0
cchardet==2.1.5
colorlog==4.1.0
cpe==1.2.1
cryptography==2.8
docker==4.1.0
gitpython==3.0.5
packaging==20.0
pytz==2019.3
pyudev==0.21.0
ruamel.yaml==0.15.100
uvloop==0.14.0
voluptuous==0.11.7
ptvsd==4.3.2


@ -1,6 +1,6 @@
flake8==3.7.9
pylint==2.4.4
pytest==5.3.2
pytest-timeout==1.3.4
pytest-aiohttp==0.3.0
black==19.10b0


@ -120,7 +120,17 @@ async def test_odroid_c2_arch(coresys, sys_machine, sys_supervisor):
await coresys.arch.load()
assert coresys.arch.default == "aarch64"
assert coresys.arch.supported == ["aarch64"]
assert coresys.arch.supported == ["aarch64", "armv7", "armhf"]
async def test_odroid_n2_arch(coresys, sys_machine, sys_supervisor):
"""Test arch for odroid-n2."""
sys_machine.return_value = "odroid-n2"
sys_supervisor.arch = "aarch64"
await coresys.arch.load()
assert coresys.arch.default == "aarch64"
assert coresys.arch.supported == ["aarch64", "armv7", "armhf"]
async def test_odroid_xu_arch(coresys, sys_machine, sys_supervisor):
@ -133,16 +143,6 @@ async def test_odroid_xu_arch(coresys, sys_machine, sys_supervisor):
assert coresys.arch.supported == ["armv7", "armhf"]
async def test_orangepi_prime_arch(coresys, sys_machine, sys_supervisor):
"""Test arch for orangepi_prime."""
sys_machine.return_value = "orangepi-prime"
sys_supervisor.arch = "aarch64"
await coresys.arch.load()
assert coresys.arch.default == "aarch64"
assert coresys.arch.supported == ["aarch64"]
async def test_intel_nuc_arch(coresys, sys_machine, sys_supervisor):
"""Test arch for intel-nuc."""
sys_machine.return_value = "intel-nuc"

tests/test_validate.py

@ -0,0 +1,67 @@
"""Test validators."""
import hassio.validate
import voluptuous.error
import pytest
GOOD_V4 = [
"dns://10.0.0.1", # random local
"dns://254.254.254.254", # random high numbers
"DNS://1.1.1.1", # cloudflare
"dns://9.9.9.9", # quad-9
]
GOOD_V6 = [
"dns://2606:4700:4700::1111", # cloudflare
"DNS://2606:4700:4700::1001", # cloudflare
]
BAD = ["hello world", "https://foo.bar", "", "dns://example.com"]
async def test_dns_url_v4_good():
""" tests the DNS validator with known-good ipv6 DNS URLs """
for url in GOOD_V4:
assert hassio.validate.dns_url(url)
async def test_dns_url_v6_good():
""" tests the DNS validator with known-good ipv6 DNS URLs """
for url in GOOD_V6:
assert hassio.validate.dns_url(url)
async def test_dns_server_list_v4():
""" test a list with v4 addresses """
assert hassio.validate.dns_server_list(GOOD_V4)
async def test_dns_server_list_v6():
""" test a list with v6 addresses """
assert hassio.validate.dns_server_list(GOOD_V6)
async def test_dns_server_list_combined():
""" test a list with both v4 and v6 addresses """
combined = GOOD_V4 + GOOD_V6
# test the matches
assert hassio.validate.dns_server_list(combined)
# test max_length is OK still
assert hassio.validate.dns_server_list(combined)
# test that it fails when the list is too long
with pytest.raises(voluptuous.error.Invalid):
hassio.validate.dns_server_list(combined + combined + combined + combined)
async def test_dns_server_list_bad():
""" test the bad list """
# test the matches
with pytest.raises(voluptuous.error.Invalid):
assert hassio.validate.dns_server_list(BAD)
async def test_dns_server_list_bad_combined():
""" test the bad list, combined with the good """
combined = GOOD_V4 + GOOD_V6 + BAD
with pytest.raises(voluptuous.error.Invalid):
# bad list
assert hassio.validate.dns_server_list(combined)