Compare commits

...

53 Commits
170 ... 178

Author SHA1 Message Date
Pascal Vizeli
2e53a48504 Merge pull request #1224 from home-assistant/dev
Release 178
2019-08-16 13:26:45 +02:00
Pascal Vizeli
8e4db0c3ec Stripe resolv (#1226) 2019-08-16 13:22:07 +02:00
Pascal Vizeli
4072b06faf Fix issue on isntalled add-ons (#1225) 2019-08-16 13:12:39 +02:00
Pascal Vizeli
a2cf7ece70 Change handling with host files (#1223) 2019-08-16 12:47:32 +02:00
Pascal Vizeli
734fe3afde Bump version 178 2019-08-16 00:15:05 +02:00
Pascal Vizeli
7f3bc91c1d Merge pull request #1222 from home-assistant/dev
Release 177
2019-08-16 00:13:51 +02:00
Pascal Vizeli
9c2c95757d Validate dns better (#1221) 2019-08-15 23:48:14 +02:00
Franck Nijhof
b5ed6c586a Cleanup ingress panel on add-on uninstall (#1220)
* Cleanup ingress panel on add-on uninstall

* Update __init__.py
2019-08-15 23:05:03 +02:00
Franck Nijhof
35033d1f76 Allow manager role to access DNS API (#1219) 2019-08-15 22:38:34 +02:00
Pascal Vizeli
9e41d0c5b0 Bump version 177 2019-08-15 14:51:28 +02:00
Pascal Vizeli
62e92fada9 Merge pull request #1218 from home-assistant/dev
Release 176
2019-08-15 14:50:55 +02:00
dependabot-preview[bot]
ae0a1a657f Bump gitpython from 3.0.0 to 3.0.1 (#1216)
Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 3.0.0 to 3.0.1.
- [Release notes](https://github.com/gitpython-developers/GitPython/releases)
- [Changelog](https://github.com/gitpython-developers/GitPython/blob/master/CHANGES)
- [Commits](https://github.com/gitpython-developers/GitPython/compare/3.0.0...3.0.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-08-15 14:46:16 +02:00
Pascal Vizeli
81e511ba8e Fix spell 2019-08-15 12:42:34 +00:00
Pascal Vizeli
d89cb91c8c Revert "Call update of resolv later (#1215)" (#1217)
This reverts commit dc31b6e6fe.
2019-08-15 14:42:05 +02:00
Pascal Vizeli
dc31b6e6fe Call update of resolv later (#1215) 2019-08-15 13:57:44 +02:00
Pascal Vizeli
930a32de1a Fix latest issue (#1214)
* Fix latest issue

* Use also update now

* Fix style
2019-08-15 12:42:21 +02:00
Pascal Vizeli
e40f2ed8e3 Bump version 176 2019-08-15 11:36:47 +02:00
Pascal Vizeli
abbd3d1078 Merge pull request #1213 from home-assistant/dev
Release 175
2019-08-15 11:36:06 +02:00
Pascal Vizeli
63c9948456 Add CoreDNS to update process (#1212) 2019-08-15 11:05:08 +02:00
Pascal Vizeli
b6c81d779a Use own coredns for supervisor 2019-08-15 08:51:42 +00:00
Pascal Vizeli
2480c83169 Fix socat command (#1211) 2019-08-15 10:17:41 +02:00
Pascal Vizeli
334cc66cf6 Bump Version 175 2019-08-14 15:39:44 +02:00
Pascal Vizeli
3cf189ad94 Merge pull request #1209 from home-assistant/dev
Release 174
2019-08-14 15:38:57 +02:00
dependabot-preview[bot]
6ffb94a0f5 Bump ptvsd from 4.3.1 to 4.3.2 (#1207)
Bumps [ptvsd](https://github.com/Microsoft/ptvsd) from 4.3.1 to 4.3.2.
- [Release notes](https://github.com/Microsoft/ptvsd/releases)
- [Commits](https://github.com/Microsoft/ptvsd/compare/v4.3.1...v4.3.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-08-14 14:35:43 +02:00
Pascal Vizeli
3593826441 Fix issue with windows dev env 2019-08-14 10:37:39 +00:00
Pascal Vizeli
0a0a62f238 Addon provide his own udev support (#1206)
* Addon provide his own udev support

* upgrade logger
2019-08-14 12:29:00 +02:00
Pascal Vizeli
41ce9913d2 Stats percent (#1205)
* Fix stats and add Memory percent

* Fix tasks

* round percent
2019-08-14 10:47:11 +02:00
Pascal Vizeli
b77c42384d Add DNS to add-on (#1204) 2019-08-14 09:53:03 +02:00
Pascal Vizeli
138bb12f98 Add debug output to gdbus (#1203) 2019-08-13 21:25:04 +02:00
Pascal Vizeli
4fe2859f4e Rename scripts folder (#1202)
* Rename script folder

* Rename scripts
2019-08-13 14:39:32 +02:00
dependabot-preview[bot]
0768b2b4bc Bump ptvsd from 4.3.0 to 4.3.1 (#1200)
Bumps [ptvsd](https://github.com/Microsoft/ptvsd) from 4.3.0 to 4.3.1.
- [Release notes](https://github.com/Microsoft/ptvsd/releases)
- [Commits](https://github.com/Microsoft/ptvsd/compare/v4.3.0...v4.3.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-08-13 14:35:41 +02:00
dependabot-preview[bot]
e6f1772a93 Bump gitpython from 2.1.13 to 3.0.0 (#1199)
Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 2.1.13 to 3.0.0.
- [Release notes](https://github.com/gitpython-developers/GitPython/releases)
- [Changelog](https://github.com/gitpython-developers/GitPython/blob/master/CHANGES)
- [Commits](https://github.com/gitpython-developers/GitPython/compare/2.1.13...3.0.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-08-13 14:35:15 +02:00
dependabot-preview[bot]
5374b2b3b9 Bump voluptuous from 0.11.5 to 0.11.7 (#1201)
Bumps [voluptuous](https://github.com/alecthomas/voluptuous) from 0.11.5 to 0.11.7.
- [Release notes](https://github.com/alecthomas/voluptuous/releases)
- [Changelog](https://github.com/alecthomas/voluptuous/blob/master/CHANGELOG.md)
- [Commits](https://github.com/alecthomas/voluptuous/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-08-13 14:29:33 +02:00
Pascal Vizeli
1196788856 Add CoreDNS as DNS backend (#1195)
* Add CoreDNS / DNS configuration

* Support get version

* add version

* add coresys

* Add more logic

* move forwareder into dns

* Setup docker inside

* add docker to env

* Add more function

* more interface

* Update hosts template

* Add DNS folder

* Fix issues

* Add more logic

* Add handling for hosts

* Fix setting

* fix lint

* Fix some issues

* Fix issue

* Run with no cache

* Fix issue on validate

* Fix bug

* Allow to jump into dev mode

* Fix permission

* Fix issue

* Add dns search

* Add watchdog

* Fix set issues

* add API description

* Add API endpoint

* Add CLI support

* Fix logs + add hostname

* Add/remove DNS entry

* Fix attribute

* Fix style

* Better shutdown

* Remove ha from network mapping

* Add more options

* Fix env shutdown

* Add support for new repair function

* Start coreDNS faster after restart

* remove options

* Fix ha fix
2019-08-13 14:20:42 +02:00
Pascal Vizeli
9f3f47eb80 Bump version 174 2019-08-11 09:59:48 +02:00
Pascal Vizeli
1a90a478f2 Merge pull request #1197 from home-assistant/dev
Release 173
2019-08-11 09:39:17 +02:00
Pascal Vizeli
ee773f3b63 Fix hanging landingpage (#1196) 2019-08-11 09:05:47 +02:00
Pascal Vizeli
5ffc27f60c Bump version 173 2019-08-08 23:24:11 +02:00
Pascal Vizeli
4c13dfb43c Merge pull request #1194 from home-assistant/dev
Release 172
2019-08-08 23:21:26 +02:00
Pascal Vizeli
bc099f0d81 Fix Version detection with exists container (#1193) 2019-08-08 23:20:26 +02:00
Pascal Vizeli
b26dd0af19 Add better log output for repair (#1191) 2019-08-08 10:14:13 +02:00
Pascal Vizeli
0dee5bd763 Fix black formating args 2019-08-08 10:13:44 +02:00
Pascal Vizeli
0765387ad8 Bump version 172 2019-08-07 18:18:09 +02:00
Pascal Vizeli
a07517bd3c Merge pull request #1190 from home-assistant/dev
Release 171
2019-08-07 18:17:30 +02:00
Pascal Vizeli
e5f0d80d96 Start API server before he beform a self update (#1189) 2019-08-07 18:03:56 +02:00
Pascal Vizeli
2fc5e3b7d9 Repair / fixup docker overlayfs issues (#1170)
* Add a repair modus

* Add repair to add-ons

* repair to cli

* Add API call

* fix sync call

* Clean all images

* Fix repair

* Fix supervisor

* Add new function to core

* fix tagging

* better style

* use retag

* new retag function

* Fix lint

* Fix import export
2019-08-07 17:26:32 +02:00
Pascal Vizeli
778bc46848 Don't relay on latest with HA/Addons (#1175)
* Don't relay on latest with HA/Addons

* Fix latest on install

* Revert some options

* Fix attach

* migrate to new version handling

* Fix thread

* Fix is running

* Allow wait

* debug code

* Fix debug value

* Fix list

* Fix regex

* Some better log output

* Fix logic

* Improve cleanup handling

* Fix bug

* Cleanup old code

* Improve version handling

* Fix the way to attach
2019-08-07 09:51:27 +02:00
Pascal Vizeli
882586b246 Fix time adjustments on latest boot (#1187)
* Fix time adjustments on latest boot

* Fix spell
2019-08-06 09:24:22 +02:00
dependabot-preview[bot]
b7c07a2555 Bump pytz from 2019.1 to 2019.2 (#1184)
Bumps [pytz](https://github.com/stub42/pytz) from 2019.1 to 2019.2.
- [Release notes](https://github.com/stub42/pytz/releases)
- [Commits](https://github.com/stub42/pytz/compare/release_2019.1...release_2019.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-08-02 10:32:04 +02:00
dependabot-preview[bot]
814b504fa9 Bump ptvsd from 4.2.10 to 4.3.0 (#1179)
Bumps [ptvsd](https://github.com/Microsoft/ptvsd) from 4.2.10 to 4.3.0.
- [Release notes](https://github.com/Microsoft/ptvsd/releases)
- [Commits](https://github.com/Microsoft/ptvsd/compare/v4.2.10...v4.3.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-29 17:01:28 +02:00
dependabot-preview[bot]
7ae430e7a8 Bump gitpython from 2.1.12 to 2.1.13 (#1178)
Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 2.1.12 to 2.1.13.
- [Release notes](https://github.com/gitpython-developers/GitPython/releases)
- [Changelog](https://github.com/gitpython-developers/GitPython/blob/master/CHANGES)
- [Commits](https://github.com/gitpython-developers/GitPython/compare/2.1.12...2.1.13)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-29 14:53:54 +02:00
dependabot-preview[bot]
0e7e95ba20 Bump gitpython from 2.1.11 to 2.1.12 (#1171)
Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 2.1.11 to 2.1.12.
- [Release notes](https://github.com/gitpython-developers/GitPython/releases)
- [Changelog](https://github.com/gitpython-developers/GitPython/blob/master/CHANGES)
- [Commits](https://github.com/gitpython-developers/GitPython/compare/2.1.11...2.1.12)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-22 14:18:11 +02:00
Pascal Vizeli
e577d8acb2 Bump version 171 2019-07-19 11:49:00 +02:00
53 changed files with 1498 additions and 280 deletions

View File

@@ -1,8 +1,8 @@
FROM python:3.7
WORKDIR /workspace
WORKDIR /workspaces
# install Node/Yarn for Frontent
# Install Node/Yarn for Frontent
RUN apt-get update && apt-get install -y --no-install-recommends \
curl \
git \
@@ -17,8 +17,24 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
&& rm -rf /var/lib/apt/lists/*
ENV NVM_DIR /root/.nvm
# Install docker
# https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/
RUN apt-get update && apt-get install -y --no-install-recommends \
apt-transport-https \
ca-certificates \
curl \
software-properties-common \
gpg-agent \
&& curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \
&& add-apt-repository "deb https://download.docker.com/linux/debian $(lsb_release -cs) stable" \
&& apt-get update && apt-get install -y --no-install-recommends \
docker-ce \
docker-ce-cli \
containerd.io \
&& rm -rf /var/lib/apt/lists/*
# Install Python dependencies from requirements.txt if it exists
COPY requirements.txt requirements_tests.txt /workspace/
COPY requirements.txt requirements_tests.txt /workspaces/
RUN pip install -r requirements.txt \
&& pip3 install -r requirements_tests.txt \
&& pip install black tox

View File

@@ -3,8 +3,11 @@
"name": "Hass.io dev",
"context": "..",
"dockerFile": "Dockerfile",
"appPort": "9123:8123",
"runArgs": [
"-e", "GIT_EDTIOR='code --wait'"
"-e",
"GIT_EDITOR='code --wait'",
"--privileged"
],
"extensions": [
"ms-python.python"
@@ -14,9 +17,13 @@
"python.linting.pylintEnabled": true,
"python.linting.enabled": true,
"python.formatting.provider": "black",
"python.formatting.blackArgs": [
"--target-version",
"py37"
],
"editor.formatOnPaste": false,
"editor.formatOnSave": true,
"editor.formatOnType": true,
"files.trimTrailingWhitespace": true
}
}
}

View File

@@ -18,3 +18,6 @@ venv/
home-assistant-polymer/*
misc/*
script/*
# Test ENV
data/

75
.vscode/tasks.json vendored
View File

@@ -1,10 +1,38 @@
{
"version": "2.0.0",
"tasks": [
{
"label": "Run Testenv",
"type": "shell",
"command": "./scripts/test_env.sh",
"group": {
"kind": "test",
"isDefault": true,
},
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": []
},
{
"label": "Run Testenv CLI",
"type": "shell",
"command": "docker run --rm -ti -v /etc/machine-id:/etc/machine-id --network=hassio --add-host hassio:172.30.32.2 homeassistant/amd64-hassio-cli:dev",
"group": {
"kind": "test",
"isDefault": true,
},
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": []
},
{
"label": "Update UI",
"type": "shell",
"command": "./script/update-frontend.sh",
"command": "./scripts/update-frontend.sh",
"group": {
"kind": "build",
"isDefault": true
@@ -14,6 +42,51 @@
"panel": "new"
},
"problemMatcher": []
},
{
"label": "Pytest",
"type": "shell",
"command": "pytest --timeout=10 tests",
"group": {
"kind": "test",
"isDefault": true,
},
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": []
},
{
"label": "Flake8",
"type": "shell",
"command": "flake8 hassio tests",
"group": {
"kind": "test",
"isDefault": true,
},
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": []
},
{
"label": "Pylint",
"type": "shell",
"command": "pylint hassio",
"dependsOn": [
"Install all Requirements"
],
"group": {
"kind": "test",
"isDefault": true,
},
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": []
}
]
}

52
API.md
View File

@@ -105,6 +105,7 @@ Output is the raw docker log.
"cpu_percent": 0.0,
"memory_usage": 283123,
"memory_limit": 329392,
"memory_percent": 1.4,
"network_tx": 0,
"network_rx": 0,
"blk_read": 0,
@@ -112,6 +113,10 @@ Output is the raw docker log.
}
```
- GET `/supervisor/repair`
Repair overlayfs issue and restore lost images
### Snapshot
- GET `/snapshots`
@@ -417,6 +422,7 @@ Proxy to real websocket instance.
"cpu_percent": 0.0,
"memory_usage": 283123,
"memory_limit": 329392,
"memory_percent": 1.4,
"network_tx": 0,
"network_rx": 0,
"blk_read": 0,
@@ -469,6 +475,8 @@ Get all available addons.
{
"name": "xy bla",
"slug": "xdssd_xybla",
"hostname": "xdssd-xybla",
"dns": [],
"description": "description",
"long_description": "null|markdown",
"auto_update": "bool",
@@ -494,6 +502,7 @@ Get all available addons.
"privileged": ["NET_ADMIN", "SYS_ADMIN"],
"apparmor": "disable|default|profile",
"devices": ["/dev/xy"],
"udev": "bool",
"auto_uart": "bool",
"icon": "bool",
"logo": "bool",
@@ -589,6 +598,7 @@ Write data to add-on stdin
"cpu_percent": 0.0,
"memory_usage": 283123,
"memory_limit": 329392,
"memory_percent": 1.4,
"network_tx": 0,
"network_rx": 0,
"blk_read": 0,
@@ -735,6 +745,48 @@ return:
}
```
### DNS
- GET `/dns/info`
```json
{
"host": "ip-address",
"version": "1",
"latest_version": "2",
"servers": ["dns://8.8.8.8"]
}
```
- POST `/dns/options`
```json
{
"servers": ["dns://8.8.8.8"]
}
```
- POST `/dns/update`
```json
{
"version": "VERSION"
}
```
- GET `/dns/logs`
- GET `/dns/stats`
```json
{
"cpu_percent": 0.0,
"memory_usage": 283123,
"memory_limit": 329392,
"memory_percent": 1.4,
"network_tx": 0,
"network_rx": 0,
"blk_read": 0,
"blk_write": 0
}
```
### Auth / SSO API
You can use the user system on homeassistant. We handle this auth system on

View File

@@ -52,7 +52,7 @@ stages:
versionSpec: '3.7'
- script: pip install black
displayName: 'Install black'
- script: black --check hassio tests
- script: black --target-version py37 --check hassio tests
displayName: 'Run Black'
- job: 'JQ'
pool:

View File

@@ -38,9 +38,10 @@ if __name__ == "__main__":
_LOGGER.info("Initialize Hass.io setup")
coresys = loop.run_until_complete(bootstrap.initialize_coresys())
loop.run_until_complete(coresys.core.connect())
bootstrap.migrate_system_env(coresys)
bootstrap.supervisor_debugger(coresys)
bootstrap.migrate_system_env(coresys)
_LOGGER.info("Setup HassIO")
loop.run_until_complete(coresys.core.setup())

View File

@@ -10,7 +10,9 @@ from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import (
AddonsError,
AddonsNotSupportedError,
CoreDNSError,
DockerAPIError,
HomeAssistantAPIError,
HostAppArmorError,
)
from ..store.addon import AddonStore
@@ -73,6 +75,9 @@ class AddonManager(CoreSysAttributes):
if tasks:
await asyncio.wait(tasks)
# Sync DNS
await self.sync_dns()
async def boot(self, stage: str) -> None:
"""Boot add-ons with mode auto."""
tasks = []
@@ -130,6 +135,7 @@ class AddonManager(CoreSysAttributes):
raise AddonsError() from None
else:
self.local[slug] = addon
_LOGGER.info("Add-on '%s' successfully installed", slug)
async def uninstall(self, slug: str) -> None:
"""Remove an add-on."""
@@ -154,11 +160,20 @@ class AddonManager(CoreSysAttributes):
with suppress(HostAppArmorError):
await addon.uninstall_apparmor()
# Cleanup Ingress panel from sidebar
if addon.ingress_panel:
addon.ingress_panel = False
with suppress(HomeAssistantAPIError):
await self.sys_ingress.update_hass_panel(addon)
# Cleanup internal data
addon.remove_discovery()
self.data.uninstall(addon)
self.local.pop(slug)
_LOGGER.info("Add-on '%s' successfully removed", slug)
async def update(self, slug: str) -> None:
"""Update add-on."""
if slug not in self.local:
@@ -184,9 +199,15 @@ class AddonManager(CoreSysAttributes):
last_state = await addon.state()
try:
await addon.instance.update(store.version, store.image)
# Cleanup
with suppress(DockerAPIError):
await addon.instance.cleanup()
except DockerAPIError:
raise AddonsError() from None
self.data.update(store)
else:
self.data.update(store)
_LOGGER.info("Add-on '%s' successfully updated", slug)
# Setup/Fix AppArmor profile
await addon.install_apparmor()
@@ -224,6 +245,7 @@ class AddonManager(CoreSysAttributes):
raise AddonsError() from None
else:
self.data.update(store)
_LOGGER.info("Add-on '%s' successfully rebuilded", slug)
# restore state
if last_state == STATE_STARTED:
@@ -246,3 +268,52 @@ class AddonManager(CoreSysAttributes):
_LOGGER.info("Detect new Add-on after restore %s", slug)
self.local[slug] = addon
async def repair(self) -> None:
"""Repair local add-ons."""
needs_repair: List[Addon] = []
# Evaluate Add-ons to repair
for addon in self.installed:
if await addon.instance.exists():
continue
needs_repair.append(addon)
_LOGGER.info("Found %d add-ons to repair", len(needs_repair))
if not needs_repair:
return
for addon in needs_repair:
_LOGGER.info("Start repair for add-on: %s", addon.slug)
with suppress(DockerAPIError, KeyError):
# Need pull a image again
if not addon.need_build:
await addon.instance.install(addon.version, addon.image)
continue
# Need local lookup
elif addon.need_build and not addon.is_detached:
store = self.store[addon.slug]
# If this add-on is available for rebuild
if addon.version == store.version:
await addon.instance.install(addon.version, addon.image)
continue
_LOGGER.error("Can't repair %s", addon.slug)
with suppress(AddonsError):
await self.uninstall(addon.slug)
async def sync_dns(self) -> None:
"""Sync add-ons DNS names."""
# Update hosts
for addon in self.installed:
if not await addon.instance.is_running():
continue
self.sys_dns.add_host(
ipv4=addon.ip_address, names=[addon.hostname], write=False
)
# Write hosts files
with suppress(CoreDNSError):
self.sys_dns.write_hosts()

View File

@@ -1,7 +1,7 @@
"""Init file for Hass.io add-ons."""
from contextlib import suppress
from copy import deepcopy
from ipaddress import IPv4Address, ip_address
from ipaddress import IPv4Address
import logging
from pathlib import Path, PurePath
import re
@@ -9,7 +9,7 @@ import secrets
import shutil
import tarfile
from tempfile import TemporaryDirectory
from typing import Any, Awaitable, Dict, Optional
from typing import Any, Awaitable, Dict, List, Optional
import voluptuous as vol
from voluptuous.humanize import humanize_error
@@ -35,7 +35,7 @@ from ..const import (
ATTR_USER,
ATTR_UUID,
ATTR_VERSION,
STATE_NONE,
DNS_SUFFIX,
STATE_STARTED,
STATE_STOPPED,
)
@@ -75,13 +75,11 @@ class Addon(AddonModel):
async def load(self) -> None:
"""Async initialize of object."""
with suppress(DockerAPIError):
await self.instance.attach()
await self.instance.attach(tag=self.version)
@property
def ip_address(self) -> IPv4Address:
"""Return IP of Add-on instance."""
if not self.is_installed:
return ip_address("0.0.0.0")
return self.instance.ip_address
@property
@@ -119,6 +117,11 @@ class Addon(AddonModel):
"""Return installed version."""
return self.persist[ATTR_VERSION]
@property
def dns(self) -> List[str]:
"""Return list of DNS name for that add-on."""
return [f"{self.hostname}.{DNS_SUFFIX}"]
@property
def options(self) -> Dict[str, Any]:
"""Return options with local changes."""
@@ -447,9 +450,6 @@ class Addon(AddonModel):
async def state(self) -> str:
"""Return running state of add-on."""
if not self.is_installed:
return STATE_NONE
if await self.instance.is_running():
return STATE_STARTED
return STATE_STOPPED
@@ -618,7 +618,7 @@ class Addon(AddonModel):
image_file = Path(temp, "image.tar")
if image_file.is_file():
with suppress(DockerAPIError):
await self.instance.import_image(image_file, version)
await self.instance.import_image(image_file)
else:
with suppress(DockerAPIError):
await self.instance.install(version, restore_image)

View File

@@ -51,6 +51,7 @@ from ..const import (
ATTR_STDIN,
ATTR_TIMEOUT,
ATTR_TMPFS,
ATTR_UDEV,
ATTR_URL,
ATTR_VERSION,
ATTR_WEBUI,
@@ -109,6 +110,16 @@ class AddonModel(CoreSysAttributes):
"""Return name of add-on."""
return self.data[ATTR_NAME]
@property
def hostname(self) -> str:
"""Return slug/id of add-on."""
return self.slug.replace("_", "-")
@property
def dns(self) -> List[str]:
"""Return list of DNS name for that add-on."""
return []
@property
def timeout(self) -> int:
"""Return timeout of addon for docker stop."""
@@ -333,6 +344,11 @@ class AddonModel(CoreSysAttributes):
"""Return True if the add-on access to GPIO interface."""
return self.data[ATTR_GPIO]
@property
def with_udev(self) -> bool:
"""Return True if the add-on have his own udev."""
return self.data[ATTR_UDEV]
@property
def with_kernel_modules(self) -> bool:
"""Return True if the add-on access to kernel modules."""

View File

@@ -68,6 +68,7 @@ from ..const import (
ATTR_SYSTEM,
ATTR_TIMEOUT,
ATTR_TMPFS,
ATTR_UDEV,
ATTR_URL,
ATTR_USER,
ATTR_UUID,
@@ -186,6 +187,7 @@ SCHEMA_ADDON_CONFIG = vol.Schema(
vol.Optional(ATTR_HOST_DBUS, default=False): vol.Boolean(),
vol.Optional(ATTR_DEVICES): [vol.Match(r"^(.*):(.*):([rwm]{1,3})$")],
vol.Optional(ATTR_AUTO_UART, default=False): vol.Boolean(),
vol.Optional(ATTR_UDEV, default=False): vol.Boolean(),
vol.Optional(ATTR_TMPFS): vol.Match(r"^size=(\d)*[kmg](,uid=\d{1,4})?(,rw)?$"),
vol.Optional(ATTR_MAP, default=list): [vol.Match(RE_VOLUME)],
vol.Optional(ATTR_ENVIRONMENT): {vol.Match(r"\w*"): vol.Coerce(str)},

View File

@@ -9,6 +9,7 @@ from ..coresys import CoreSys, CoreSysAttributes
from .addons import APIAddons
from .auth import APIAuth
from .discovery import APIDiscovery
from .dns import APICoreDNS
from .hardware import APIHardware
from .hassos import APIHassOS
from .homeassistant import APIHomeAssistant
@@ -55,6 +56,7 @@ class RestAPI(CoreSysAttributes):
self._register_services()
self._register_info()
self._register_auth()
self._register_dns()
def _register_host(self) -> None:
"""Register hostcontrol functions."""
@@ -130,6 +132,7 @@ class RestAPI(CoreSysAttributes):
web.post("/supervisor/update", api_supervisor.update),
web.post("/supervisor/reload", api_supervisor.reload),
web.post("/supervisor/options", api_supervisor.options),
web.post("/supervisor/repair", api_supervisor.repair),
]
)
@@ -263,6 +266,21 @@ class RestAPI(CoreSysAttributes):
]
)
def _register_dns(self) -> None:
"""Register DNS functions."""
api_dns = APICoreDNS()
api_dns.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/dns/info", api_dns.info),
web.get("/dns/stats", api_dns.stats),
web.get("/dns/logs", api_dns.logs),
web.post("/dns/update", api_dns.update),
web.post("/dns/options", api_dns.options),
]
)
def _register_panel(self) -> None:
"""Register panel for Home Assistant."""
panel_dir = Path(__file__).parent.joinpath("panel")

View File

@@ -8,6 +8,7 @@ import voluptuous as vol
from voluptuous.humanize import humanize_error
from ..addons import AnyAddon
from ..docker.stats import DockerStats
from ..addons.utils import rating_security
from ..const import (
ATTR_ADDONS,
@@ -30,6 +31,7 @@ from ..const import (
ATTR_DEVICES,
ATTR_DEVICETREE,
ATTR_DISCOVERY,
ATTR_DNS,
ATTR_DOCKER_API,
ATTR_FULL_ACCESS,
ATTR_GPIO,
@@ -41,6 +43,7 @@ from ..const import (
ATTR_HOST_IPC,
ATTR_HOST_NETWORK,
ATTR_HOST_PID,
ATTR_HOSTNAME,
ATTR_ICON,
ATTR_INGRESS,
ATTR_INGRESS_ENTRY,
@@ -56,6 +59,7 @@ from ..const import (
ATTR_MACHINE,
ATTR_MAINTAINER,
ATTR_MEMORY_LIMIT,
ATTR_MEMORY_PERCENT,
ATTR_MEMORY_USAGE,
ATTR_NAME,
ATTR_NETWORK,
@@ -73,6 +77,7 @@ from ..const import (
ATTR_SOURCE,
ATTR_STATE,
ATTR_STDIN,
ATTR_UDEV,
ATTR_URL,
ATTR_VERSION,
ATTR_WEBUI,
@@ -116,7 +121,7 @@ class APIAddons(CoreSysAttributes):
self, request: web.Request, check_installed: bool = True
) -> AnyAddon:
"""Return addon, throw an exception it it doesn't exist."""
addon_slug = request.match_info.get("addon")
addon_slug: str = request.match_info.get("addon")
# Lookup itself
if addon_slug == "self":
@@ -175,11 +180,13 @@ class APIAddons(CoreSysAttributes):
@api_process
async def info(self, request: web.Request) -> Dict[str, Any]:
"""Return add-on information."""
addon = self._extract_addon(request, check_installed=False)
addon: AnyAddon = self._extract_addon(request, check_installed=False)
data = {
ATTR_NAME: addon.name,
ATTR_SLUG: addon.slug,
ATTR_HOSTNAME: addon.hostname,
ATTR_DNS: addon.dns,
ATTR_DESCRIPTON: addon.description,
ATTR_LONG_DESCRIPTION: addon.long_description,
ATTR_AUTO_UPDATE: None,
@@ -220,6 +227,7 @@ class APIAddons(CoreSysAttributes):
ATTR_GPIO: addon.with_gpio,
ATTR_KERNEL_MODULES: addon.with_kernel_modules,
ATTR_DEVICETREE: addon.with_devicetree,
ATTR_UDEV: addon.with_udev,
ATTR_DOCKER_API: addon.access_docker_api,
ATTR_AUDIO: addon.with_audio,
ATTR_AUDIO_INPUT: None,
@@ -256,12 +264,12 @@ class APIAddons(CoreSysAttributes):
@api_process
async def options(self, request: web.Request) -> None:
"""Store user options for add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
addon_schema = SCHEMA_OPTIONS.extend(
{vol.Optional(ATTR_OPTIONS): vol.Any(None, addon.schema)}
)
body = await api_validate(addon_schema, request)
body: Dict[str, Any] = await api_validate(addon_schema, request)
if ATTR_OPTIONS in body:
addon.options = body[ATTR_OPTIONS]
@@ -284,8 +292,8 @@ class APIAddons(CoreSysAttributes):
@api_process
async def security(self, request: web.Request) -> None:
"""Store security options for add-on."""
addon = self._extract_addon(request)
body = await api_validate(SCHEMA_SECURITY, request)
addon: AnyAddon = self._extract_addon(request)
body: Dict[str, Any] = await api_validate(SCHEMA_SECURITY, request)
if ATTR_PROTECTED in body:
_LOGGER.warning("Protected flag changing for %s!", addon.slug)
@@ -296,13 +304,14 @@ class APIAddons(CoreSysAttributes):
@api_process
async def stats(self, request: web.Request) -> Dict[str, Any]:
"""Return resource information."""
addon = self._extract_addon(request)
stats = await addon.stats()
addon: AnyAddon = self._extract_addon(request)
stats: DockerStats = await addon.stats()
return {
ATTR_CPU_PERCENT: stats.cpu_percent,
ATTR_MEMORY_USAGE: stats.memory_usage,
ATTR_MEMORY_LIMIT: stats.memory_limit,
ATTR_MEMORY_PERCENT: stats.memory_percent,
ATTR_NETWORK_RX: stats.network_rx,
ATTR_NETWORK_TX: stats.network_tx,
ATTR_BLK_READ: stats.blk_read,
@@ -312,19 +321,19 @@ class APIAddons(CoreSysAttributes):
@api_process
def install(self, request: web.Request) -> Awaitable[None]:
"""Install add-on."""
addon = self._extract_addon(request, check_installed=False)
addon: AnyAddon = self._extract_addon(request, check_installed=False)
return asyncio.shield(addon.install())
@api_process
def uninstall(self, request: web.Request) -> Awaitable[None]:
"""Uninstall add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
return asyncio.shield(addon.uninstall())
@api_process
def start(self, request: web.Request) -> Awaitable[None]:
"""Start add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
# check options
options = addon.options
@@ -338,13 +347,13 @@ class APIAddons(CoreSysAttributes):
@api_process
def stop(self, request: web.Request) -> Awaitable[None]:
"""Stop add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
return asyncio.shield(addon.stop())
@api_process
def update(self, request: web.Request) -> Awaitable[None]:
"""Update add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
if addon.latest_version == addon.version:
raise APIError("No update available!")
@@ -354,13 +363,13 @@ class APIAddons(CoreSysAttributes):
@api_process
def restart(self, request: web.Request) -> Awaitable[None]:
"""Restart add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
return asyncio.shield(addon.restart())
@api_process
def rebuild(self, request: web.Request) -> Awaitable[None]:
"""Rebuild local build add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
if not addon.need_build:
raise APIError("Only local build addons are supported")
@@ -369,13 +378,13 @@ class APIAddons(CoreSysAttributes):
@api_process_raw(CONTENT_TYPE_BINARY)
def logs(self, request: web.Request) -> Awaitable[bytes]:
"""Return logs from add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
return addon.logs()
@api_process_raw(CONTENT_TYPE_PNG)
async def icon(self, request: web.Request) -> bytes:
"""Return icon from add-on."""
addon = self._extract_addon(request, check_installed=False)
addon: AnyAddon = self._extract_addon(request, check_installed=False)
if not addon.with_icon:
raise APIError("No icon found!")
@@ -385,7 +394,7 @@ class APIAddons(CoreSysAttributes):
@api_process_raw(CONTENT_TYPE_PNG)
async def logo(self, request: web.Request) -> bytes:
"""Return logo from add-on."""
addon = self._extract_addon(request, check_installed=False)
addon: AnyAddon = self._extract_addon(request, check_installed=False)
if not addon.with_logo:
raise APIError("No logo found!")
@@ -395,7 +404,7 @@ class APIAddons(CoreSysAttributes):
@api_process_raw(CONTENT_TYPE_TEXT)
async def changelog(self, request: web.Request) -> str:
"""Return changelog from add-on."""
addon = self._extract_addon(request, check_installed=False)
addon: AnyAddon = self._extract_addon(request, check_installed=False)
if not addon.with_changelog:
raise APIError("No changelog found!")
@@ -405,7 +414,7 @@ class APIAddons(CoreSysAttributes):
@api_process
async def stdin(self, request: web.Request) -> None:
"""Write to stdin of add-on."""
addon = self._extract_addon(request)
addon: AnyAddon = self._extract_addon(request)
if not addon.with_stdin:
raise APIError("STDIN not supported by add-on")

89
hassio/api/dns.py Normal file
View File

@@ -0,0 +1,89 @@
"""Init file for Hass.io DNS RESTful API."""
import asyncio
import logging
from typing import Any, Awaitable, Dict
from aiohttp import web
import voluptuous as vol
from ..const import (
ATTR_BLK_READ,
ATTR_BLK_WRITE,
ATTR_CPU_PERCENT,
ATTR_HOST,
ATTR_LATEST_VERSION,
ATTR_MEMORY_LIMIT,
ATTR_MEMORY_USAGE,
ATTR_MEMORY_PERCENT,
ATTR_NETWORK_RX,
ATTR_NETWORK_TX,
ATTR_SERVERS,
ATTR_VERSION,
CONTENT_TYPE_BINARY,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIError
from ..validate import DNS_SERVER_LIST
from .utils import api_process, api_process_raw, api_validate
_LOGGER = logging.getLogger(__name__)
# pylint: disable=no-value-for-parameter
SCHEMA_OPTIONS = vol.Schema({vol.Optional(ATTR_SERVERS): DNS_SERVER_LIST})
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): vol.Coerce(str)})
class APICoreDNS(CoreSysAttributes):
    """Handle RESTful API for DNS functions."""

    @api_process
    async def info(self, request: web.Request) -> Dict[str, Any]:
        """Return DNS information."""
        dns_info: Dict[str, Any] = {
            ATTR_VERSION: self.sys_dns.version,
            ATTR_LATEST_VERSION: self.sys_dns.latest_version,
            ATTR_HOST: str(self.sys_docker.network.dns),
            ATTR_SERVERS: self.sys_dns.servers,
        }
        return dns_info

    @api_process
    async def options(self, request: web.Request) -> None:
        """Set DNS options and persist them."""
        options = await api_validate(SCHEMA_OPTIONS, request)

        if ATTR_SERVERS in options:
            self.sys_dns.servers = options[ATTR_SERVERS]

        self.sys_dns.save_data()

    @api_process
    async def stats(self, request: web.Request) -> Dict[str, Any]:
        """Return resource information of the DNS plugin container."""
        usage = await self.sys_dns.stats()

        return {
            ATTR_CPU_PERCENT: usage.cpu_percent,
            ATTR_MEMORY_USAGE: usage.memory_usage,
            ATTR_MEMORY_LIMIT: usage.memory_limit,
            ATTR_MEMORY_PERCENT: usage.memory_percent,
            ATTR_NETWORK_RX: usage.network_rx,
            ATTR_NETWORK_TX: usage.network_tx,
            ATTR_BLK_READ: usage.blk_read,
            ATTR_BLK_WRITE: usage.blk_write,
        }

    @api_process
    async def update(self, request: web.Request) -> None:
        """Update DNS plugin to the requested (or latest) version."""
        body = await api_validate(SCHEMA_VERSION, request)
        target = body.get(ATTR_VERSION, self.sys_dns.latest_version)

        # Reject no-op updates early
        if target == self.sys_dns.version:
            raise APIError("Version {} is already in use".format(target))

        # Shield so a cancelled request does not abort a running update
        await asyncio.shield(self.sys_dns.update(target))

    @api_process_raw(CONTENT_TYPE_BINARY)
    def logs(self, request: web.Request) -> Awaitable[bytes]:
        """Return DNS Docker logs."""
        return self.sys_dns.logs()

View File

@@ -22,7 +22,9 @@ class APIHardware(CoreSysAttributes):
async def info(self, request):
"""Show hardware info."""
return {
ATTR_SERIAL: list(self.sys_hardware.serial_devices),
ATTR_SERIAL: list(
self.sys_hardware.serial_devices | self.sys_hardware.serial_by_id
),
ATTR_INPUT: list(self.sys_hardware.input_devices),
ATTR_DISK: list(self.sys_hardware.disk_devices),
ATTR_GPIO: list(self.sys_hardware.gpio_devices),

View File

@@ -18,6 +18,7 @@ from ..const import (
ATTR_MACHINE,
ATTR_MEMORY_LIMIT,
ATTR_MEMORY_USAGE,
ATTR_MEMORY_PERCENT,
ATTR_NETWORK_RX,
ATTR_NETWORK_TX,
ATTR_PASSWORD,
@@ -121,6 +122,7 @@ class APIHomeAssistant(CoreSysAttributes):
ATTR_CPU_PERCENT: stats.cpu_percent,
ATTR_MEMORY_USAGE: stats.memory_usage,
ATTR_MEMORY_LIMIT: stats.memory_limit,
ATTR_MEMORY_PERCENT: stats.memory_percent,
ATTR_NETWORK_RX: stats.network_rx,
ATTR_NETWORK_TX: stats.network_tx,
ATTR_BLK_READ: stats.blk_read,

View File

@@ -67,6 +67,7 @@ ADDONS_ROLE_ACCESS = {
),
ROLE_MANAGER: re.compile(
r"^(?:"
r"|/dns/.*"
r"|/homeassistant/.+"
r"|/host/.+"
r"|/hardware/.+"

View File

@@ -25,6 +25,7 @@ from ..const import (
ATTR_LOGO,
ATTR_MEMORY_LIMIT,
ATTR_MEMORY_USAGE,
ATTR_MEMORY_PERCENT,
ATTR_NAME,
ATTR_NETWORK_RX,
ATTR_NETWORK_TX,
@@ -140,6 +141,7 @@ class APISupervisor(CoreSysAttributes):
ATTR_CPU_PERCENT: stats.cpu_percent,
ATTR_MEMORY_USAGE: stats.memory_usage,
ATTR_MEMORY_LIMIT: stats.memory_limit,
ATTR_MEMORY_PERCENT: stats.memory_percent,
ATTR_NETWORK_RX: stats.network_rx,
ATTR_NETWORK_TX: stats.network_tx,
ATTR_BLK_READ: stats.blk_read,
@@ -161,6 +163,11 @@ class APISupervisor(CoreSysAttributes):
"""Reload add-ons, configuration, etc."""
return asyncio.shield(self.sys_updater.reload())
@api_process
def repair(self, request: web.Request) -> Awaitable[None]:
"""Try to repair the local setup / overlayfs."""
return asyncio.shield(self.sys_core.repair())
@api_process_raw(CONTENT_TYPE_BINARY)
def logs(self, request: web.Request) -> Awaitable[bytes]:
"""Return supervisor Docker logs."""

View File

@@ -10,6 +10,8 @@ from .utils.json import read_json_file
_LOGGER = logging.getLogger(__name__)
ARCH_JSON: Path = Path(__file__).parent.joinpath("data/arch.json")
MAP_CPU = {
"armv7": "armv7",
"armv6": "armhf",
@@ -47,7 +49,7 @@ class CpuArch(CoreSysAttributes):
async def load(self) -> None:
"""Load data and initialize default arch."""
try:
arch_data = read_json_file(Path(__file__).parent.joinpath("arch.json"))
arch_data = read_json_file(ARCH_JSON)
except JsonFileError:
_LOGGER.warning("Can't read arch json")
return

View File

@@ -11,11 +11,12 @@ from .addons import AddonManager
from .api import RestAPI
from .arch import CpuArch
from .auth import Auth
from .const import SOCKET_DOCKER
from .const import CHANNEL_DEV, SOCKET_DOCKER
from .core import HassIO
from .coresys import CoreSys
from .dbus import DBusManager
from .discovery import Discovery
from .dns import CoreDNS
from .hassos import HassOS
from .homeassistant import HomeAssistant
from .host import HostManager
@@ -43,6 +44,7 @@ async def initialize_coresys():
# Initialize core objects
coresys.core = HassIO(coresys)
coresys.dns = CoreDNS(coresys)
coresys.arch = CpuArch(coresys)
coresys.auth = Auth(coresys)
coresys.updater = Updater(coresys)
@@ -127,9 +129,21 @@ def initialize_system_data(coresys: CoreSys):
_LOGGER.info("Create Hass.io Apparmor folder %s", config.path_apparmor)
config.path_apparmor.mkdir()
# dns folder
if not config.path_dns.is_dir():
_LOGGER.info("Create Hass.io DNS folder %s", config.path_dns)
config.path_dns.mkdir()
# Update log level
coresys.config.modify_log_level()
# Check if ENV is in development mode
if bool(os.environ.get("SUPERVISOR_DEV", 0)):
_LOGGER.warning("SUPERVISOR_DEV is set")
coresys.updater.channel = CHANNEL_DEV
coresys.config.logging = "debug"
coresys.config.debug = True
def migrate_system_env(coresys: CoreSys):
"""Cleanup some stuff after update."""
@@ -218,7 +232,7 @@ def reg_signal(loop):
def supervisor_debugger(coresys: CoreSys) -> None:
"""Setup debugger if needed."""
if not coresys.config.debug or not coresys.dev:
if not coresys.config.debug:
return
import ptvsd
@@ -226,4 +240,5 @@ def supervisor_debugger(coresys: CoreSys) -> None:
ptvsd.enable_attach(address=("0.0.0.0", 33333), redirect_output=True)
if coresys.config.debug_block:
_LOGGER.info("Wait until debugger is attached")
ptvsd.wait_for_attach()

View File

@@ -34,6 +34,7 @@ BACKUP_DATA = PurePath("backup")
SHARE_DATA = PurePath("share")
TMP_DATA = PurePath("tmp")
APPARMOR_DATA = PurePath("apparmor")
DNS_DATA = PurePath("dns")
DEFAULT_BOOT_TIME = datetime.utcfromtimestamp(0).isoformat()
@@ -99,7 +100,7 @@ class CoreConfig(JsonConfig):
def modify_log_level(self) -> None:
"""Change log level."""
lvl = getattr(logging, self.logging.upper())
logging.basicConfig(level=lvl)
logging.getLogger("hassio").setLevel(lvl)
@property
def last_boot(self):
@@ -211,6 +212,16 @@ class CoreConfig(JsonConfig):
"""Return root share data folder external for Docker."""
return PurePath(self.path_extern_hassio, SHARE_DATA)
@property
def path_extern_dns(self):
"""Return dns path external for Docker."""
return str(PurePath(self.path_extern_hassio, DNS_DATA))
@property
def path_dns(self):
"""Return dns path inside supervisor."""
return Path(HASSIO_DATA, DNS_DATA)
@property
def addons_repositories(self):
"""Return list of custom Add-on repositories."""

View File

@@ -3,7 +3,7 @@ from pathlib import Path
from ipaddress import ip_network
HASSIO_VERSION = "170"
HASSIO_VERSION = "178"
URL_HASSIO_ADDONS = "https://github.com/home-assistant/hassio-addons"
URL_HASSIO_VERSION = "https://version.home-assistant.io/{channel}.json"
@@ -24,6 +24,7 @@ FILE_HASSIO_UPDATER = Path(HASSIO_DATA, "updater.json")
FILE_HASSIO_SERVICES = Path(HASSIO_DATA, "services.json")
FILE_HASSIO_DISCOVERY = Path(HASSIO_DATA, "discovery.json")
FILE_HASSIO_INGRESS = Path(HASSIO_DATA, "ingress.json")
FILE_HASSIO_DNS = Path(HASSIO_DATA, "dns.json")
SOCKET_DOCKER = Path("/var/run/docker.sock")
@@ -31,6 +32,9 @@ DOCKER_NETWORK = "hassio"
DOCKER_NETWORK_MASK = ip_network("172.30.32.0/23")
DOCKER_NETWORK_RANGE = ip_network("172.30.33.0/24")
DNS_SERVERS = ["dns://8.8.8.8", "dns://1.1.1.1"]
DNS_SUFFIX = "local.hass.io"
LABEL_VERSION = "io.hass.version"
LABEL_ARCH = "io.hass.arch"
LABEL_TYPE = "io.hass.type"
@@ -86,6 +90,7 @@ ATTR_VERSION_LATEST = "version_latest"
ATTR_AUTO_UART = "auto_uart"
ATTR_LAST_BOOT = "last_boot"
ATTR_LAST_VERSION = "last_version"
ATTR_LATEST_VERSION = "latest_version"
ATTR_CHANNEL = "channel"
ATTR_NAME = "name"
ATTR_SLUG = "slug"
@@ -159,6 +164,7 @@ ATTR_NETWORK_RX = "network_rx"
ATTR_NETWORK_TX = "network_tx"
ATTR_MEMORY_LIMIT = "memory_limit"
ATTR_MEMORY_USAGE = "memory_usage"
ATTR_MEMORY_PERCENT = "memory_percent"
ATTR_BLK_READ = "blk_read"
ATTR_BLK_WRITE = "blk_write"
ATTR_ADDON = "addon"
@@ -210,6 +216,9 @@ ATTR_ADMIN = "admin"
ATTR_PANELS = "panels"
ATTR_DEBUG = "debug"
ATTR_DEBUG_BLOCK = "debug_block"
ATTR_DNS = "dns"
ATTR_SERVERS = "servers"
ATTR_UDEV = "udev"
PROVIDE_SERVICE = "provide"
NEED_SERVICE = "need"

View File

@@ -24,10 +24,14 @@ class HassIO(CoreSysAttributes):
"""Initialize Hass.io object."""
self.coresys = coresys
async def connect(self):
"""Connect Supervisor container."""
await self.sys_supervisor.load()
async def setup(self):
"""Setup HassIO orchestration."""
# Load Supervisor
await self.sys_supervisor.load()
# Load CoreDNS
await self.sys_dns.load()
# Load DBus
await self.sys_dbus.load()
@@ -68,11 +72,10 @@ class HassIO(CoreSysAttributes):
# Load ingress
await self.sys_ingress.load()
# start dns forwarding
self.sys_create_task(self.sys_dns.start())
async def start(self):
"""Start Hass.io orchestration."""
await self.sys_api.start()
# on release channel, try update itself
if self.sys_supervisor.need_update:
try:
@@ -86,9 +89,6 @@ class HassIO(CoreSysAttributes):
"future version of Home Assistant!"
)
# start api
await self.sys_api.start()
# start addon mark as initialize
await self.sys_addons.boot(STARTUP_INITIALIZE)
@@ -116,8 +116,7 @@ class HassIO(CoreSysAttributes):
await self.sys_addons.boot(STARTUP_APPLICATION)
# store new last boot
self.sys_config.last_boot = self.sys_hardware.last_boot
self.sys_config.save_data()
self._update_last_boot()
finally:
# Add core tasks into scheduler
@@ -134,16 +133,19 @@ class HassIO(CoreSysAttributes):
# don't process scheduler anymore
self.sys_scheduler.suspend = True
# store new last boot / prevent time adjustments
self._update_last_boot()
# process async stop tasks
try:
with async_timeout.timeout(10):
await asyncio.wait(
[
self.sys_api.stop(),
self.sys_dns.stop(),
self.sys_websession.close(),
self.sys_websession_ssl.close(),
self.sys_ingress.unload(),
self.sys_dns.unload(),
]
)
except asyncio.TimeoutError:
@@ -162,3 +164,26 @@ class HassIO(CoreSysAttributes):
await self.sys_addons.shutdown(STARTUP_SERVICES)
await self.sys_addons.shutdown(STARTUP_SYSTEM)
await self.sys_addons.shutdown(STARTUP_INITIALIZE)
def _update_last_boot(self):
"""Update last boot time."""
self.sys_config.last_boot = self.sys_hardware.last_boot
self.sys_config.save_data()
async def repair(self):
"""Repair system integrity."""
_LOGGER.info("Start repairing of Hass.io Environment")
await self.sys_run_in_executor(self.sys_docker.repair)
# Restore core functionality
await self.sys_dns.repair()
await self.sys_addons.repair()
await self.sys_homeassistant.repair()
# Fix HassOS specific
if self.sys_hassos.available:
await self.sys_hassos.repair_cli()
# Tag version for latest
await self.sys_supervisor.repair()
_LOGGER.info("Finished repairing of Hass.io Environment")

View File

@@ -1,14 +1,13 @@
"""Handle core shared data."""
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Optional
import aiohttp
from .config import CoreConfig
from .const import CHANNEL_DEV
from .docker import DockerAPI
from .misc.dns import DNSForward
from .misc.hardware import Hardware
from .misc.scheduler import Scheduler
@@ -20,6 +19,7 @@ if TYPE_CHECKING:
from .core import HassIO
from .dbus import DBusManager
from .discovery import Discovery
from .dns import CoreDNS
from .hassos import HassOS
from .homeassistant import HomeAssistant
from .host import HostManager
@@ -52,26 +52,26 @@ class CoreSys:
self._hardware: Hardware = Hardware()
self._docker: DockerAPI = DockerAPI()
self._scheduler: Scheduler = Scheduler()
self._dns: DNSForward = DNSForward()
# Internal objects pointers
self._core: HassIO = None
self._arch: CpuArch = None
self._auth: Auth = None
self._homeassistant: HomeAssistant = None
self._supervisor: Supervisor = None
self._addons: AddonManager = None
self._api: RestAPI = None
self._updater: Updater = None
self._snapshots: SnapshotManager = None
self._tasks: Tasks = None
self._host: HostManager = None
self._ingress: Ingress = None
self._dbus: DBusManager = None
self._hassos: HassOS = None
self._services: ServiceManager = None
self._store: StoreManager = None
self._discovery: Discovery = None
self._core: Optional[HassIO] = None
self._arch: Optional[CpuArch] = None
self._auth: Optional[Auth] = None
self._dns: Optional[CoreDNS] = None
self._homeassistant: Optional[HomeAssistant] = None
self._supervisor: Optional[Supervisor] = None
self._addons: Optional[AddonManager] = None
self._api: Optional[RestAPI] = None
self._updater: Optional[Updater] = None
self._snapshots: Optional[SnapshotManager] = None
self._tasks: Optional[Tasks] = None
self._host: Optional[HostManager] = None
self._ingress: Optional[Ingress] = None
self._dbus: Optional[DBusManager] = None
self._hassos: Optional[HassOS] = None
self._services: Optional[ServiceManager] = None
self._store: Optional[StoreManager] = None
self._discovery: Optional[Discovery] = None
@property
def machine(self) -> str:
@@ -125,11 +125,6 @@ class CoreSys:
"""Return Scheduler object."""
return self._scheduler
@property
def dns(self) -> DNSForward:
"""Return DNSForward object."""
return self._dns
@property
def core(self) -> HassIO:
"""Return HassIO object."""
@@ -298,6 +293,18 @@ class CoreSys:
raise RuntimeError("DBusManager already set!")
self._dbus = value
@property
def dns(self) -> CoreDNS:
"""Return CoreDNS object."""
return self._dns
@dns.setter
def dns(self, value: CoreDNS):
"""Set a CoreDNS object."""
if self._dns:
raise RuntimeError("CoreDNS already set!")
self._dns = value
@property
def host(self) -> HostManager:
"""Return HostManager object."""
@@ -395,11 +402,6 @@ class CoreSysAttributes:
"""Return Scheduler object."""
return self.coresys.scheduler
@property
def sys_dns(self) -> DNSForward:
"""Return DNSForward object."""
return self.coresys.dns
@property
def sys_core(self) -> HassIO:
"""Return HassIO object."""
@@ -470,6 +472,11 @@ class CoreSysAttributes:
"""Return DBusManager object."""
return self.coresys.dbus
@property
def sys_dns(self) -> CoreDNS:
"""Return CoreDNS object."""
return self.coresys.dns
@property
def sys_host(self) -> HostManager:
"""Return HostManager object."""

9
hassio/data/coredns.tmpl Normal file
View File

@@ -0,0 +1,9 @@
.:53 {
log
hosts /config/hosts {
fallthrough
}
forward . $servers {
health_check 10s
}
}

2
hassio/data/hosts.tmpl Normal file
View File

@@ -0,0 +1,2 @@
$supervisor hassio supervisor.local.hass.io hassio.local.hass.io
$homeassistant homeassistant homeassistant.local.hass.io home-assistant.local.hass.io

341
hassio/dns.py Normal file
View File

@@ -0,0 +1,341 @@
"""Home Assistant control object."""
import asyncio
import logging
from contextlib import suppress
from ipaddress import IPv4Address
from pathlib import Path
from string import Template
from typing import Awaitable, Dict, List, Optional
from .const import ATTR_SERVERS, ATTR_VERSION, DNS_SERVERS, FILE_HASSIO_DNS, DNS_SUFFIX
from .coresys import CoreSys, CoreSysAttributes
from .docker.dns import DockerDNS
from .docker.stats import DockerStats
from .exceptions import CoreDNSError, CoreDNSUpdateError, DockerAPIError
from .misc.forwarder import DNSForward
from .utils.json import JsonConfig
from .validate import SCHEMA_DNS_CONFIG
_LOGGER = logging.getLogger(__name__)
COREDNS_TMPL: Path = Path(__file__).parents[0].joinpath("data/coredns.tmpl")
RESOLV_CONF: Path = Path("/etc/resolv.conf")
class CoreDNS(JsonConfig, CoreSysAttributes):
    """Manage the CoreDNS plugin container and its configuration.

    Persists its state (installed version, upstream DNS servers) to
    FILE_HASSIO_DNS and renders the CoreDNS corefile and hosts file into
    the supervisor DNS config folder.
    """

    def __init__(self, coresys: CoreSys):
        """Initialize the CoreDNS management object."""
        super().__init__(FILE_HASSIO_DNS, SCHEMA_DNS_CONFIG)
        self.coresys: CoreSys = coresys
        # Docker wrapper that runs the CoreDNS plugin container
        self.instance: DockerDNS = DockerDNS(coresys)
        # Legacy local DNS forwarder that proxies to the CoreDNS instance
        self.forwarder: DNSForward = DNSForward()
        # In-memory hosts table: IP -> hostnames (plain and DNS_SUFFIX-qualified)
        self._hosts: Dict[IPv4Address, List[str]] = {}

    @property
    def corefile(self) -> Path:
        """Return Path to the rendered CoreDNS corefile."""
        return Path(self.sys_config.path_dns, "corefile")

    @property
    def hosts(self) -> Path:
        """Return Path to the rendered hosts file."""
        return Path(self.sys_config.path_dns, "hosts")

    @property
    def servers(self) -> List[str]:
        """Return list of configured upstream DNS servers."""
        return self._data[ATTR_SERVERS]

    @servers.setter
    def servers(self, value: List[str]) -> None:
        """Set list of upstream DNS servers (not persisted until save_data)."""
        self._data[ATTR_SERVERS] = value

    @property
    def version(self) -> Optional[str]:
        """Return currently installed version of the CoreDNS plugin."""
        return self._data.get(ATTR_VERSION)

    @version.setter
    def version(self, value: str) -> None:
        """Set installed version of the CoreDNS plugin."""
        self._data[ATTR_VERSION] = value

    @property
    def latest_version(self) -> Optional[str]:
        """Return latest available version of CoreDNS."""
        return self.sys_updater.version_dns

    @property
    def in_progress(self) -> bool:
        """Return True if a container task is in progress."""
        return self.instance.in_progress

    @property
    def need_update(self) -> bool:
        """Return True if an update is available."""
        return self.version != self.latest_version

    async def load(self) -> None:
        """Load DNS setup: attach to or install the plugin, then start it."""
        self._init_hosts()

        # Check CoreDNS state
        try:
            # Evaluate Version if we lost this information
            if not self.version:
                self.version = await self.instance.get_latest_version(key=int)

            await self.instance.attach(tag=self.version)
        except DockerAPIError:
            _LOGGER.info(
                "No CoreDNS plugin Docker image %s found.", self.instance.image
            )

            # Install CoreDNS (errors are logged inside install/start)
            with suppress(CoreDNSError):
                await self.install()
        else:
            self.version = self.instance.version
            self.save_data()

        # Start the legacy DNS forwarder pointing at the CoreDNS address
        self.sys_create_task(self.forwarder.start(self.sys_docker.network.dns))

        # Point the supervisor's own resolver at CoreDNS (best effort)
        with suppress(CoreDNSError):
            self._update_local_resolv()

        # Start only if the container is not already running
        if await self.instance.is_running():
            return
        await self.start()

    async def unload(self) -> None:
        """Unload DNS forwarder."""
        await self.forwarder.stop()

    async def install(self) -> None:
        """Install the CoreDNS plugin, retrying until an image is available."""
        _LOGGER.info("Setup CoreDNS plugin")
        while True:
            # Refresh updater data if we don't know the latest tag yet
            if not self.latest_version:
                await self.sys_updater.reload()

            if self.latest_version:
                with suppress(DockerAPIError):
                    await self.instance.install(self.latest_version)
                    break
            _LOGGER.warning("Error on install CoreDNS plugin. Retry in 30sec")
            await asyncio.sleep(30)

        _LOGGER.info("CoreDNS plugin now installed")
        self.version = self.instance.version
        self.save_data()

        await self.start()

    async def update(self, version: Optional[str] = None) -> None:
        """Update CoreDNS plugin to ``version`` (default: latest).

        Raises CoreDNSUpdateError if the Docker update fails.
        """
        version = version or self.latest_version

        if version == self.version:
            _LOGGER.warning("Version %s is already installed for CoreDNS", version)
            return

        try:
            await self.instance.update(version)
        except DockerAPIError:
            _LOGGER.error("CoreDNS update fails")
            raise CoreDNSUpdateError() from None
        else:
            # Cleanup old image versions
            with suppress(DockerAPIError):
                await self.instance.cleanup()

        self.version = version
        self.save_data()

        # Start CoreDNS with the new version
        await self.start()

    async def restart(self) -> None:
        """Restart the CoreDNS plugin container."""
        with suppress(DockerAPIError):
            await self.instance.stop()
        await self.start()

    async def start(self) -> None:
        """Render the corefile and run the CoreDNS container.

        Raises CoreDNSError if the container cannot be started.
        """
        self._write_corefile()

        # Start Instance
        _LOGGER.info("Start CoreDNS plugin")
        try:
            await self.instance.run()
        except DockerAPIError:
            _LOGGER.error("Can't start CoreDNS plugin")
            raise CoreDNSError() from None

    async def reset(self) -> None:
        """Reset config and hosts to defaults and re-sync add-on entries."""
        self.servers = DNS_SERVERS

        # Drop the rendered hosts file and rebuild the default entries
        with suppress(OSError):
            self.hosts.unlink()
        self._init_hosts()

        # Re-add host entries for installed add-ons
        await self.sys_addons.sync_dns()

    def _write_corefile(self) -> None:
        """Render the CoreDNS corefile from the bundled template.

        Raises CoreDNSError if the template cannot be read or the
        corefile cannot be written.
        """
        try:
            corefile_template: Template = Template(COREDNS_TMPL.read_text())
        except OSError as err:
            _LOGGER.error("Can't read coredns template file: %s", err)
            raise CoreDNSError() from None

        # Configured servers first, then any default fallbacks not already listed
        dns_servers = self.servers + list(set(DNS_SERVERS) - set(self.servers))
        data = corefile_template.safe_substitute(servers=" ".join(dns_servers))

        try:
            self.corefile.write_text(data)
        except OSError as err:
            _LOGGER.error("Can't update corefile: %s", err)
            raise CoreDNSError() from None

    def _init_hosts(self) -> None:
        """Seed the in-memory hosts table with the default entries."""
        # Generate Default entries for supervisor and Home Assistant
        self.add_host(
            self.sys_docker.network.supervisor, ["hassio", "supervisor"], write=False
        )
        self.add_host(
            self.sys_docker.network.gateway,
            ["homeassistant", "home-assistant"],
            write=False,
        )

    def write_hosts(self) -> None:
        """Write hosts from memory to file.

        Raises CoreDNSError on I/O failure.
        """
        try:
            with self.hosts.open("w") as hosts:
                for address, hostnames in self._hosts.items():
                    host = " ".join(hostnames)
                    hosts.write(f"{address!s} {host}\n")
        except OSError as err:
            _LOGGER.error("Can't write hosts file: %s", err)
            raise CoreDNSError() from None

    def add_host(self, ipv4: IPv4Address, names: List[str], write: bool = True) -> None:
        """Add a new host entry mapping ``ipv4`` to ``names``.

        Each name is registered both plain and with the DNS_SUFFIX appended.
        Ignored for missing or 0.0.0.0 addresses.
        """
        if not ipv4 or ipv4 == IPv4Address("0.0.0.0"):
            return

        hostnames: List[str] = []
        for name in names:
            hostnames.append(name)
            hostnames.append(f"{name}.{DNS_SUFFIX}")

        self._hosts[ipv4] = hostnames
        _LOGGER.debug("Add Host entry %s -> %s", ipv4, hostnames)

        if write:
            self.write_hosts()

    def delete_host(
        self,
        ipv4: Optional[IPv4Address] = None,
        host: Optional[str] = None,
        write: bool = True,
    ) -> None:
        """Remove a entry from hosts, by address or by hostname."""
        # Resolve a hostname to its address first
        if host:
            for address, hostnames in self._hosts.items():
                if host not in hostnames:
                    continue
                ipv4 = address
                break

        # Remove entry
        if ipv4:
            _LOGGER.debug("Remove Host entry %s", ipv4)
            self._hosts.pop(ipv4, None)

            if write:
                self.write_hosts()
        else:
            _LOGGER.warning("Can't remove Host entry: %s/%s", ipv4, host)

    def logs(self) -> Awaitable[bytes]:
        """Get CoreDNS docker logs.

        Return Coroutine.
        """
        return self.instance.logs()

    async def stats(self) -> DockerStats:
        """Return stats of CoreDNS.

        Raises CoreDNSError if the container stats cannot be read.
        """
        try:
            return await self.instance.stats()
        except DockerAPIError:
            raise CoreDNSError() from None

    def is_running(self) -> Awaitable[bool]:
        """Return True if Docker container is running.

        Return a coroutine.
        """
        return self.instance.is_running()

    def is_fails(self) -> Awaitable[bool]:
        """Return True if a Docker container is fails state.

        Return a coroutine.
        """
        return self.instance.is_fails()

    async def repair(self) -> None:
        """Repair CoreDNS plugin by re-installing the known version."""
        if await self.instance.exists():
            return

        _LOGGER.info("Repair CoreDNS %s", self.version)
        try:
            await self.instance.install(self.version)
        except DockerAPIError:
            # Best effort: log only, the caller continues the repair sequence
            _LOGGER.error("Repairing of CoreDNS fails")

    def _update_local_resolv(self) -> None:
        """Update the local /etc/resolv.conf to include the CoreDNS server.

        Raises CoreDNSError on I/O failure.
        """
        resolv_lines: List[str] = []
        nameserver = f"nameserver {self.sys_docker.network.dns!s}"

        # Read resolv config
        try:
            with RESOLV_CONF.open("r") as resolv:
                for line in resolv.readlines():
                    # NOTE(review): readlines() never yields an empty string,
                    # so this guard looks dead — confirm intent.
                    if not line:
                        continue
                    resolv_lines.append(line.strip())
        except OSError as err:
            _LOGGER.error("Can't read local resolv: %s", err)
            raise CoreDNSError() from None

        # Already pointing at CoreDNS — nothing to do
        if nameserver in resolv_lines:
            return
        _LOGGER.info("Update resolv from Supervisor")

        # Write config back to resolv
        resolv_lines.append(nameserver)
        try:
            with RESOLV_CONF.open("w") as resolv:
                for line in resolv_lines:
                    resolv.write(f"{line}\n")
        except OSError as err:
            _LOGGER.error("Can't write local resolv: %s", err)
            raise CoreDNSError() from None

View File

@@ -1,12 +1,13 @@
"""Init file for Hass.io Docker object."""
import logging
from contextlib import suppress
from ipaddress import IPv4Address
import logging
from typing import Any, Dict, Optional
import attr
import docker
from ..const import SOCKET_DOCKER
from ..const import SOCKET_DOCKER, DNS_SUFFIX
from ..exceptions import DockerAPIError
from .network import DockerNetwork
@@ -50,28 +51,33 @@ class DockerAPI:
return self.docker.api
def run(
self, image: str, **kwargs: Dict[str, Any]
self,
image: str,
version: str = "latest",
ipv4: Optional[IPv4Address] = None,
**kwargs: Dict[str, Any],
) -> docker.models.containers.Container:
""""Create a Docker container and run it.
Need run inside executor.
"""
name = kwargs.get("name", image)
network_mode = kwargs.get("network_mode")
hostname = kwargs.get("hostname")
name: str = kwargs.get("name", image)
network_mode: str = kwargs.get("network_mode")
hostname: str = kwargs.get("hostname")
# Setup DNS
kwargs["dns"] = [str(self.network.dns)]
kwargs["dns_search"] = [DNS_SUFFIX]
kwargs["domainname"] = DNS_SUFFIX
# Setup network
kwargs["dns_search"] = ["."]
if network_mode:
kwargs["dns"] = [str(self.network.supervisor)]
kwargs["dns_opt"] = ["ndots:0"]
else:
if not network_mode:
kwargs["network"] = None
# Create container
try:
container = self.docker.containers.create(
image, use_config_proxy=False, **kwargs
f"{image}:{version}", use_config_proxy=False, **kwargs
)
except docker.errors.DockerException as err:
_LOGGER.error("Can't create container from %s: %s", name, err)
@@ -81,7 +87,7 @@ class DockerAPI:
if not network_mode:
alias = [hostname] if hostname else None
try:
self.network.attach_container(container, alias=alias)
self.network.attach_container(container, alias=alias, ipv4=ipv4)
except DockerAPIError:
_LOGGER.warning("Can't attach %s to hassio-net!", name)
else:
@@ -102,7 +108,11 @@ class DockerAPI:
return container
def run_command(
self, image: str, command: Optional[str] = None, **kwargs: Dict[str, Any]
self,
image: str,
version: str = "latest",
command: Optional[str] = None,
**kwargs: Dict[str, Any],
) -> CommandReturn:
"""Create a temporary container and run command.
@@ -114,11 +124,11 @@ class DockerAPI:
_LOGGER.info("Run command '%s' on %s", command, image)
try:
container = self.docker.containers.run(
image,
f"{image}:{version}",
command=command,
network=self.network.name,
use_config_proxy=False,
**kwargs
**kwargs,
)
# wait until command is done
@@ -135,3 +145,34 @@ class DockerAPI:
container.remove(force=True)
return CommandReturn(result.get("StatusCode"), output)
def repair(self) -> None:
    """Repair local docker overlayfs2 issues.

    Prunes stale containers, images, builds and volumes. Each prune is
    best effort: Docker API errors are logged as warnings and never
    raised, so a partial failure does not abort the repair sequence.

    Need run inside executor.
    """
    _LOGGER.info("Prune stale containers")
    try:
        output = self.docker.api.prune_containers()
        _LOGGER.debug("Containers prune: %s", output)
    except docker.errors.APIError as err:
        _LOGGER.warning("Error for containers prune: %s", err)

    _LOGGER.info("Prune stale images")
    try:
        output = self.docker.api.prune_images(filters={"dangling": False})
        _LOGGER.debug("Images prune: %s", output)
    except docker.errors.APIError as err:
        _LOGGER.warning("Error for images prune: %s", err)

    _LOGGER.info("Prune stale builds")
    try:
        output = self.docker.api.prune_builds()
        _LOGGER.debug("Builds prune: %s", output)
    except docker.errors.APIError as err:
        _LOGGER.warning("Error for builds prune: %s", err)

    _LOGGER.info("Prune stale volumes")
    try:
        # Bug fix: was prune_builds() (copy-paste error) — volumes were
        # never actually pruned.
        output = self.docker.api.prune_volumes()
        _LOGGER.debug("Volumes prune: %s", output)
    except docker.errors.APIError as err:
        _LOGGER.warning("Error for volumes prune: %s", err)

View File

@@ -35,6 +35,7 @@ if TYPE_CHECKING:
_LOGGER = logging.getLogger(__name__)
AUDIO_DEVICE = "/dev/snd:/dev/snd:rwm"
NO_ADDDRESS = ip_address("0.0.0.0")
class DockerAddon(DockerInterface):
@@ -62,7 +63,7 @@ class DockerAddon(DockerInterface):
self._meta["NetworkSettings"]["Networks"]["hassio"]["IPAddress"]
)
except (KeyError, TypeError, ValueError):
return ip_address("0.0.0.0")
return NO_ADDDRESS
@property
def timeout(self) -> int:
@@ -100,11 +101,6 @@ class DockerAddon(DockerInterface):
"""Return True if full access is enabled."""
return not self.addon.protected and self.addon.with_full_access
@property
def hostname(self) -> str:
"""Return slug/id of add-on."""
return self.addon.slug.replace("_", "-")
@property
def environment(self) -> Dict[str, str]:
"""Return environment for Docker add-on."""
@@ -139,7 +135,14 @@ class DockerAddon(DockerInterface):
# Auto mapping UART devices
if self.addon.auto_uart:
for device in self.sys_hardware.serial_devices:
if self.addon.with_udev:
serial_devs = self.sys_hardware.serial_devices
else:
serial_devs = (
self.sys_hardware.serial_devices | self.sys_hardware.serial_by_id
)
for device in serial_devs:
devices.append(f"{device}:{device}:rwm")
# Return None if no devices is present
@@ -186,10 +189,7 @@ class DockerAddon(DockerInterface):
@property
def network_mapping(self) -> Dict[str, str]:
"""Return hosts mapping."""
return {
"homeassistant": self.sys_docker.network.gateway,
"hassio": self.sys_docker.network.supervisor,
}
return {"hassio": self.sys_docker.network.supervisor}
@property
def network_mode(self) -> Optional[str]:
@@ -327,8 +327,9 @@ class DockerAddon(DockerInterface):
# Create & Run container
docker_container = self.sys_docker.run(
self.image,
version=self.addon.version,
name=self.name,
hostname=self.hostname,
hostname=self.addon.hostname,
detach=True,
init=True,
privileged=self.full_access,
@@ -346,10 +347,15 @@ class DockerAddon(DockerInterface):
tmpfs=self.tmpfs,
)
_LOGGER.info("Start Docker add-on %s with version %s", self.image, self.version)
self._meta = docker_container.attrs
_LOGGER.info("Start Docker add-on %s with version %s", self.image, self.version)
def _install(self, tag: str, image: Optional[str] = None) -> None:
# Write data to DNS server
self.sys_dns.add_host(ipv4=self.ip_address, names=[self.addon.hostname])
def _install(
self, tag: str, image: Optional[str] = None, latest: bool = False
) -> None:
"""Pull Docker image or build it.
Need run inside executor.
@@ -357,7 +363,7 @@ class DockerAddon(DockerInterface):
if self.addon.need_build:
self._build(tag)
else:
super()._install(tag, image)
super()._install(tag, image, latest)
def _build(self, tag: str) -> None:
"""Build a Docker container.
@@ -373,7 +379,6 @@ class DockerAddon(DockerInterface):
)
_LOGGER.debug("Build %s:%s done: %s", self.image, tag, log)
image.tag(self.image, tag="latest")
# Update meta data
self._meta = image.attrs
@@ -395,7 +400,7 @@ class DockerAddon(DockerInterface):
Need run inside executor.
"""
try:
image = self.sys_docker.api.get_image(self.image)
image = self.sys_docker.api.get_image(f"{self.image}:{self.version}")
except docker.errors.DockerException as err:
_LOGGER.error("Can't fetch image %s: %s", self.image, err)
raise DockerAPIError() from None
@@ -412,11 +417,11 @@ class DockerAddon(DockerInterface):
_LOGGER.info("Export image %s done", self.image)
@process_lock
def import_image(self, tar_file: Path, tag: str) -> Awaitable[None]:
def import_image(self, tar_file: Path) -> Awaitable[None]:
"""Import a tar file as image."""
return self.sys_run_in_executor(self._import_image, tar_file, tag)
return self.sys_run_in_executor(self._import_image, tar_file)
def _import_image(self, tar_file: Path, tag: str) -> None:
def _import_image(self, tar_file: Path) -> None:
"""Import a tar file as image.
Need run inside executor.
@@ -425,14 +430,13 @@ class DockerAddon(DockerInterface):
with tar_file.open("rb") as read_tar:
self.sys_docker.api.load_image(read_tar, quiet=True)
docker_image = self.sys_docker.images.get(self.image)
docker_image.tag(self.image, tag=tag)
docker_image = self.sys_docker.images.get(f"{self.image}:{self.version}")
except (docker.errors.DockerException, OSError) as err:
_LOGGER.error("Can't import image %s: %s", self.image, err)
raise DockerAPIError() from None
_LOGGER.info("Import image %s and tag %s", tar_file, tag)
self._meta = docker_image.attrs
_LOGGER.info("Import image %s and version %s", tar_file, self.version)
with suppress(DockerAPIError):
self._cleanup()
@@ -466,3 +470,12 @@ class DockerAddon(DockerInterface):
except OSError as err:
_LOGGER.error("Can't write to %s stdin: %s", self.name, err)
raise DockerAPIError() from None
def _stop(self, remove_container=True) -> None:
"""Stop/remove Docker container.
Need run inside executor.
"""
if self.ip_address != NO_ADDDRESS:
self.sys_dns.delete_host(ipv4=self.ip_address)
super()._stop(remove_container)

56
hassio/docker/dns.py Normal file
View File

@@ -0,0 +1,56 @@
"""HassOS Cli docker object."""
from contextlib import suppress
import logging
from ..const import ENV_TIME
from ..coresys import CoreSysAttributes
from ..exceptions import DockerAPIError
from .interface import DockerInterface
_LOGGER = logging.getLogger(__name__)
DNS_DOCKER_NAME: str = "hassio_dns"
class DockerDNS(DockerInterface, CoreSysAttributes):
    """Docker Hass.io wrapper for Hass.io DNS."""

    @property
    def image(self) -> str:
        """Return name of Hass.io DNS image."""
        return f"homeassistant/{self.sys_arch.supervisor}-hassio-dns"

    @property
    def name(self) -> str:
        """Return name of Docker container."""
        return DNS_DOCKER_NAME

    def _run(self) -> None:
        """Run Docker image.

        Need run inside executor.
        """
        if self._is_running():
            return

        # Make sure no stale container is left over before starting
        with suppress(DockerAPIError):
            self._stop()

        # Mount the supervisor DNS config folder read-only into the plugin
        config_volume = {
            str(self.sys_config.path_extern_dns): {"bind": "/config", "mode": "ro"}
        }

        container = self.sys_docker.run(
            self.image,
            version=self.sys_dns.version,
            ipv4=self.sys_docker.network.dns,
            name=self.name,
            hostname=self.name.replace("_", "-"),
            detach=True,
            init=True,
            environment={ENV_TIME: self.sys_timezone},
            volumes=config_volume,
        )

        self._meta = container.attrs
        _LOGGER.info("Start DNS %s with version %s", self.image, self.version)

View File

@@ -21,12 +21,12 @@ class DockerHassOSCli(DockerInterface, CoreSysAttributes):
"""Don't need stop."""
return True
def _attach(self):
def _attach(self, tag: str):
"""Attach to running Docker container.
Need run inside executor.
"""
try:
image = self.sys_docker.images.get(self.image)
image = self.sys_docker.images.get(f"{self.image}:{tag}")
except docker.errors.DockerException:
_LOGGER.warning("Can't find a HassOS CLI %s", self.image)

View File

@@ -2,7 +2,7 @@
from contextlib import suppress
from ipaddress import IPv4Address
import logging
from typing import Awaitable
from typing import Awaitable, Optional
import docker
@@ -19,24 +19,24 @@ class DockerHomeAssistant(DockerInterface):
"""Docker Hass.io wrapper for Home Assistant."""
@property
def machine(self):
def machine(self) -> Optional[str]:
"""Return machine of Home Assistant Docker image."""
if self._meta and LABEL_MACHINE in self._meta["Config"]["Labels"]:
return self._meta["Config"]["Labels"][LABEL_MACHINE]
return None
@property
def image(self):
def image(self) -> str:
"""Return name of Docker image."""
return self.sys_homeassistant.image
@property
def name(self):
def name(self) -> str:
"""Return name of Docker container."""
return HASS_DOCKER_NAME
@property
def timeout(self) -> str:
def timeout(self) -> int:
"""Return timeout for Docker actions."""
return 60
@@ -60,6 +60,7 @@ class DockerHomeAssistant(DockerInterface):
# Create & Run container
docker_container = self.sys_docker.run(
self.image,
version=self.sys_homeassistant.version,
name=self.name,
hostname=self.name,
detach=True,
@@ -84,8 +85,8 @@ class DockerHomeAssistant(DockerInterface):
},
)
_LOGGER.info("Start homeassistant %s with version %s", self.image, self.version)
self._meta = docker_container.attrs
_LOGGER.info("Start homeassistant %s with version %s", self.image, self.version)
def _execute_command(self, command: str) -> CommandReturn:
"""Create a temporary container and run command.
@@ -94,7 +95,8 @@ class DockerHomeAssistant(DockerInterface):
"""
return self.sys_docker.run_command(
self.image,
command,
version=self.sys_homeassistant.version,
command=command,
privileged=True,
init=True,
detach=True,

View File

@@ -2,16 +2,16 @@
import asyncio
from contextlib import suppress
import logging
from typing import Any, Dict, Optional, Awaitable
from typing import Any, Awaitable, Dict, List, Optional
import docker
from . import CommandReturn
from ..const import LABEL_ARCH, LABEL_VERSION
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import DockerAPIError
from ..utils import process_lock
from .stats import DockerStats
from . import CommandReturn
_LOGGER = logging.getLogger(__name__)
@@ -50,7 +50,10 @@ class DockerInterface(CoreSysAttributes):
@property
def image(self) -> Optional[str]:
"""Return name of Docker image."""
return self.meta_config.get("Image")
try:
return self.meta_config["Image"].partition(":")[0]
except KeyError:
return None
@property
def version(self) -> Optional[str]:
@@ -68,24 +71,25 @@ class DockerInterface(CoreSysAttributes):
return self.lock.locked()
@process_lock
def install(self, tag: str, image: Optional[str] = None):
def install(self, tag: str, image: Optional[str] = None, latest: bool = False):
"""Pull docker image."""
return self.sys_run_in_executor(self._install, tag, image)
return self.sys_run_in_executor(self._install, tag, image, latest)
def _install(self, tag: str, image: Optional[str] = None) -> None:
def _install(
self, tag: str, image: Optional[str] = None, latest: bool = False
) -> None:
"""Pull Docker image.
Need run inside executor.
"""
image = image or self.image
image = image.partition(":")[0] # remove potential tag
_LOGGER.info("Pull image %s tag %s.", image, tag)
try:
_LOGGER.info("Pull image %s tag %s.", image, tag)
docker_image = self.sys_docker.images.pull(f"{image}:{tag}")
_LOGGER.info("Tag image %s with version %s as latest", image, tag)
docker_image.tag(image, tag="latest")
if latest:
_LOGGER.info("Tag image %s with version %s as latest", image, tag)
docker_image.tag(image, tag="latest")
except docker.errors.APIError as err:
_LOGGER.error("Can't install %s:%s -> %s.", image, tag, err)
raise DockerAPIError() from None
@@ -101,13 +105,10 @@ class DockerInterface(CoreSysAttributes):
Need run inside executor.
"""
try:
docker_image = self.sys_docker.images.get(self.image)
assert f"{self.image}:{self.version}" in docker_image.tags
except (docker.errors.DockerException, AssertionError):
return False
return True
with suppress(docker.errors.DockerException):
self.sys_docker.images.get(f"{self.image}:{self.version}")
return True
return False
def is_running(self) -> Awaitable[bool]:
"""Return True if Docker is running.
@@ -123,7 +124,6 @@ class DockerInterface(CoreSysAttributes):
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
docker_image = self.sys_docker.images.get(self.image)
except docker.errors.DockerException:
return False
@@ -131,28 +131,24 @@ class DockerInterface(CoreSysAttributes):
if docker_container.status != "running":
return False
# we run on an old image, stop and start it
if docker_container.image.id != docker_image.id:
return False
return True
@process_lock
def attach(self):
def attach(self, tag: str):
"""Attach to running Docker container."""
return self.sys_run_in_executor(self._attach)
return self.sys_run_in_executor(self._attach, tag)
def _attach(self) -> None:
def _attach(self, tag: str) -> None:
"""Attach to running docker container.
Need run inside executor.
"""
try:
if self.image:
self._meta = self.sys_docker.images.get(self.image).attrs
with suppress(docker.errors.DockerException):
self._meta = self.sys_docker.containers.get(self.name).attrs
except docker.errors.DockerException:
pass
with suppress(docker.errors.DockerException):
if not self._meta and self.image:
self._meta = self.sys_docker.images.get(f"{self.image}:{tag}").attrs
# Successfull?
if not self._meta:
@@ -250,11 +246,15 @@ class DockerInterface(CoreSysAttributes):
self._meta = None
@process_lock
def update(self, tag: str, image: Optional[str] = None) -> Awaitable[None]:
def update(
self, tag: str, image: Optional[str] = None, latest: bool = False
) -> Awaitable[None]:
"""Update a Docker image."""
return self.sys_run_in_executor(self._update, tag, image)
return self.sys_run_in_executor(self._update, tag, image, latest)
def _update(self, tag: str, image: Optional[str] = None) -> None:
def _update(
self, tag: str, image: Optional[str] = None, latest: bool = False
) -> None:
"""Update a docker image.
Need run inside executor.
@@ -266,14 +266,11 @@ class DockerInterface(CoreSysAttributes):
)
# Update docker image
self._install(tag, image)
self._install(tag, image=image, latest=latest)
# Stop container & cleanup
with suppress(DockerAPIError):
try:
self._stop()
finally:
self._cleanup()
self._stop()
def logs(self) -> Awaitable[bytes]:
"""Return Docker logs of container.
@@ -308,13 +305,13 @@ class DockerInterface(CoreSysAttributes):
Need run inside executor.
"""
try:
latest = self.sys_docker.images.get(self.image)
origin = self.sys_docker.images.get(f"{self.image}:{self.version}")
except docker.errors.DockerException:
_LOGGER.warning("Can't find %s for cleanup", self.image)
raise DockerAPIError() from None
for image in self.sys_docker.images.list(name=self.image):
if latest.id == image.id:
if origin.id == image.id:
continue
with suppress(docker.errors.DockerException):
@@ -402,3 +399,35 @@ class DockerInterface(CoreSysAttributes):
return True
return False
def get_latest_version(self, key: Any = int) -> Awaitable[str]:
"""Return latest version of local Home Asssistant image."""
return self.sys_run_in_executor(self._get_latest_version, key)
def _get_latest_version(self, key: Any = int) -> str:
"""Return latest version of local Home Asssistant image.
Need run inside executor.
"""
available_version: List[str] = []
try:
for image in self.sys_docker.images.list(self.image):
for tag in image.tags:
version = tag.partition(":")[2]
try:
key(version)
except (AttributeError, ValueError):
continue
available_version.append(version)
assert available_version
except (docker.errors.DockerException, AssertionError):
_LOGGER.debug("No version found for %s", self.image)
raise DockerAPIError()
else:
_LOGGER.debug("Found HA versions: %s", available_version)
# Sort version and return latest version
available_version.sort(key=key, reverse=True)
return available_version[0]

View File

@@ -42,6 +42,11 @@ class DockerNetwork:
"""Return supervisor of the network."""
return DOCKER_NETWORK_MASK[2]
@property
def dns(self) -> IPv4Address:
"""Return dns of the network."""
return DOCKER_NETWORK_MASK[3]
def _get_network(self) -> docker.models.networks.Network:
"""Get HassIO network."""
try:

View File

@@ -20,6 +20,12 @@ class DockerStats:
self._memory_usage = 0
self._memory_limit = 0
# Calculate percent usage
if self._memory_limit != 0:
self._memory_percent = self._memory_usage / self._memory_limit * 100.0
else:
self._memory_percent = 0
with suppress(KeyError):
self._calc_cpu_percent(stats)
@@ -39,13 +45,12 @@ class DockerStats:
stats["cpu_stats"]["system_cpu_usage"]
- stats["precpu_stats"]["system_cpu_usage"]
)
online_cpu = stats["cpu_stats"]["online_cpus"]
if online_cpu == 0.0:
online_cpu = len(stats["cpu_stats"]["cpu_usage"]["percpu_usage"])
if system_delta > 0.0 and cpu_delta > 0.0:
self._cpu = (
(cpu_delta / system_delta)
* len(stats["cpu_stats"]["cpu_usage"]["percpu_usage"])
* 100.0
)
self._cpu = (cpu_delta / system_delta) * online_cpu * 100.0
def _calc_network(self, networks):
"""Calculate Network IO stats."""
@@ -64,7 +69,7 @@ class DockerStats:
@property
def cpu_percent(self):
"""Return CPU percent."""
return self._cpu
return round(self._cpu, 2)
@property
def memory_usage(self):
@@ -76,6 +81,11 @@ class DockerStats:
"""Return memory limit."""
return self._memory_limit
@property
def memory_percent(self):
"""Return memory usage in percent."""
return round(self._memory_percent, 2)
@property
def network_rx(self):
"""Return network rx stats."""

View File

@@ -2,6 +2,7 @@
from ipaddress import IPv4Address
import logging
import os
from typing import Awaitable
import docker
@@ -25,7 +26,7 @@ class DockerSupervisor(DockerInterface, CoreSysAttributes):
"""Return IP address of this container."""
return self.sys_docker.network.supervisor
def _attach(self) -> None:
def _attach(self, tag: str) -> None:
"""Attach to running docker container.
Need run inside executor.
@@ -37,7 +38,9 @@ class DockerSupervisor(DockerInterface, CoreSysAttributes):
self._meta = docker_container.attrs
_LOGGER.info(
"Attach to Supervisor %s with version %s", self.image, self.version
"Attach to Supervisor %s with version %s",
self.image,
self.sys_supervisor.version,
)
# If already attach
@@ -49,3 +52,21 @@ class DockerSupervisor(DockerInterface, CoreSysAttributes):
self.sys_docker.network.attach_container(
docker_container, alias=["hassio"], ipv4=self.sys_docker.network.supervisor
)
def retag(self) -> Awaitable[None]:
"""Retag latest image to version."""
return self.sys_run_in_executor(self._retag)
def _retag(self) -> None:
"""Retag latest image to version.
Need run inside executor.
"""
try:
docker_container = self.sys_docker.containers.get(self.name)
docker_container.image.tag(self.image, tag=self.version)
docker_container.image.tag(self.image, tag="latest")
except docker.errors.DockerException as err:
_LOGGER.error("Can't retag supervisor version: %s", err)
raise DockerAPIError() from None

View File

@@ -54,6 +54,17 @@ class HassOSNotSupportedError(HassioNotSupportedError):
"""Function not supported by HassOS."""
# DNS
class CoreDNSError(HassioError):
"""CoreDNS exception."""
class CoreDNSUpdateError(CoreDNSError):
"""Error on update of a CoreDNS."""
# Addons

View File

@@ -130,7 +130,7 @@ class HassOS(CoreSysAttributes):
_LOGGER.info("Detect HassOS %s on host system", self.version)
with suppress(DockerAPIError):
await self.instance.attach()
await self.instance.attach(tag="latest")
def config_sync(self) -> Awaitable[None]:
"""Trigger a host config reload from usb.
@@ -187,7 +187,22 @@ class HassOS(CoreSysAttributes):
return
try:
await self.instance.update(version)
await self.instance.update(version, latest=True)
except DockerAPIError:
_LOGGER.error("HassOS CLI update fails")
raise HassOSUpdateError() from None
else:
# Cleanup
with suppress(DockerAPIError):
await self.instance.cleanup()
async def repair_cli(self) -> None:
"""Repair CLI container."""
if await self.instance.exists():
return
_LOGGER.info("Repair HassOS CLI %s", self.version_cli)
try:
await self.instance.install(self.version_cli, latest=True)
except DockerAPIError:
_LOGGER.error("Repairing of HassOS CLI fails")

View File

@@ -2,6 +2,7 @@
import asyncio
from contextlib import asynccontextmanager, suppress
from datetime import datetime, timedelta
from distutils.version import StrictVersion
from ipaddress import IPv4Address
import logging
import os
@@ -26,6 +27,7 @@ from .const import (
ATTR_REFRESH_TOKEN,
ATTR_SSL,
ATTR_UUID,
ATTR_VERSION,
ATTR_WAIT_BOOT,
ATTR_WATCHDOG,
FILE_HASSIO_HOMEASSISTANT,
@@ -41,7 +43,7 @@ from .exceptions import (
HomeAssistantError,
HomeAssistantUpdateError,
)
from .utils import convert_to_ascii, process_lock, check_port
from .utils import check_port, convert_to_ascii, process_lock
from .utils.json import JsonConfig
from .validate import SCHEMA_HASS_CONFIG
@@ -75,12 +77,18 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):
async def load(self) -> None:
"""Prepare Home Assistant object."""
with suppress(DockerAPIError):
await self.instance.attach()
return
try:
# Evaluate Version if we lost this information
if not self.version:
self.version = await self.instance.get_latest_version(key=StrictVersion)
_LOGGER.info("No Home Assistant Docker image %s found.", self.image)
await self.install_landingpage()
await self.instance.attach(tag=self.version)
except DockerAPIError:
_LOGGER.info("No Home Assistant Docker image %s found.", self.image)
await self.install_landingpage()
else:
self.version = self.instance.version
self.save_data()
@property
def machine(self) -> str:
@@ -159,11 +167,6 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):
"""Set time to wait for Home Assistant startup."""
self._data[ATTR_WAIT_BOOT] = value
@property
def version(self) -> str:
"""Return version of running Home Assistant."""
return self.instance.version
@property
def latest_version(self) -> str:
"""Return last available version of Home Assistant."""
@@ -199,6 +202,16 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):
"""Return True if a custom image is used."""
return all(attr in self._data for attr in (ATTR_IMAGE, ATTR_LAST_VERSION))
@property
def version(self) -> Optional[str]:
"""Return version of local version."""
return self._data.get(ATTR_VERSION)
@version.setter
def version(self, value: str) -> None:
"""Set installed version."""
self._data[ATTR_VERSION] = value
@property
def boot(self) -> bool:
"""Return True if Home Assistant boot is enabled."""
@@ -234,11 +247,16 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):
"""Install a landing page."""
_LOGGER.info("Setup HomeAssistant landingpage")
while True:
with suppress(DockerAPIError):
try:
await self.instance.install("landingpage")
return
_LOGGER.warning("Fails install landingpage, retry after 30sec")
await asyncio.sleep(30)
except DockerAPIError:
_LOGGER.warning("Fails install landingpage, retry after 30sec")
await asyncio.sleep(30)
else:
break
self.version = self.instance.version
self.save_data()
@process_lock
async def install(self) -> None:
@@ -252,26 +270,28 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):
tag = self.latest_version
if tag:
with suppress(DockerAPIError):
await self.instance.install(tag)
await self.instance.update(tag)
break
_LOGGER.warning("Error on install Home Assistant. Retry in 30sec")
await asyncio.sleep(30)
# finishing
_LOGGER.info("Home Assistant docker now installed")
self.version = self.instance.version
self.save_data()
# finishing
try:
if not self.boot:
return
_LOGGER.info("Start Home Assistant")
await self._start()
except HomeAssistantError:
_LOGGER.error("Can't start Home Assistant!")
finally:
with suppress(DockerAPIError):
await self.instance.cleanup()
# Cleanup
with suppress(DockerAPIError):
await self.instance.cleanup()
@process_lock
async def update(self, version=None) -> None:
async def update(self, version: Optional[str] = None) -> None:
"""Update HomeAssistant version."""
version = version or self.latest_version
rollback = self.version if not self.error_state else None
@@ -283,7 +303,7 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):
return
# process an update
async def _update(to_version):
async def _update(to_version: str) -> None:
"""Run Home Assistant update."""
_LOGGER.info("Update Home Assistant to version %s", to_version)
try:
@@ -291,10 +311,16 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):
except DockerAPIError:
_LOGGER.warning("Update Home Assistant image fails")
raise HomeAssistantUpdateError() from None
else:
self.version = self.instance.version
if running:
await self._start()
_LOGGER.info("Successful run Home Assistant %s", to_version)
self.save_data()
with suppress(DockerAPIError):
await self.instance.cleanup()
# Update Home Assistant
with suppress(HomeAssistantError):
@@ -570,3 +596,14 @@ class HomeAssistant(JsonConfig, CoreSysAttributes):
self._error_state = True
raise HomeAssistantError()
async def repair(self):
"""Repair local Home Assistant data."""
if await self.instance.exists():
return
_LOGGER.info("Repair Home Assistant %s", self.version)
try:
await self.instance.install(self.version)
except DockerAPIError:
_LOGGER.error("Repairing of Home Assistant fails")

View File

@@ -15,6 +15,10 @@ _LOGGER = logging.getLogger(__name__)
DefaultConfig = attr.make_class("DefaultConfig", ["input", "output"])
AUDIODB_JSON: Path = Path(__file__).parents[1].joinpath("data/audiodb.json")
ASOUND_TMPL: Path = Path(__file__).parents[1].joinpath("data/asound.tmpl")
class AlsaAudio(CoreSysAttributes):
"""Handle Audio ALSA host data."""
@@ -82,12 +86,8 @@ class AlsaAudio(CoreSysAttributes):
@staticmethod
def _audio_database():
"""Read local json audio data into dict."""
json_file = Path(__file__).parent.joinpath("data/audiodb.json")
try:
# pylint: disable=no-member
with json_file.open("r") as database:
return json.loads(database.read())
return json.loads(AUDIODB_JSON.read_text())
except (ValueError, OSError) as err:
_LOGGER.warning("Can't read audio DB: %s", err)
@@ -122,11 +122,8 @@ class AlsaAudio(CoreSysAttributes):
alsa_output = alsa_output or self.default.output
# Read Template
asound_file = Path(__file__).parent.joinpath("data/asound.tmpl")
try:
# pylint: disable=no-member
with asound_file.open("r") as asound:
asound_data = asound.read()
asound_data = ASOUND_TMPL.read_text()
except OSError as err:
_LOGGER.error("Can't read asound.tmpl: %s", err)
return ""

View File

@@ -2,12 +2,14 @@
import asyncio
import logging
import shlex
from ipaddress import IPv4Address
from typing import Optional
import async_timeout
_LOGGER = logging.getLogger(__name__)
COMMAND = "socat UDP-RECVFROM:53,fork UDP-SENDTO:127.0.0.11:53"
COMMAND = "socat UDP-RECVFROM:53,fork UDP-SENDTO:{!s}:53"
class DNSForward:
@@ -15,23 +17,23 @@ class DNSForward:
def __init__(self):
"""Initialize DNS forwarding."""
self.proc = None
self.proc: Optional[asyncio.Process] = None
async def start(self):
async def start(self, dns_server: IPv4Address) -> None:
"""Start DNS forwarding."""
try:
self.proc = await asyncio.create_subprocess_exec(
*shlex.split(COMMAND),
*shlex.split(COMMAND.format(dns_server)),
stdin=asyncio.subprocess.DEVNULL,
stdout=asyncio.subprocess.DEVNULL,
stderr=asyncio.subprocess.DEVNULL
stderr=asyncio.subprocess.DEVNULL,
)
except OSError as err:
_LOGGER.error("Can't start DNS forwarding: %s", err)
else:
_LOGGER.info("Start DNS port forwarding for host add-ons")
_LOGGER.info("Start DNS port forwarding to %s", dns_server)
async def stop(self):
async def stop(self) -> None:
"""Stop DNS forwarding."""
if not self.proc:
_LOGGER.warning("DNS forwarding is not running!")

View File

@@ -3,25 +3,26 @@ from datetime import datetime
import logging
from pathlib import Path
import re
from typing import Any, Dict, Optional, Set
import pyudev
from ..const import ATTR_NAME, ATTR_TYPE, ATTR_DEVICES, CHAN_ID, CHAN_TYPE
from ..const import ATTR_DEVICES, ATTR_NAME, ATTR_TYPE, CHAN_ID, CHAN_TYPE
_LOGGER = logging.getLogger(__name__)
ASOUND_CARDS = Path("/proc/asound/cards")
RE_CARDS = re.compile(r"(\d+) \[(\w*) *\]: (.*\w)")
ASOUND_CARDS: Path = Path("/proc/asound/cards")
RE_CARDS: re.Pattern = re.compile(r"(\d+) \[(\w*) *\]: (.*\w)")
ASOUND_DEVICES = Path("/proc/asound/devices")
RE_DEVICES = re.compile(r"\[.*(\d+)- (\d+).*\]: ([\w ]*)")
ASOUND_DEVICES: Path = Path("/proc/asound/devices")
RE_DEVICES: re.Pattern = re.compile(r"\[.*(\d+)- (\d+).*\]: ([\w ]*)")
PROC_STAT = Path("/proc/stat")
RE_BOOT_TIME = re.compile(r"btime (\d+)")
PROC_STAT: Path = Path("/proc/stat")
RE_BOOT_TIME: re.Pattern = re.compile(r"btime (\d+)")
GPIO_DEVICES = Path("/sys/class/gpio")
SOC_DEVICES = Path("/sys/devices/platform/soc")
RE_TTY = re.compile(r"tty[A-Z]+")
GPIO_DEVICES: Path = Path("/sys/class/gpio")
SOC_DEVICES: Path = Path("/sys/devices/platform/soc")
RE_TTY: re.Pattern = re.compile(r"tty[A-Z]+")
class Hardware:
@@ -32,13 +33,21 @@ class Hardware:
self.context = pyudev.Context()
@property
def serial_devices(self):
def serial_devices(self) -> Set[str]:
"""Return all serial and connected devices."""
dev_list = set()
dev_list: Set[str] = set()
for device in self.context.list_devices(subsystem="tty"):
if "ID_VENDOR" in device.properties or RE_TTY.search(device.device_node):
dev_list.add(device.device_node)
return dev_list
@property
def serial_by_id(self) -> Set[str]:
"""Return all /dev/serial/by-id for serial devices."""
dev_list: Set[str] = set()
for device in self.context.list_devices(subsystem="tty"):
if "ID_VENDOR" in device.properties or RE_TTY.search(device.device_node):
# Add /dev/serial/by-id devlink for current device
for dev_link in device.device_links:
if not dev_link.startswith("/dev/serial/by-id"):
@@ -48,9 +57,9 @@ class Hardware:
return dev_list
@property
def input_devices(self):
def input_devices(self) -> Set[str]:
"""Return all input devices."""
dev_list = set()
dev_list: Set[str] = set()
for device in self.context.list_devices(subsystem="input"):
if "NAME" in device.properties:
dev_list.add(device.properties["NAME"].replace('"', ""))
@@ -58,9 +67,9 @@ class Hardware:
return dev_list
@property
def disk_devices(self):
def disk_devices(self) -> Set[str]:
"""Return all disk devices."""
dev_list = set()
dev_list: Set[str] = set()
for device in self.context.list_devices(subsystem="block"):
if "ID_NAME" in device.properties:
dev_list.add(device.device_node)
@@ -68,15 +77,15 @@ class Hardware:
return dev_list
@property
def support_audio(self):
def support_audio(self) -> bool:
"""Return True if the system have audio support."""
return bool(self.audio_devices)
@property
def audio_devices(self):
def audio_devices(self) -> Dict[str, Any]:
"""Return all available audio interfaces."""
if not ASOUND_CARDS.exists():
_LOGGER.debug("No audio devices found")
_LOGGER.info("No audio devices found")
return {}
try:
@@ -86,7 +95,7 @@ class Hardware:
_LOGGER.error("Can't read asound data: %s", err)
return {}
audio_list = {}
audio_list: Dict[str, Any] = {}
# parse cards
for match in RE_CARDS.finditer(cards):
@@ -109,31 +118,31 @@ class Hardware:
return audio_list
@property
def support_gpio(self):
def support_gpio(self) -> bool:
"""Return True if device support GPIOs."""
return SOC_DEVICES.exists() and GPIO_DEVICES.exists()
@property
def gpio_devices(self):
def gpio_devices(self) -> Set[str]:
"""Return list of GPIO interface on device."""
dev_list = set()
dev_list: Set[str] = set()
for interface in GPIO_DEVICES.glob("gpio*"):
dev_list.add(interface.name)
return dev_list
@property
def last_boot(self):
def last_boot(self) -> Optional[str]:
"""Return last boot time."""
try:
with PROC_STAT.open("r") as stat_file:
stats = stat_file.read()
stats: str = stat_file.read()
except OSError as err:
_LOGGER.error("Can't read stat data: %s", err)
return None
# parse stat file
found = RE_BOOT_TIME.search(stats)
found: Optional[re.Match] = RE_BOOT_TIME.search(stats)
if not found:
_LOGGER.error("Can't found last boot time!")
return None

View File

@@ -9,7 +9,7 @@ from typing import Awaitable, Optional
import aiohttp
from .const import URL_HASSIO_APPARMOR
from .const import URL_HASSIO_APPARMOR, HASSIO_VERSION
from .coresys import CoreSys, CoreSysAttributes
from .docker.stats import DockerStats
from .docker.supervisor import DockerSupervisor
@@ -34,7 +34,7 @@ class Supervisor(CoreSysAttributes):
async def load(self) -> None:
"""Prepare Home Assistant object."""
try:
await self.instance.attach()
await self.instance.attach(tag="latest")
except DockerAPIError:
_LOGGER.fatal("Can't setup Supervisor Docker container!")
@@ -54,7 +54,7 @@ class Supervisor(CoreSysAttributes):
@property
def version(self) -> str:
"""Return version of running Home Assistant."""
return self.instance.version
return HASSIO_VERSION
@property
def latest_version(self) -> str:
@@ -109,7 +109,7 @@ class Supervisor(CoreSysAttributes):
_LOGGER.info("Update Supervisor to version %s", version)
try:
await self.instance.install(version)
await self.instance.update(version, latest=True)
except DockerAPIError:
_LOGGER.error("Update of Hass.io fails!")
raise SupervisorUpdateError() from None
@@ -136,3 +136,14 @@ class Supervisor(CoreSysAttributes):
return await self.instance.stats()
except DockerAPIError:
raise SupervisorError() from None
async def repair(self):
"""Repair local Supervisor data."""
if await self.instance.exists():
return
_LOGGER.info("Repair Supervisor %s", self.version)
try:
await self.instance.retag()
except DockerAPIError:
_LOGGER.error("Repairing of Supervisor fails")

View File

@@ -3,7 +3,7 @@ import asyncio
import logging
from .coresys import CoreSysAttributes
from .exceptions import HomeAssistantError
from .exceptions import HomeAssistantError, CoreDNSError
_LOGGER = logging.getLogger(__name__)
@@ -11,17 +11,20 @@ HASS_WATCHDOG_API = "HASS_WATCHDOG_API"
RUN_UPDATE_SUPERVISOR = 29100
RUN_UPDATE_ADDONS = 57600
RUN_UPDATE_HASSOSCLI = 29100
RUN_UPDATE_HASSOSCLI = 28100
RUN_UPDATE_DNS = 30100
RUN_RELOAD_ADDONS = 21600
RUN_RELOAD_ADDONS = 10800
RUN_RELOAD_SNAPSHOTS = 72000
RUN_RELOAD_HOST = 72000
RUN_RELOAD_UPDATER = 21600
RUN_RELOAD_UPDATER = 7200
RUN_RELOAD_INGRESS = 930
RUN_WATCHDOG_HOMEASSISTANT_DOCKER = 15
RUN_WATCHDOG_HOMEASSISTANT_API = 300
RUN_WATCHDOG_DNS_DOCKER = 20
class Tasks(CoreSysAttributes):
"""Handle Tasks inside Hass.io."""
@@ -48,6 +51,9 @@ class Tasks(CoreSysAttributes):
self._update_hassos_cli, RUN_UPDATE_HASSOSCLI
)
)
self.jobs.add(
self.sys_scheduler.register_task(self._update_dns, RUN_UPDATE_DNS)
)
# Reload
self.jobs.add(
@@ -83,6 +89,11 @@ class Tasks(CoreSysAttributes):
self._watchdog_homeassistant_api, RUN_WATCHDOG_HOMEASSISTANT_API
)
)
self.jobs.add(
self.sys_scheduler.register_task(
self._watchdog_dns_docker, RUN_WATCHDOG_DNS_DOCKER
)
)
_LOGGER.info("All core tasks are scheduled")
@@ -194,3 +205,32 @@ class Tasks(CoreSysAttributes):
_LOGGER.info("Found new HassOS CLI version")
await self.sys_hassos.update_cli()
async def _update_dns(self):
"""Check and run update of CoreDNS plugin."""
if not self.sys_dns.need_update:
return
# don't perform an update on dev channel
if self.sys_dev:
_LOGGER.warning("Ignore CoreDNS update on dev channel!")
return
_LOGGER.info("Found new CoreDNS plugin version")
await self.sys_dns.update()
async def _watchdog_dns_docker(self):
"""Check running state of Docker and start if they is close."""
# if Home Assistant is active
if await self.sys_dns.is_running():
return
_LOGGER.warning("Watchdog found a problem with CoreDNS plugin!")
if await self.sys_dns.is_fails():
_LOGGER.warning("CoreDNS plugin is in fails state / Reset config")
await self.sys_dns.reset()
try:
await self.sys_dns.start()
except CoreDNSError:
_LOGGER.error("Watchdog CoreDNS reanimation fails!")

View File

@@ -4,23 +4,25 @@ from contextlib import suppress
from datetime import timedelta
import json
import logging
from typing import Optional
import aiohttp
from .const import (
URL_HASSIO_VERSION,
FILE_HASSIO_UPDATER,
ATTR_HOMEASSISTANT,
ATTR_HASSIO,
ATTR_CHANNEL,
ATTR_DNS,
ATTR_HASSIO,
ATTR_HASSOS,
ATTR_HASSOS_CLI,
ATTR_HOMEASSISTANT,
FILE_HASSIO_UPDATER,
URL_HASSIO_VERSION,
)
from .coresys import CoreSysAttributes
from .exceptions import HassioUpdaterError
from .utils import AsyncThrottle
from .utils.json import JsonConfig
from .validate import SCHEMA_UPDATER_CONFIG
from .exceptions import HassioUpdaterError
_LOGGER = logging.getLogger(__name__)
@@ -33,43 +35,48 @@ class Updater(JsonConfig, CoreSysAttributes):
super().__init__(FILE_HASSIO_UPDATER, SCHEMA_UPDATER_CONFIG)
self.coresys = coresys
async def load(self):
async def load(self) -> None:
"""Update internal data."""
with suppress(HassioUpdaterError):
await self.fetch_data()
async def reload(self):
async def reload(self) -> None:
"""Update internal data."""
with suppress(HassioUpdaterError):
await self.fetch_data()
@property
def version_homeassistant(self):
"""Return last version of Home Assistant."""
def version_homeassistant(self) -> Optional[str]:
"""Return latest version of Home Assistant."""
return self._data.get(ATTR_HOMEASSISTANT)
@property
def version_hassio(self):
"""Return last version of Hass.io."""
def version_hassio(self) -> Optional[str]:
"""Return latest version of Hass.io."""
return self._data.get(ATTR_HASSIO)
@property
def version_hassos(self):
"""Return last version of HassOS."""
def version_hassos(self) -> Optional[str]:
"""Return latest version of HassOS."""
return self._data.get(ATTR_HASSOS)
@property
def version_hassos_cli(self):
"""Return last version of HassOS cli."""
def version_hassos_cli(self) -> Optional[str]:
"""Return latest version of HassOS cli."""
return self._data.get(ATTR_HASSOS_CLI)
@property
def channel(self):
def version_dns(self) -> Optional[str]:
"""Return latest version of Hass.io DNS."""
return self._data.get(ATTR_DNS)
@property
def channel(self) -> str:
"""Return upstream channel of Hass.io instance."""
return self._data[ATTR_CHANNEL]
@channel.setter
def channel(self, value):
def channel(self, value: str):
"""Set upstream mode."""
self._data[ATTR_CHANNEL] = value
@@ -104,6 +111,7 @@ class Updater(JsonConfig, CoreSysAttributes):
try:
# update supervisor version
self._data[ATTR_HASSIO] = data["supervisor"]
self._data[ATTR_DNS] = data["dns"]
# update Home Assistant version
self._data[ATTR_HOMEASSISTANT] = data["homeassistant"][machine]

View File

@@ -142,6 +142,7 @@ class DBus:
data = await self._send(command)
# Parse and return data
_LOGGER.debug("Receive from %s: %s", method, data)
return self.parse_gvariant(data)
async def get_properties(self, interface):

View File

@@ -11,6 +11,7 @@ from .const import (
ATTR_CHANNEL,
ATTR_DEBUG,
ATTR_DEBUG_BLOCK,
ATTR_DNS,
ATTR_HASSIO,
ATTR_HASSOS,
ATTR_HASSOS_CLI,
@@ -23,20 +24,24 @@ from .const import (
ATTR_PORT,
ATTR_PORTS,
ATTR_REFRESH_TOKEN,
ATTR_SERVERS,
ATTR_SESSION,
ATTR_SSL,
ATTR_TIMEZONE,
ATTR_UUID,
ATTR_VERSION,
ATTR_WAIT_BOOT,
ATTR_WATCHDOG,
CHANNEL_BETA,
CHANNEL_DEV,
CHANNEL_STABLE,
DNS_SERVERS,
)
from .utils.validate import validate_timezone
RE_REPOSITORY = re.compile(r"^(?P<url>[^#]+)(?:#(?P<branch>[\w\-]+))?$")
# pylint: disable=no-value-for-parameter
NETWORK_PORT = vol.All(vol.Coerce(int), vol.Range(min=1, max=65535))
WAIT_BOOT = vol.All(vol.Coerce(int), vol.Range(min=1, max=60))
DOCKER_IMAGE = vol.Match(r"^[\w{}]+/[\-\w{}]+$")
@@ -46,6 +51,8 @@ UUID_MATCH = vol.Match(r"^[0-9a-f]{32}$")
SHA256 = vol.Match(r"^[0-9a-f]{64}$")
TOKEN = vol.Match(r"^[0-9a-f]{32,256}$")
LOG_LEVEL = vol.In(["debug", "info", "warning", "error", "critical"])
DNS_URL = vol.Match(r"^dns://\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
DNS_SERVER_LIST = vol.All([DNS_URL], vol.Length(max=8))
def validate_repository(repository):
@@ -82,6 +89,7 @@ DOCKER_PORTS_DESCRIPTION = vol.Schema(
SCHEMA_HASS_CONFIG = vol.Schema(
{
vol.Optional(ATTR_UUID, default=lambda: uuid.uuid4().hex): UUID_MATCH,
vol.Optional(ATTR_VERSION): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_ACCESS_TOKEN): TOKEN,
vol.Optional(ATTR_BOOT, default=True): vol.Boolean(),
vol.Inclusive(ATTR_IMAGE, "custom_hass"): DOCKER_IMAGE,
@@ -106,6 +114,7 @@ SCHEMA_UPDATER_CONFIG = vol.Schema(
vol.Optional(ATTR_HASSIO): vol.Coerce(str),
vol.Optional(ATTR_HASSOS): vol.Coerce(str),
vol.Optional(ATTR_HASSOS_CLI): vol.Coerce(str),
vol.Optional(ATTR_DNS): vol.Coerce(str),
},
extra=vol.REMOVE_EXTRA,
)
@@ -143,3 +152,12 @@ SCHEMA_INGRESS_CONFIG = vol.Schema(
},
extra=vol.REMOVE_EXTRA,
)
SCHEMA_DNS_CONFIG = vol.Schema(
{
vol.Optional(ATTR_VERSION): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_SERVERS, default=DNS_SERVERS): DNS_SERVER_LIST,
},
extra=vol.REMOVE_EXTRA,
)

View File

@@ -6,9 +6,9 @@ colorlog==4.0.2
cpe==1.2.1
cryptography==2.7
docker==4.0.2
gitpython==2.1.11
pytz==2019.1
gitpython==3.0.1
pytz==2019.2
pyudev==0.21.0
uvloop==0.12.2
voluptuous==0.11.5
ptvsd==4.2.10
voluptuous==0.11.7
ptvsd==4.3.2

102
scripts/test_env.sh Executable file
View File

@@ -0,0 +1,102 @@
#!/bin/bash
# Spin up a throwaway Docker-in-Docker environment, build the Hass.io
# supervisor image, and launch it for local testing.
# -e: abort on error; -E: let functions/subshells inherit the ERR trap below.
set -eE

# Seconds to wait for the Docker daemon to come up / shut down.
DOCKER_TIMEOUT=30
# PID of the background dockerd process; 0 means "not started yet".
DOCKER_PID=0
#######################################
# Start a Docker daemon in the background and wait until its API answers,
# aborting the script after DOCKER_TIMEOUT seconds.
# Globals:
#   DOCKER_PID     - written: PID of the spawned dockerd
#   DOCKER_TIMEOUT - read: max seconds to wait
# Outputs: progress to stdout, timeout diagnostic to stderr
#######################################
function start_docker() {
    local starttime
    local endtime

    echo "Starting docker."
    dockerd 2> /dev/null &
    DOCKER_PID=$!

    echo "Waiting for docker to initialize..."
    starttime="$(date +%s)"
    endtime="$(date +%s)"
    # Poll the daemon until it responds; re-check elapsed time each second.
    until docker info > /dev/null 2>&1; do
        if (( endtime - starttime <= DOCKER_TIMEOUT )); then
            sleep 1
            endtime="$(date +%s)"
        else
            echo "Timeout while waiting for docker to come up" >&2
            exit 1
        fi
    done
    echo "Docker was initialized"
}
#######################################
# Terminate the background Docker daemon started by start_docker and wait
# for it to exit, aborting the script after DOCKER_TIMEOUT seconds.
# Globals:
#   DOCKER_PID     - read: PID of the dockerd to stop (0 = never started)
#   DOCKER_TIMEOUT - read: max seconds to wait for the process to die
# Outputs: progress to stdout, diagnostics to stderr
#######################################
function stop_docker() {
    local starttime
    local endtime

    echo "Stopping in container docker..."
    # Only act when we actually spawned dockerd and it is still alive.
    if [ "$DOCKER_PID" -gt 0 ] && kill -0 "$DOCKER_PID" 2> /dev/null; then
        starttime="$(date +%s)"
        endtime="$(date +%s)"

        # Ask the daemon to terminate (SIGTERM), then wait for it to die.
        kill "$DOCKER_PID"
        while kill -0 "$DOCKER_PID" 2> /dev/null; do
            if (( endtime - starttime <= DOCKER_TIMEOUT )); then
                sleep 1
                endtime="$(date +%s)"
            else
                echo "Timeout while waiting for container docker to die" >&2
                exit 1
            fi
        done
    else
        echo "Your host might have been left with unreleased resources" >&2
    fi
}
# Build a dev supervisor image for amd64 using the official builder image.
# The builder talks to the host daemon via the mounted docker socket and
# tags the result under the "homeassistant" Docker Hub namespace.
function build_supervisor() {
    docker pull homeassistant/amd64-builder:dev

    docker run --rm --privileged \
        -v /run/docker.sock:/run/docker.sock -v "$(pwd):/data" \
        homeassistant/amd64-builder:dev \
        --supervisor 3.7-alpine3.10 --version dev \
        -t /data --test --amd64 \
        --no-cache --docker-hub homeassistant
}
# Pre-fetch the dev Hass.io CLI image so it is available to the supervisor.
function install_cli() {
    docker pull homeassistant/amd64-hassio-cli:dev
}
# Run the freshly built supervisor in the foreground with the host docker
# socket, dbus, and machine-id mounted, using /workspaces/test_hassio as
# its persistent data directory.
function setup_test_env() {
    mkdir -p /workspaces/test_hassio

    docker run --rm --privileged \
        --name hassio_supervisor \
        --security-opt seccomp=unconfined \
        --security-opt apparmor:unconfined \
        -v /run/docker.sock:/run/docker.sock \
        -v /run/dbus:/run/dbus \
        -v "/workspaces/test_hassio":/data \
        -v /etc/machine-id:/etc/machine-id:ro \
        -e SUPERVISOR_SHARE="/workspaces/test_hassio" \
        -e SUPERVISOR_NAME=hassio_supervisor \
        -e SUPERVISOR_DEV=1 \
        -e HOMEASSISTANT_REPOSITORY="homeassistant/qemux86-64-homeassistant" \
        homeassistant/amd64-hassio-supervisor:latest
}
echo "Start Test-Env"

start_docker
# From here on, any failing command (set -eE) also tears down dockerd.
trap "stop_docker" ERR

build_supervisor
install_cli
# Runs the supervisor in the foreground; returns when the container exits.
setup_test_env

# Normal-path teardown (the ERR trap only covers the failure path).
stop_docker

View File