Fix handling of the Docker container (#7)

* Fix handling of the Docker container

* Fix lint

* Update version

* Fix lint v2

* Fix signal handling

* Fix log output
Pascal Vizeli 2017-04-20 14:59:03 +02:00 committed by GitHub
parent 7a0b9cc1ac
commit d285fd4ad4
10 changed files with 82 additions and 37 deletions

@@ -1,7 +1,6 @@
"""Main file for HassIO."""
import asyncio
import logging
import signal
import sys
import hassio.bootstrap as bootstrap
@@ -25,12 +24,7 @@ if __name__ == "__main__":
_LOGGER.info("Start Hassio task")
loop.call_soon_threadsafe(loop.create_task, hassio.start())
try:
loop.add_signal_handler(
signal.SIGTERM, lambda: loop.create_task(hassio.stop()))
except ValueError:
_LOGGER.warning("Could not bind to SIGTERM")
loop.call_soon_threadsafe(bootstrap.reg_signal, loop, hassio)
loop.run_forever()
loop.close()

@@ -2,6 +2,7 @@
import logging
import os
import stat
import signal
from colorlog import ColoredFormatter
@@ -81,3 +82,24 @@ def check_environment():
return False
return True
def reg_signal(loop, hassio):
"""Register SIGTERM, SIGKILL to stop system."""
try:
loop.add_signal_handler(
signal.SIGTERM, lambda: loop.create_task(hassio.stop()))
except (ValueError, RuntimeError):
_LOGGER.warning("Could not bind to SIGTERM")
try:
loop.add_signal_handler(
signal.SIGHUP, lambda: loop.create_task(hassio.stop()))
except (ValueError, RuntimeError):
_LOGGER.warning("Could not bind to SIGHUP")
try:
loop.add_signal_handler(
signal.SIGINT, lambda: loop.create_task(hassio.stop()))
except (ValueError, RuntimeError):
_LOGGER.warning("Could not bind to SIGINT")

@@ -22,6 +22,8 @@ ADDONS_CUSTOM = "{}/addons_custom"
UPSTREAM_BETA = 'upstream_beta'
API_ENDPOINT = 'api_endpoint'
class Config(object):
"""Hold all config data."""
@@ -78,6 +80,16 @@ class CoreConfig(Config):
return False
@property
def api_endpoint(self):
"""Return IP address of api endpoint."""
return self._data[API_ENDPOINT]
@api_endpoint.setter
def api_endpoint(self, value):
"""Store IP address of api endpoint."""
self._data[API_ENDPOINT] = value
@property
def upstream_beta(self):
"""Return True if we run in beta upstream."""

@@ -1,5 +1,5 @@
"""Const file for HassIO."""
HASSIO_VERSION = '0.8'
HASSIO_VERSION = '0.9'
URL_HASSIO_VERSION = \
'https://raw.githubusercontent.com/pvizeli/hassio/master/version.json'

@@ -15,7 +15,7 @@ from .const import (
from .scheduler import Scheduler
from .dock.homeassistant import DockerHomeAssistant
from .dock.supervisor import DockerSupervisor
from .tools import get_arch_from_image
from .tools import get_arch_from_image, get_local_ip
_LOGGER = logging.getLogger(__name__)
@@ -52,6 +52,9 @@ class HassIO(object):
await self.supervisor.attach()
await self.supervisor.cleanup()
# set api endpoint
self.config.api_endpoint = await get_local_ip(self.loop)
# hostcontroll
host_info = await self.host_controll.info()
if host_info:
@@ -72,7 +75,7 @@ class HassIO(object):
# schedule update info tasks
self.scheduler.register_task(
self.config.fetch_update_infos, RUN_UPDATE_INFO_TASKS,
first_run=True)
now=True)
# first start of supervisor?
if not await self.homeassistant.exists():
@@ -85,7 +88,7 @@ class HassIO(object):
# schedule addon update task
self.scheduler.register_task(
self.addons.relaod, RUN_RELOAD_ADDONS_TASKS, first_run=True)
self.addons.relaod, RUN_RELOAD_ADDONS_TASKS, now=True)
# schedule self update task
self.scheduler.register_task(
@@ -95,6 +98,7 @@ class HassIO(object):
"""Start HassIO orchestration."""
# start api
await self.api.start()
_LOGGER.info("Start hassio api on %s", self.config.api_endpoint)
# HomeAssistant is already running / only the supervisor was rebooted
if await self.homeassistant.is_running():
@@ -112,6 +116,10 @@ class HassIO(object):
async def stop(self, exit_code=0):
"""Stop a running orchestration."""
# don't process scheduler anymore
self.scheduler.stop()
# process stop tasks in parallel
tasks = [self.websession.close(), self.api.stop()]
await asyncio.wait(tasks, loop=self.loop)
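stop() now halts the scheduler first and then runs the remaining cleanup awaitables concurrently via asyncio.wait. A self-contained sketch of that pattern with illustrative stand-ins for the websession and API shutdown; the diff's 2017-era style (bare coroutines plus a loop= argument) is no longer accepted on current Python, so the sketch wraps the coroutines in tasks.

import asyncio


async def close_websession():
    """Illustrative stand-in for aiohttp's ClientSession.close()."""
    await asyncio.sleep(0.1)
    print("websession closed")


async def stop_api():
    """Illustrative stand-in for stopping the internal API server."""
    await asyncio.sleep(0.2)
    print("api stopped")


async def stop_all():
    """Run both cleanup steps concurrently and wait for all of them."""
    tasks = [asyncio.ensure_future(close_websession()),
             asyncio.ensure_future(stop_api())]
    # asyncio.wait returns (done, pending) once every task has finished.
    await asyncio.wait(tasks)


loop = asyncio.new_event_loop()
loop.run_until_complete(stop_all())
loop.close()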

@@ -138,8 +138,6 @@ class DockerBase(object):
return False
async with self._lock:
_LOGGER.info("Run docker image %s with version %s",
self.image, self.version)
return await self.loop.run_in_executor(None, self._run)
def _run(self):

@@ -65,15 +65,15 @@ class DockerAddon(DockerBase):
detach=True,
network_mode='bridge',
ports=self.addons_data.get_ports(self.addon),
restart_policy={
"Name": "on-failure",
"MaximumRetryCount": 10,
},
volumes=volumes,
)
self.version = get_version_from_env(
self.container.attrs['Config']['Env'])
_LOGGER.info("Start docker addon %s with version %s",
self.image, self.version)
except docker.errors.DockerException as err:
_LOGGER.error("Can't run %s -> %s", self.image, err)
return False

@@ -4,7 +4,7 @@ import logging
import docker
from . import DockerBase
from ..tools import get_version_from_env, get_local_ip
from ..tools import get_version_from_env
_LOGGER = logging.getLogger(__name__)
@@ -31,8 +31,6 @@ class DockerHomeAssistant(DockerBase):
if self._is_running():
return
api_endpoint = get_local_ip(self.loop)
# cleanup old container
self._stop()
@@ -43,12 +41,8 @@
detach=True,
privileged=True,
network_mode='host',
restart_policy={
"Name": "always",
"MaximumRetryCount": 10,
},
environment={
'HASSIO': api_endpoint,
'HASSIO': self.config.api_endpoint,
},
volumes={
self.config.path_config_docker:
@@ -59,6 +53,10 @@
self.version = get_version_from_env(
self.container.attrs['Config']['Env'])
_LOGGER.info("Start docker addon %s with version %s",
self.image, self.version)
except docker.errors.DockerException as err:
_LOGGER.error("Can't run %s -> %s", self.image, err)
return False

@@ -16,9 +16,14 @@ class Scheduler(object):
"""Initialize task schedule."""
self.loop = loop
self._data = {}
self._stop = False
def stop(self):
"""Stop to execute tasks in scheduler."""
self._stop = True
def register_task(self, coro_callback, seconds, repeat=True,
first_run=False):
now=False):
"""Schedule a coroutine.
The coroutine needs to be a callback without arguments.
@@ -34,7 +39,7 @@
self._data[idx] = opts
# schedule task
if first_run:
if now:
self._run_task(idx)
else:
task = self.loop.call_later(seconds, self._run_task, idx)
@@ -46,6 +51,10 @@
"""Run a scheduled task."""
data = self._data.pop(idx)
# don't execute tasks once the scheduler is stopped
if self._stop:
return
self.loop.create_task(data[CALL]())
if data[REPEAT]:
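The two behavioural changes in this file are the renamed now flag (run the task immediately instead of waiting one interval) and the new _stop gate (callbacks already queued with call_later become no-ops after stop()). A condensed, self-contained illustration of that pattern; the constants and simplified bookkeeping are illustrative rather than the real Scheduler.

import asyncio

CALL, SECONDS, REPEAT = 'callback', 'seconds', 'repeat'


class MiniScheduler:
    """Condensed illustration of the repeat / now / stop pattern."""

    def __init__(self, loop):
        self.loop = loop
        self._data = {}
        self._stop = False

    def stop(self):
        """Stop executing tasks; queued callbacks become no-ops."""
        self._stop = True

    def register_task(self, coro_callback, seconds, repeat=True, now=False):
        """Schedule a no-argument coroutine callback every `seconds`."""
        idx = len(self._data)
        self._data[idx] = {CALL: coro_callback, SECONDS: seconds,
                           REPEAT: repeat}
        if now:
            self._run_task(idx)  # run once immediately ...
        else:
            self.loop.call_later(seconds, self._run_task, idx)  # ... or later

    def _run_task(self, idx):
        data = self._data[idx]
        if self._stop:  # the new gate: drop work after stop()
            return
        self.loop.create_task(data[CALL]())
        if data[REPEAT]:
            self.loop.call_later(data[SECONDS], self._run_task, idx)


async def tick():
    print("tick")


loop = asyncio.new_event_loop()
sched = MiniScheduler(loop)
sched.register_task(tick, 1, now=True)  # first run happens right away
loop.call_later(3.5, sched.stop)        # afterwards no further ticks fire
loop.call_later(5, loop.stop)
loop.run_forever()
loop.close()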

@@ -55,19 +55,23 @@ def get_version_from_env(env_list):
def get_local_ip(loop):
"""Retrieve local IP address.
Need run inside executor.
Return a future.
"""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def local_ip():
"""Return local ip."""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Use Google Public DNS server to determine own IP
sock.connect(('8.8.8.8', 80))
# Use Google Public DNS server to determine own IP
sock.connect(('8.8.8.8', 80))
return sock.getsockname()[0]
except socket.error:
return socket.gethostbyname(socket.gethostname())
finally:
sock.close()
return sock.getsockname()[0]
except socket.error:
return socket.gethostbyname(socket.gethostname())
finally:
sock.close()
return loop.run_in_executor(None, local_ip)
def write_json_file(jsonfile, data):
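get_local_ip no longer blocks the event loop: the socket work moves into a nested function that runs in the default executor, and the caller awaits the returned future (core.py now does self.config.api_endpoint = await get_local_ip(self.loop)). A self-contained sketch of the same pattern, with names chosen for illustration:

import asyncio
import socket


def _local_ip():
    """Blocking lookup of the host's outbound IPv4 address."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # connect() on a UDP socket sends nothing; it only selects the
        # source address the OS would use to reach the given host.
        sock.connect(('8.8.8.8', 80))
        return sock.getsockname()[0]
    except socket.error:
        return socket.gethostbyname(socket.gethostname())
    finally:
        sock.close()


def get_local_ip(loop):
    """Return a future resolving to the local IP, like the new tools.py."""
    return loop.run_in_executor(None, _local_ip)


async def main(loop):
    # The future must be awaited, mirroring core.py's
    # `self.config.api_endpoint = await get_local_ip(self.loop)`.
    print(await get_local_ip(loop))


loop = asyncio.new_event_loop()
loop.run_until_complete(main(loop))
loop.close()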