Mirror of https://github.com/home-assistant/supervisor.git, synced 2025-07-19 15:16:33 +00:00
Bump pylint from 2.5.3 to 2.6.0 (#1962)
* Bump pylint from 2.5.3 to 2.6.0

Bumps [pylint](https://github.com/PyCQA/pylint) from 2.5.3 to 2.6.0.
- [Release notes](https://github.com/PyCQA/pylint/releases)
- [Changelog](https://github.com/PyCQA/pylint/blob/master/ChangeLog)
- [Commits](https://github.com/PyCQA/pylint/compare/pylint-2.5.3...pylint-2.6.0)

Signed-off-by: dependabot[bot] <support@github.com>

* Address lint issues

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Pascal Vizeli <pvizeli@syshack.ch>
parent cd31fad56d
commit 979586cdb2
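pylint 2.6.0 enables the raise-missing-from check (W0707), which flags a raise inside an except block that does not state how the new exception relates to the one being handled. The lint fixes in the diff below therefore re-raise with "raise ... from err" (keep the chain) or "raise ... from None" (suppress it). A minimal, runnable sketch of the pattern, using stand-in exception names rather than the Supervisor's real classes:

# Minimal sketch of the re-raise pattern this commit applies throughout the
# Supervisor code base. The exception classes and functions below are
# stand-ins for illustration; the real code wraps low-level errors such as
# DockerAPIError into domain errors like AddonsError.

class DockerAPIError(Exception):
    """Stand-in for the low-level error raised by the Docker layer."""

class AddonsError(Exception):
    """Stand-in for the high-level error surfaced to callers."""

def install_addon() -> None:
    # Simulated low-level failure.
    raise DockerAPIError("image pull failed")

def install() -> None:
    # Before (flagged by pylint 2.6.0 as W0707, raise-missing-from):
    #     except DockerAPIError:
    #         raise AddonsError()
    # After: chain explicitly so the root cause stays attached.
    try:
        install_addon()
    except DockerAPIError as err:
        raise AddonsError() from err

if __name__ == "__main__":
    try:
        install()
    except AddonsError as exc:
        # __cause__ carries the original DockerAPIError.
        print(f"{exc.__class__.__name__} caused by {exc.__cause__!r}")

With explicit chaining, the wrapped error shows up as __cause__ and in the traceback under "The above exception was the direct cause of the following exception", so the original Docker or OS error is not lost when a domain-specific error is raised.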
@@ -5,7 +5,7 @@ flake8-docstrings==1.5.0
 flake8==3.8.3
 pre-commit==2.7.1
 pydocstyle==5.1.0
-pylint==2.5.3
+pylint==2.6.0
 pytest-aiohttp==0.3.0
 pytest-cov==2.10.1
 pytest-timeout==1.4.2
@@ -153,9 +153,9 @@ class AddonManager(CoreSysAttributes):

         try:
             await addon.instance.install(store.version, store.image)
-        except DockerAPIError:
+        except DockerAPIError as err:
             self.data.uninstall(addon)
-            raise AddonsError()
+            raise AddonsError() from err
         else:
             self.local[slug] = addon

@@ -174,8 +174,8 @@ class AddonManager(CoreSysAttributes):

         try:
             await addon.instance.remove()
-        except DockerAPIError:
-            raise AddonsError()
+        except DockerAPIError as err:
+            raise AddonsError() from err

         await addon.remove_data()

@@ -245,8 +245,8 @@ class AddonManager(CoreSysAttributes):
             # Cleanup
             with suppress(DockerAPIError):
                 await addon.instance.cleanup()
-        except DockerAPIError:
-            raise AddonsError()
+        except DockerAPIError as err:
+            raise AddonsError() from err
         else:
             self.data.update(store)
             _LOGGER.info("Add-on '%s' successfully updated", slug)
@@ -283,8 +283,8 @@ class AddonManager(CoreSysAttributes):
         try:
             await addon.instance.remove()
             await addon.instance.install(addon.version)
-        except DockerAPIError:
-            raise AddonsError()
+        except DockerAPIError as err:
+            raise AddonsError() from err
         else:
             self.data.update(store)
             _LOGGER.info("Add-on '%s' successfully rebuilt", slug)
@@ -488,15 +488,15 @@ class Addon(AddonModel):
         # Start Add-on
         try:
             await self.instance.run()
-        except DockerAPIError:
-            raise AddonsError()
+        except DockerAPIError as err:
+            raise AddonsError() from err

     async def stop(self) -> None:
         """Stop add-on."""
         try:
             return await self.instance.stop()
-        except DockerAPIError:
-            raise AddonsError()
+        except DockerAPIError as err:
+            raise AddonsError() from err

     async def restart(self) -> None:
         """Restart add-on."""
@@ -515,8 +515,8 @@ class Addon(AddonModel):
         """Return stats of container."""
         try:
             return await self.instance.stats()
-        except DockerAPIError:
-            raise AddonsError()
+        except DockerAPIError as err:
+            raise AddonsError() from err

     async def write_stdin(self, data) -> None:
         """Write data to add-on stdin.
@@ -529,8 +529,8 @@ class Addon(AddonModel):

         try:
             return await self.instance.write_stdin(data)
-        except DockerAPIError:
-            raise AddonsError()
+        except DockerAPIError as err:
+            raise AddonsError() from err

     async def snapshot(self, tar_file: tarfile.TarFile) -> None:
         """Snapshot state of an add-on."""
@@ -541,8 +541,8 @@ class Addon(AddonModel):
             if self.need_build:
                 try:
                     await self.instance.export_image(temp_path.joinpath("image.tar"))
-                except DockerAPIError:
-                    raise AddonsError()
+                except DockerAPIError as err:
+                    raise AddonsError() from err

             data = {
                 ATTR_USER: self.persist,
@@ -554,18 +554,18 @@ class Addon(AddonModel):
             # Store local configs/state
             try:
                 write_json_file(temp_path.joinpath("addon.json"), data)
-            except JsonFileError:
+            except JsonFileError as err:
                 _LOGGER.error("Can't save meta for %s", self.slug)
-                raise AddonsError()
+                raise AddonsError() from err

             # Store AppArmor Profile
             if self.sys_host.apparmor.exists(self.slug):
                 profile = temp_path.joinpath("apparmor.txt")
                 try:
                     self.sys_host.apparmor.backup_profile(self.slug, profile)
-                except HostAppArmorError:
+                except HostAppArmorError as err:
                     _LOGGER.error("Can't backup AppArmor profile")
-                    raise AddonsError()
+                    raise AddonsError() from err

             # write into tarfile
             def _write_tarfile():
@@ -588,7 +588,7 @@ class Addon(AddonModel):
                 await self.sys_run_in_executor(_write_tarfile)
             except (tarfile.TarError, OSError) as err:
                 _LOGGER.error("Can't write tarfile %s: %s", tar_file, err)
-                raise AddonsError()
+                raise AddonsError() from err

         _LOGGER.info("Finish snapshot for addon %s", self.slug)

@@ -605,13 +605,13 @@ class Addon(AddonModel):
                 await self.sys_run_in_executor(_extract_tarfile)
             except tarfile.TarError as err:
                 _LOGGER.error("Can't read tarfile %s: %s", tar_file, err)
-                raise AddonsError()
+                raise AddonsError() from err

             # Read snapshot data
             try:
                 data = read_json_file(Path(temp, "addon.json"))
-            except JsonFileError:
-                raise AddonsError()
+            except JsonFileError as err:
+                raise AddonsError() from err

             # Validate
             try:
@@ -622,7 +622,7 @@ class Addon(AddonModel):
                     self.slug,
                     humanize_error(data, err),
                 )
-                raise AddonsError()
+                raise AddonsError() from err

             # If available
             if not self._available(data[ATTR_SYSTEM]):
@@ -669,18 +669,18 @@ class Addon(AddonModel):
                 await self.sys_run_in_executor(_restore_data)
             except shutil.Error as err:
                 _LOGGER.error("Can't restore origin data: %s", err)
-                raise AddonsError()
+                raise AddonsError() from err

             # Restore AppArmor
             profile_file = Path(temp, "apparmor.txt")
             if profile_file.exists():
                 try:
                     await self.sys_host.apparmor.load_profile(self.slug, profile_file)
-                except HostAppArmorError:
+                except HostAppArmorError as err:
                     _LOGGER.error(
                         "Can't restore AppArmor profile for add-on %s", self.slug
                     )
-                    raise AddonsError()
+                    raise AddonsError() from err

             # Run add-on
             if data[ATTR_STATE] == STATE_STARTED:
@@ -364,7 +364,7 @@ def validate_options(coresys: CoreSys, raw_schema: Dict[str, Any]):
                 # normal value
                 options[key] = _single_validate(coresys, typ, value, key)
         except (IndexError, KeyError):
-            raise vol.Invalid(f"Type error for {key}")
+            raise vol.Invalid(f"Type error for {key}") from None

    _check_missing_options(raw_schema, options, "root")
    return options
@@ -378,20 +378,20 @@ def _single_validate(coresys: CoreSys, typ: str, value: Any, key: str):
     """Validate a single element."""
     # if required argument
     if value is None:
-        raise vol.Invalid(f"Missing required option '{key}'")
+        raise vol.Invalid(f"Missing required option '{key}'") from None

     # Lookup secret
     if str(value).startswith("!secret "):
         secret: str = value.partition(" ")[2]
         value = coresys.secrets.get(secret)
         if value is None:
-            raise vol.Invalid(f"Unknown secret {secret}")
+            raise vol.Invalid(f"Unknown secret {secret}") from None

     # parse extend data from type
     match = RE_SCHEMA_ELEMENT.match(typ)

     if not match:
-        raise vol.Invalid(f"Unknown type {typ}")
+        raise vol.Invalid(f"Unknown type {typ}") from None

     # prepare range
     range_args = {}
@@ -419,7 +419,7 @@ def _single_validate(coresys: CoreSys, typ: str, value: Any, key: str):
     elif typ.startswith(V_LIST):
         return vol.In(match.group("list").split("|"))(str(value))

-    raise vol.Invalid(f"Fatal error for {key} type {typ}")
+    raise vol.Invalid(f"Fatal error for {key} type {typ}") from None


 def _nested_validate_list(coresys, typ, data_list, key):
@@ -428,7 +428,7 @@ def _nested_validate_list(coresys, typ, data_list, key):

     # Make sure it is a list
     if not isinstance(data_list, list):
-        raise vol.Invalid(f"Invalid list for {key}")
+        raise vol.Invalid(f"Invalid list for {key}") from None

     # Process list
     for element in data_list:
@@ -448,7 +448,7 @@ def _nested_validate_dict(coresys, typ, data_dict, key):

     # Make sure it is a dict
     if not isinstance(data_dict, dict):
-        raise vol.Invalid(f"Invalid dict for {key}")
+        raise vol.Invalid(f"Invalid dict for {key}") from None

     # Process dict
     for c_key, c_value in data_dict.items():
@@ -475,7 +475,7 @@ def _check_missing_options(origin, exists, root):
     for miss_opt in missing:
         if isinstance(origin[miss_opt], str) and origin[miss_opt].endswith("?"):
             continue
-        raise vol.Invalid(f"Missing option {miss_opt} in {root}")
+        raise vol.Invalid(f"Missing option {miss_opt} in {root}") from None


 def schema_ui_options(raw_schema: Dict[str, Any]) -> List[Dict[str, Any]]:
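The validator hunks above use "from None" rather than "from err": when a vol.Invalid is raised for an expected condition (missing option, unknown secret, bad type), chaining the internal IndexError or KeyError would only add noise to the user-facing message, so the implicit context is suppressed. A small stdlib-only sketch of the difference, with made-up option names:

# Minimal sketch of "raise ... from err" versus "raise ... from None".
# OPTIONS and the lookup helpers are invented for illustration.

OPTIONS = {"port": 8123}

def lookup_chained(key: str) -> int:
    try:
        return OPTIONS[key]
    except KeyError as err:
        # Chained: the KeyError is kept as __cause__ and shown in the
        # traceback as the direct cause of the ValueError.
        raise ValueError(f"Missing option {key}") from err

def lookup_suppressed(key: str) -> int:
    try:
        return OPTIONS[key]
    except KeyError:
        # Suppressed: only the ValueError is displayed, which is what the
        # validators above want for expected, user-facing errors.
        raise ValueError(f"Missing option {key}") from None

if __name__ == "__main__":
    for func in (lookup_chained, lookup_suppressed):
        try:
            func("host")
        except ValueError as exc:
            # __cause__ is the KeyError for the chained variant, None for the
            # suppressed variant.
            print(func.__name__, "->", repr(exc.__cause__))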
@@ -48,8 +48,8 @@ def json_loads(data: Any) -> Dict[str, Any]:
         return {}
     try:
         return json.loads(data)
-    except json.JSONDecodeError:
-        raise APIError("Invalid json")
+    except json.JSONDecodeError as err:
+        raise APIError("Invalid json") from err


 def api_process(method):
@@ -120,7 +120,7 @@ async def api_validate(
     try:
         data_validated = schema(data)
     except vol.Invalid as ex:
-        raise APIError(humanize_error(data, ex))
+        raise APIError(humanize_error(data, ex)) from None

     if not origin:
         return data_validated
@@ -79,7 +79,7 @@ class Discovery(CoreSysAttributes, JsonConfig):
             config = valid_discovery_config(service, config)
         except vol.Invalid as err:
             _LOGGER.error("Invalid discovery %s config", humanize_error(config, err))
-            raise DiscoveryError()
+            raise DiscoveryError() from err

         # Create message
         message = Message(addon.slug, service, config)
@@ -13,7 +13,7 @@ def valid_discovery_service(service):
     """Validate service name."""
     service_file = Path(__file__).parent.joinpath(f"services/{service}.py")
     if not service_file.exists():
-        raise vol.Invalid(f"Service {service} not found")
+        raise vol.Invalid(f"Service {service} not found") from None
     return service


@@ -22,7 +22,7 @@ def valid_discovery_config(service, config):
     try:
         service_mod = import_module(f".services.{service}", "supervisor.discovery")
     except ImportError:
-        raise vol.Invalid(f"Service {service} not found")
+        raise vol.Invalid(f"Service {service} not found") from None

     return service_mod.SCHEMA(config)

@@ -131,7 +131,7 @@ class DockerAPI:
             )
         except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.error("Can't create container from %s: %s", name, err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

         # Attach network
         if not network_mode:
@@ -149,7 +149,7 @@ class DockerAPI:
             container.start()
         except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.error("Can't start %s: %s", name, err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

         # Update metadata
         with suppress(docker.errors.DockerException, requests.RequestException):
@@ -187,7 +187,7 @@ class DockerAPI:

         except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.error("Can't execute command: %s", err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

         finally:
             # cleanup container
@@ -249,7 +249,7 @@ class DockerAPI:
                 denied_images.add(image_name)
         except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.error("Corrupt docker overlayfs detect: %s", err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

         if not denied_images:
             return False
@@ -402,7 +402,7 @@ class DockerAddon(DockerInterface):

         except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.error("Can't build %s:%s: %s", self.image, tag, err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

         _LOGGER.info("Build %s:%s done", self.image, tag)

@@ -420,7 +420,7 @@ class DockerAddon(DockerInterface):
             image = self.sys_docker.api.get_image(f"{self.image}:{self.version}")
         except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.error("Can't fetch image %s: %s", self.image, err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

         _LOGGER.info("Export image %s to %s", self.image, tar_file)
         try:
@@ -429,7 +429,7 @@ class DockerAddon(DockerInterface):
                     write_tar.write(chunk)
         except (OSError, requests.RequestException) as err:
             _LOGGER.error("Can't write tar file %s: %s", tar_file, err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

         _LOGGER.info("Export image %s done", self.image)

@@ -450,7 +450,7 @@ class DockerAddon(DockerInterface):
             docker_image = self.sys_docker.images.get(f"{self.image}:{self.version}")
         except (docker.errors.DockerException, OSError) as err:
             _LOGGER.error("Can't import image %s: %s", self.image, err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

         self._meta = docker_image.attrs
         _LOGGER.info("Import image %s and version %s", tar_file, self.version)
@@ -477,7 +477,7 @@ class DockerAddon(DockerInterface):
             socket = container.attach_socket(params={"stdin": 1, "stream": 1})
         except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.error("Can't attach to %s stdin: %s", self.name, err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

         try:
             # Write to stdin
@@ -486,7 +486,7 @@ class DockerAddon(DockerInterface):
             socket.close()
         except OSError as err:
             _LOGGER.error("Can't write to %s stdin: %s", self.name, err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

     def _stop(self, remove_container=True) -> None:
         """Stop/remove Docker container.

@@ -107,11 +107,11 @@ class DockerInterface(CoreSysAttributes):
                 "Available space in /data is: %s GiB",
                 free_space,
             )
-            raise DockerAPIError()
+            raise DockerAPIError() from err
         except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.error("Unknown error with %s:%s -> %s", image, tag, err)
             self.sys_capture_exception(err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err
         else:
             self._meta = docker_image.attrs

@@ -145,8 +145,8 @@ class DockerInterface(CoreSysAttributes):
             docker_container = self.sys_docker.containers.get(self.name)
         except docker.errors.NotFound:
             return False
-        except (docker.errors.DockerException, requests.RequestException):
-            raise DockerAPIError()
+        except (docker.errors.DockerException, requests.RequestException) as err:
+            raise DockerAPIError() from err

         return docker_container.status == "running"

@@ -198,8 +198,8 @@ class DockerInterface(CoreSysAttributes):
             docker_container = self.sys_docker.containers.get(self.name)
         except docker.errors.NotFound:
             return
-        except (docker.errors.DockerException, requests.RequestException):
-            raise DockerAPIError()
+        except (docker.errors.DockerException, requests.RequestException) as err:
+            raise DockerAPIError() from err

         if docker_container.status == "running":
             _LOGGER.info("Stop %s application", self.name)
@@ -223,16 +223,16 @@ class DockerInterface(CoreSysAttributes):
         """
         try:
             docker_container = self.sys_docker.containers.get(self.name)
-        except (docker.errors.DockerException, requests.RequestException):
+        except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.error("%s not found for starting up", self.name)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

         _LOGGER.info("Start %s", self.name)
         try:
             docker_container.start()
         except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.error("Can't start %s: %s", self.name, err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

     @process_lock
     def remove(self) -> Awaitable[None]:
@@ -261,7 +261,7 @@ class DockerInterface(CoreSysAttributes):

         except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.warning("Can't remove image %s: %s", self.image, err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

         self._meta = None

@@ -328,9 +328,9 @@ class DockerInterface(CoreSysAttributes):
         """
         try:
             origin = self.sys_docker.images.get(f"{self.image}:{self.version}")
-        except (docker.errors.DockerException, requests.RequestException):
+        except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.warning("Can't find %s for cleanup", self.image)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

         # Cleanup Current
         for image in self.sys_docker.images.list(name=self.image):
@@ -362,15 +362,15 @@ class DockerInterface(CoreSysAttributes):
         """
         try:
             container = self.sys_docker.containers.get(self.name)
-        except (docker.errors.DockerException, requests.RequestException):
-            raise DockerAPIError()
+        except (docker.errors.DockerException, requests.RequestException) as err:
+            raise DockerAPIError() from err

         _LOGGER.info("Restart %s", self.image)
         try:
             container.restart(timeout=self.timeout)
         except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.warning("Can't restart %s: %s", self.image, err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

     @process_lock
     def execute_command(self, command: str) -> Awaitable[CommandReturn]:
@@ -395,15 +395,15 @@ class DockerInterface(CoreSysAttributes):
         """
         try:
             docker_container = self.sys_docker.containers.get(self.name)
-        except (docker.errors.DockerException, requests.RequestException):
-            raise DockerAPIError()
+        except (docker.errors.DockerException, requests.RequestException) as err:
+            raise DockerAPIError() from err

         try:
             stats = docker_container.stats(stream=False)
             return DockerStats(stats)
         except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.error("Can't read stats from %s: %s", self.name, err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

     def is_fails(self) -> Awaitable[bool]:
         """Return True if Docker is failing state.
@@ -452,9 +452,9 @@ class DockerInterface(CoreSysAttributes):
             if not available_version:
                 raise ValueError()

-        except (docker.errors.DockerException, ValueError):
+        except (docker.errors.DockerException, ValueError) as err:
             _LOGGER.debug("No version found for %s", self.image)
-            raise DockerAPIError()
+            raise DockerAPIError() from err
         else:
             _LOGGER.debug("Found %s versions: %s", self.image, available_version)

@@ -108,7 +108,7 @@ class DockerNetwork:
             self.network.connect(container, aliases=alias, ipv4_address=ipv4_address)
         except docker.errors.APIError as err:
             _LOGGER.error("Can't link container to hassio-net: %s", err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

         self.network.reload()

@@ -128,7 +128,7 @@ class DockerNetwork:

         except docker.errors.APIError as err:
             _LOGGER.warning("Can't disconnect container from default: %s", err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

     def stale_cleanup(self, container_name: str):
         """Remove force a container from Network.

@@ -39,8 +39,8 @@ class DockerSupervisor(DockerInterface, CoreSysAttributes):
         """
         try:
             docker_container = self.sys_docker.containers.get(self.name)
-        except (docker.errors.DockerException, requests.RequestException):
-            raise DockerAPIError()
+        except (docker.errors.DockerException, requests.RequestException) as err:
+            raise DockerAPIError() from err

         self._meta = docker_container.attrs
         _LOGGER.info(
@@ -77,7 +77,7 @@ class DockerSupervisor(DockerInterface, CoreSysAttributes):
             docker_container.image.tag(self.image, tag="latest")
         except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.error("Can't retag supervisor version: %s", err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err

     def update_start_tag(self, image: str, version: str) -> Awaitable[None]:
         """Update start tag to new version."""
@@ -104,4 +104,4 @@ class DockerSupervisor(DockerInterface, CoreSysAttributes):

         except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.error("Can't fix start tag: %s", err)
-            raise DockerAPIError()
+            raise DockerAPIError() from err
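In the Docker layer above, the original docker or requests error is already logged (and in one place handed to sys_capture_exception); chaining it with "from err" additionally keeps it inside the wrapping DockerAPIError, so anything that later formats or reports that error still sees the root cause. A runnable sketch under assumed names (only DockerAPIError mirrors the real class name; the connection failure is simulated):

# Minimal sketch of what chaining buys at logging/reporting time: the root
# cause stays in the formatted traceback of the wrapping error.

import logging
import traceback

logging.basicConfig(level=logging.INFO)
_LOGGER = logging.getLogger(__name__)

class DockerAPIError(Exception):
    """Stand-in for the Supervisor's Docker-layer error."""

def start_container() -> None:
    try:
        # Simulated low-level failure instead of a real Docker call.
        raise ConnectionError("docker daemon not reachable")
    except ConnectionError as err:
        _LOGGER.error("Can't start container: %s", err)
        raise DockerAPIError() from err

if __name__ == "__main__":
    try:
        start_container()
    except DockerAPIError:
        # The formatted traceback contains both exceptions, joined by
        # "The above exception was the direct cause of the following exception:".
        print(traceback.format_exc())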
@@ -140,9 +140,9 @@ class HassOS(CoreSysAttributes):
             await self.sys_dbus.rauc.install(ext_ota)
             completed = await self.sys_dbus.rauc.signal_completed()

-        except DBusError:
+        except DBusError as err:
             _LOGGER.error("Rauc communication error")
-            raise HassOSUpdateError()
+            raise HassOSUpdateError() from err

         finally:
             int_ota.unlink()
@@ -162,9 +162,9 @@ class HomeAssistantCore(CoreSysAttributes):
             await self.instance.update(
                 to_version, image=self.sys_updater.image_homeassistant
             )
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.warning("Update Home Assistant image fails")
-            raise HomeAssistantUpdateError()
+            raise HomeAssistantUpdateError() from err
         else:
             self.sys_homeassistant.version = self.instance.version
             self.sys_homeassistant.image = self.sys_updater.image_homeassistant
@@ -212,8 +212,8 @@ class HomeAssistantCore(CoreSysAttributes):

         try:
             await self.instance.run()
-        except DockerAPIError:
-            raise HomeAssistantError()
+        except DockerAPIError as err:
+            raise HomeAssistantError() from err

         await self._block_till_run(self.sys_homeassistant.version)

@@ -228,8 +228,8 @@ class HomeAssistantCore(CoreSysAttributes):
         if await self.instance.is_initialize():
             try:
                 await self.instance.start()
-            except DockerAPIError:
-                raise HomeAssistantError()
+            except DockerAPIError as err:
+                raise HomeAssistantError() from err

             await self._block_till_run(self.sys_homeassistant.version)
         # No Instance/Container found, extended start
@@ -244,16 +244,16 @@ class HomeAssistantCore(CoreSysAttributes):
         """
         try:
             return await self.instance.stop(remove_container=False)
-        except DockerAPIError:
-            raise HomeAssistantError()
+        except DockerAPIError as err:
+            raise HomeAssistantError() from err

     @process_lock
     async def restart(self) -> None:
         """Restart Home Assistant Docker."""
         try:
             await self.instance.restart()
-        except DockerAPIError:
-            raise HomeAssistantError()
+        except DockerAPIError as err:
+            raise HomeAssistantError() from err

         await self._block_till_run(self.sys_homeassistant.version)

@@ -278,8 +278,8 @@ class HomeAssistantCore(CoreSysAttributes):
         """
         try:
             return await self.instance.stats()
-        except DockerAPIError:
-            raise HomeAssistantError()
+        except DockerAPIError as err:
+            raise HomeAssistantError() from err

     def is_running(self) -> Awaitable[bool]:
         """Return True if Docker container is running.
@@ -76,7 +76,7 @@ class AppArmorControl(CoreSysAttributes):
             shutil.copyfile(profile_file, dest_profile)
         except OSError as err:
             _LOGGER.error("Can't copy %s: %s", profile_file, err)
-            raise HostAppArmorError()
+            raise HostAppArmorError() from err

         # Load profiles
         _LOGGER.info("Add or Update AppArmor profile: %s", profile_name)
@@ -94,7 +94,7 @@ class AppArmorControl(CoreSysAttributes):
                 profile_file.unlink()
             except OSError as err:
                 _LOGGER.error("Can't remove profile: %s", err)
-                raise HostAppArmorError()
+                raise HostAppArmorError() from err
             return

         # Marks als remove and start host process
@@ -103,7 +103,7 @@ class AppArmorControl(CoreSysAttributes):
             profile_file.rename(remove_profile)
         except OSError as err:
             _LOGGER.error("Can't mark profile as remove: %s", err)
-            raise HostAppArmorError()
+            raise HostAppArmorError() from err

         _LOGGER.info("Remove AppArmor profile: %s", profile_name)
         self._profiles.remove(profile_name)
@@ -117,4 +117,4 @@ class AppArmorControl(CoreSysAttributes):
             shutil.copy(profile_file, backup_file)
         except OSError as err:
             _LOGGER.error("Can't backup profile %s: %s", profile_name, err)
-            raise HostAppArmorError()
+            raise HostAppArmorError() from err

@@ -83,7 +83,7 @@ class InfoCenter(CoreSysAttributes):
             stdout, _ = await proc.communicate()
         except OSError as err:
             _LOGGER.error("Can't read kernel log: %s", err)
-            raise HostError()
+            raise HostError() from err

         return stdout

@@ -96,4 +96,4 @@ class InfoCenter(CoreSysAttributes):
             _LOGGER.warning("Can't update host system information!")
         except DBusNotConnectedError:
             _LOGGER.error("No hostname D-Bus connection available")
-            raise HostNotSupportedError()
+            raise HostNotSupportedError() from None
@@ -34,6 +34,6 @@ class NetworkManager(CoreSysAttributes):
             await self.sys_dbus.nmi_dns.update()
         except DBusError:
             _LOGGER.warning("Can't update host DNS system information!")
-        except DBusNotConnectedError:
+        except DBusNotConnectedError as err:
             _LOGGER.error("No hostname D-Bus connection available")
-            raise HostNotSupportedError()
+            raise HostNotSupportedError() from err
@@ -104,6 +104,7 @@ class SoundControl(CoreSysAttributes):
         """Set a stream to default input/output."""

         def _set_default():
+            source = sink = None
             try:
                 with Pulse(PULSE_NAME) as pulse:
                     if stream_type == StreamType.INPUT:
@@ -115,12 +116,12 @@ class SoundControl(CoreSysAttributes):
                         sink = pulse.get_sink_by_name(name)
                         pulse.sink_default_set(sink)

-            except PulseIndexError:
+            except PulseIndexError as err:
                 _LOGGER.error("Can't find %s stream %s", source, name)
-                raise PulseAudioError()
+                raise PulseAudioError() from err
             except PulseError as err:
                 _LOGGER.error("Can't set %s as stream: %s", name, err)
-                raise PulseAudioError()
+                raise PulseAudioError() from err

         # Run and Reload data
         await self.sys_run_in_executor(_set_default)
@@ -147,14 +148,14 @@ class SoundControl(CoreSysAttributes):

                     # Set volume
                     pulse.volume_set_all_chans(stream, volume)
-            except PulseIndexError:
+            except PulseIndexError as err:
                 _LOGGER.error(
                     "Can't find %s stream %d (App: %s)", stream_type, index, application
                 )
-                raise PulseAudioError()
+                raise PulseAudioError() from err
             except PulseError as err:
                 _LOGGER.error("Can't set %d volume: %s", index, err)
-                raise PulseAudioError()
+                raise PulseAudioError() from err

         # Run and Reload data
         await self.sys_run_in_executor(_set_volume)
@@ -181,14 +182,14 @@ class SoundControl(CoreSysAttributes):

                     # Mute stream
                     pulse.mute(stream, mute)
-            except PulseIndexError:
+            except PulseIndexError as err:
                 _LOGGER.error(
                     "Can't find %s stream %d (App: %s)", stream_type, index, application
                 )
-                raise PulseAudioError()
+                raise PulseAudioError() from err
             except PulseError as err:
                 _LOGGER.error("Can't set %d volume: %s", index, err)
-                raise PulseAudioError()
+                raise PulseAudioError() from err

         # Run and Reload data
         await self.sys_run_in_executor(_set_mute)
@@ -203,14 +204,14 @@ class SoundControl(CoreSysAttributes):
                     card = pulse.get_sink_by_name(card_name)
                     pulse.card_profile_set(card, profile_name)

-            except PulseIndexError:
+            except PulseIndexError as err:
                 _LOGGER.error("Can't find %s profile %s", card_name, profile_name)
-                raise PulseAudioError()
+                raise PulseAudioError() from err
             except PulseError as err:
                 _LOGGER.error(
                     "Can't activate %s profile %s: %s", card_name, profile_name, err
                 )
-                raise PulseAudioError()
+                raise PulseAudioError() from err

         # Run and Reload data
         await self.sys_run_in_executor(_activate_profile)
@@ -331,7 +332,7 @@ class SoundControl(CoreSysAttributes):

             except PulseOperationFailed as err:
                 _LOGGER.error("Error while processing pulse update: %s", err)
-                raise PulseAudioError()
+                raise PulseAudioError() from err
             except PulseError as err:
                 _LOGGER.debug("Can't update PulseAudio data: %s", err)

@@ -154,9 +154,9 @@ class Audio(JsonConfig, CoreSysAttributes):

         try:
             await self.instance.update(version, image=self.sys_updater.image_audio)
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.error("Audio update fails")
-            raise AudioUpdateError()
+            raise AudioUpdateError() from err
         else:
             self.version = version
             self.image = self.sys_updater.image_audio
@@ -174,27 +174,27 @@ class Audio(JsonConfig, CoreSysAttributes):
         _LOGGER.info("Restart Audio plugin")
         try:
             await self.instance.restart()
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.error("Can't start Audio plugin")
-            raise AudioError()
+            raise AudioError() from err

     async def start(self) -> None:
         """Run CoreDNS."""
         _LOGGER.info("Start Audio plugin")
         try:
             await self.instance.run()
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.error("Can't start Audio plugin")
-            raise AudioError()
+            raise AudioError() from err

     async def stop(self) -> None:
         """Stop CoreDNS."""
         _LOGGER.info("Stop Audio plugin")
         try:
             await self.instance.stop()
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.error("Can't stop Audio plugin")
-            raise AudioError()
+            raise AudioError() from err

     def logs(self) -> Awaitable[bytes]:
         """Get CoreDNS docker logs.
@@ -207,8 +207,8 @@ class Audio(JsonConfig, CoreSysAttributes):
         """Return stats of CoreDNS."""
         try:
             return await self.instance.stats()
-        except DockerAPIError:
-            raise AudioError()
+        except DockerAPIError as err:
+            raise AudioError() from err

     def is_running(self) -> Awaitable[bool]:
         """Return True if Docker container is running.
@@ -132,9 +132,9 @@ class HaCli(CoreSysAttributes, JsonConfig):
             await self.instance.update(
                 version, image=self.sys_updater.image_cli, latest=True
             )
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.error("HA cli update fails")
-            raise CliUpdateError()
+            raise CliUpdateError() from err
         else:
             self.version = version
             self.image = self.sys_updater.image_cli
@@ -157,25 +157,25 @@ class HaCli(CoreSysAttributes, JsonConfig):
         _LOGGER.info("Start cli plugin")
         try:
             await self.instance.run()
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.error("Can't start cli plugin")
-            raise CliError()
+            raise CliError() from err

     async def stop(self) -> None:
         """Stop cli."""
         _LOGGER.info("Stop cli plugin")
         try:
             await self.instance.stop()
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.error("Can't stop cli plugin")
-            raise CliError()
+            raise CliError() from err

     async def stats(self) -> DockerStats:
         """Return stats of cli."""
         try:
             return await self.instance.stats()
-        except DockerAPIError:
-            raise CliError()
+        except DockerAPIError as err:
+            raise CliError() from err

     def is_running(self) -> Awaitable[bool]:
         """Return True if Docker container is running.
@@ -196,9 +196,9 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
         # Update
         try:
             await self.instance.update(version, image=self.sys_updater.image_dns)
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.error("CoreDNS update fails")
-            raise CoreDNSUpdateError()
+            raise CoreDNSUpdateError() from err
         else:
             self.version = version
             self.image = self.sys_updater.image_dns
@@ -217,9 +217,9 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
         _LOGGER.info("Restart CoreDNS plugin")
         try:
             await self.instance.restart()
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.error("Can't start CoreDNS plugin")
-            raise CoreDNSError()
+            raise CoreDNSError() from err

     async def start(self) -> None:
         """Run CoreDNS."""
@@ -229,18 +229,18 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
         _LOGGER.info("Start CoreDNS plugin")
         try:
             await self.instance.run()
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.error("Can't start CoreDNS plugin")
-            raise CoreDNSError()
+            raise CoreDNSError() from err

     async def stop(self) -> None:
         """Stop CoreDNS."""
         _LOGGER.info("Stop CoreDNS plugin")
         try:
             await self.instance.stop()
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.error("Can't stop CoreDNS plugin")
-            raise CoreDNSError()
+            raise CoreDNSError() from err

     async def reset(self) -> None:
         """Reset DNS and hosts."""
@@ -307,7 +307,7 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
             self.corefile.write_text(data)
         except OSError as err:
             _LOGGER.error("Can't update corefile: %s", err)
-            raise CoreDNSError()
+            raise CoreDNSError() from err

     def _init_hosts(self) -> None:
         """Import hosts entry."""
@@ -331,7 +331,7 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
                     hosts.write(f"{entry.ip_address!s} {' '.join(entry.names)}\n")
         except OSError as err:
             _LOGGER.error("Can't write hosts file: %s", err)
-            raise CoreDNSError()
+            raise CoreDNSError() from err

     def add_host(self, ipv4: IPv4Address, names: List[str], write: bool = True) -> None:
         """Add a new host entry."""
@@ -394,8 +394,8 @@ class CoreDNS(JsonConfig, CoreSysAttributes):
         """Return stats of CoreDNS."""
         try:
             return await self.instance.stats()
-        except DockerAPIError:
-            raise CoreDNSError()
+        except DockerAPIError as err:
+            raise CoreDNSError() from err

     def is_running(self) -> Awaitable[bool]:
         """Return True if Docker container is running.
@@ -127,9 +127,9 @@ class Multicast(JsonConfig, CoreSysAttributes):
         # Update
         try:
             await self.instance.update(version, image=self.sys_updater.image_multicast)
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.error("Multicast update fails")
-            raise MulticastUpdateError()
+            raise MulticastUpdateError() from err
         else:
             self.version = version
             self.image = self.sys_updater.image_multicast
@@ -147,27 +147,27 @@ class Multicast(JsonConfig, CoreSysAttributes):
         _LOGGER.info("Restart Multicast plugin")
         try:
             await self.instance.restart()
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.error("Can't start Multicast plugin")
-            raise MulticastError()
+            raise MulticastError() from err

     async def start(self) -> None:
         """Run Multicast."""
         _LOGGER.info("Start Multicast plugin")
         try:
             await self.instance.run()
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.error("Can't start Multicast plugin")
-            raise MulticastError()
+            raise MulticastError() from err

     async def stop(self) -> None:
         """Stop Multicast."""
         _LOGGER.info("Stop Multicast plugin")
         try:
             await self.instance.stop()
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.error("Can't stop Multicast plugin")
-            raise MulticastError()
+            raise MulticastError() from err

     def logs(self) -> Awaitable[bytes]:
         """Get Multicast docker logs.
@@ -180,8 +180,8 @@ class Multicast(JsonConfig, CoreSysAttributes):
         """Return stats of Multicast."""
         try:
             return await self.instance.stats()
-        except DockerAPIError:
-            raise MulticastError()
+        except DockerAPIError as err:
+            raise MulticastError() from err

     def is_running(self) -> Awaitable[bool]:
         """Return True if Docker container is running.
@@ -277,7 +277,7 @@ class Snapshot(CoreSysAttributes):
                 _LOGGER.error(
                     "Invalid data for %s: %s", self.tarfile, humanize_error(self._data, err)
                 )
-                raise ValueError("Invalid config")
+                raise ValueError("Invalid config") from None

         # new snapshot, build it
         def _create_snapshot():
@@ -41,7 +41,7 @@ def unique_addons(addons_list):
     single = {addon[ATTR_SLUG] for addon in addons_list}

     if len(single) != len(addons_list):
-        raise vol.Invalid("Invalid addon list on snapshot!")
+        raise vol.Invalid("Invalid addon list on snapshot!") from None
     return addons_list

@@ -87,7 +87,7 @@ class Supervisor(CoreSysAttributes):

         except (aiohttp.ClientError, asyncio.TimeoutError) as err:
             _LOGGER.warning("Can't fetch AppArmor profile: %s", err)
-            raise SupervisorError()
+            raise SupervisorError() from err

         with TemporaryDirectory(dir=self.sys_config.path_tmp) as tmp_dir:
             profile_file = Path(tmp_dir, "apparmor.txt")
@@ -95,15 +95,15 @@ class Supervisor(CoreSysAttributes):
                 profile_file.write_text(data)
             except OSError as err:
                 _LOGGER.error("Can't write temporary profile: %s", err)
-                raise SupervisorError()
+                raise SupervisorError() from err

             try:
                 await self.sys_host.apparmor.load_profile(
                     "hassio-supervisor", profile_file
                 )
-            except HostAppArmorError:
+            except HostAppArmorError as err:
                 _LOGGER.error("Can't update AppArmor profile!")
-                raise SupervisorError()
+                raise SupervisorError() from err

     async def update(self, version: Optional[str] = None) -> None:
         """Update Home Assistant version."""
@@ -121,9 +121,9 @@ class Supervisor(CoreSysAttributes):
             await self.instance.update_start_tag(
                 self.sys_updater.image_supervisor, version
             )
-        except DockerAPIError:
+        except DockerAPIError as err:
             _LOGGER.error("Update of Supervisor fails!")
-            raise SupervisorUpdateError()
+            raise SupervisorUpdateError() from err
         else:
             self.sys_config.version = version
             self.sys_config.save_data()
@@ -148,8 +148,8 @@ class Supervisor(CoreSysAttributes):
         """Return stats of Supervisor."""
         try:
             return await self.instance.stats()
-        except DockerAPIError:
-            raise SupervisorError()
+        except DockerAPIError as err:
+            raise SupervisorError() from err

     async def repair(self):
         """Repair local Supervisor data."""
@@ -158,11 +158,11 @@ class Updater(JsonConfig, CoreSysAttributes):

         except (aiohttp.ClientError, asyncio.TimeoutError) as err:
             _LOGGER.warning("Can't fetch versions from %s: %s", url, err)
-            raise HassioUpdaterError()
+            raise HassioUpdaterError() from err

         except json.JSONDecodeError as err:
             _LOGGER.warning("Can't parse versions from %s: %s", url, err)
-            raise HassioUpdaterError()
+            raise HassioUpdaterError() from err

         # data valid?
         if not data or data.get(ATTR_CHANNEL) != self.channel:
@@ -196,7 +196,7 @@ class Updater(JsonConfig, CoreSysAttributes):

         except KeyError as err:
             _LOGGER.warning("Can't process version data: %s", err)
-            raise HassioUpdaterError()
+            raise HassioUpdaterError() from err

         else:
             self.save_data()
@@ -22,7 +22,7 @@ def get_profile_name(profile_file):
                 profiles.add(match.group(1))
     except OSError as err:
         _LOGGER.error("Can't read AppArmor profile: %s", err)
-        raise AppArmorFileError()
+        raise AppArmorFileError() from err

     if len(profiles) != 1:
         _LOGGER.error("To many profiles inside file: %s", profiles)
@@ -54,7 +54,7 @@ def adjust_profile(profile_name, profile_file, profile_new):
                 profile_data.append(line.replace(org_profile, profile_name))
     except OSError as err:
         _LOGGER.error("Can't adjust origin profile: %s", err)
-        raise AppArmorFileError()
+        raise AppArmorFileError() from err

     # Write into new file
     try:
@@ -62,4 +62,4 @@ def adjust_profile(profile_name, profile_file, profile_new):
             profile.writelines(profile_data)
     except OSError as err:
         _LOGGER.error("Can't write new profile: %s", err)
-        raise AppArmorFileError()
+        raise AppArmorFileError() from err
@@ -89,7 +89,7 @@ class DBus:
         except ET.ParseError as err:
             _LOGGER.error("Can't parse introspect data: %s", err)
             _LOGGER.debug("Introspect %s on %s", self.bus_name, self.object_path)
-            raise DBusParseError()
+            raise DBusParseError() from err

         # Read available methods
         for interface in xml.findall("./interface"):
@@ -137,7 +137,7 @@ class DBus:
         except json.JSONDecodeError as err:
             _LOGGER.error("Can't parse '%s': %s", json_raw, err)
             _LOGGER.debug("GVariant data: '%s'", raw)
-            raise DBusParseError()
+            raise DBusParseError() from err

     @staticmethod
     def gvariant_args(args: List[Any]) -> str:
@@ -177,9 +177,9 @@ class DBus:
         """Read all properties from interface."""
         try:
             return (await self.call_dbus(DBUS_METHOD_GETALL, interface))[0]
-        except IndexError:
+        except IndexError as err:
             _LOGGER.error("No attributes returned for %s", interface)
-            raise DBusFatalError
+            raise DBusFatalError() from err

     async def _send(self, command: List[str]) -> str:
         """Send command over dbus."""
@@ -196,7 +196,7 @@ class DBus:
             data, error = await proc.communicate()
         except OSError as err:
             _LOGGER.error("DBus fatal error: %s", err)
-            raise DBusFatalError()
+            raise DBusFatalError() from err

         # Success?
         if proc.returncode == 0:
@@ -294,18 +294,18 @@ class DBusSignalWrapper:
     async def __anext__(self):
         """Get next data."""
         if not self._proc:
-            raise StopAsyncIteration()
+            raise StopAsyncIteration() from None

         # Read signals
         while True:
             try:
                 data = await self._proc.stdout.readline()
             except asyncio.TimeoutError:
-                raise StopAsyncIteration()
+                raise StopAsyncIteration() from None

             # Program close
             if not data:
-                raise StopAsyncIteration()
+                raise StopAsyncIteration() from None

             # Extract metadata
             match = RE_MONITOR_OUTPUT.match(data.decode())
@@ -321,5 +321,5 @@ class DBusSignalWrapper:

             try:
                 return self.dbus.parse_gvariant(data)
-            except DBusParseError:
-                raise StopAsyncIteration()
+            except DBusParseError as err:
+                raise StopAsyncIteration() from err
@@ -20,7 +20,7 @@ def write_json_file(jsonfile: Path, data: Any) -> None:
         jsonfile.write_text(json.dumps(data, indent=2))
     except (OSError, ValueError, TypeError) as err:
         _LOGGER.error("Can't write %s: %s", jsonfile, err)
-        raise JsonFileError()
+        raise JsonFileError() from err


 def read_json_file(jsonfile: Path) -> Any:
@@ -29,7 +29,7 @@ def read_json_file(jsonfile: Path) -> Any:
         return json.loads(jsonfile.read_text())
     except (OSError, ValueError, TypeError, UnicodeDecodeError) as err:
         _LOGGER.error("Can't read json from %s: %s", jsonfile, err)
-        raise JsonFileError()
+        raise JsonFileError() from err


 class JsonConfig:
@@ -24,6 +24,6 @@ def validate_timezone(timezone):
         raise vol.Invalid(
             "Invalid time zone passed in. Valid options can be found here: "
             "http://en.wikipedia.org/wiki/List_of_tz_database_time_zones"
-        )
+        ) from None

     return timezone
@@ -64,19 +64,19 @@ def version_tag(value: Union[str, None, int, float]) -> Optional[str]:
         value = str(value)
         pkg_version.parse(value)
     except (pkg_version.InvalidVersion, TypeError):
-        raise vol.Invalid(f"Invalid version format {value}")
+        raise vol.Invalid(f"Invalid version format {value}") from None
     return value


 def dns_url(url: str) -> str:
     """Take a DNS url (str) and validates that it matches the scheme dns://<ip address>."""
     if not url.lower().startswith("dns://"):
-        raise vol.Invalid("Doesn't start with dns://")
+        raise vol.Invalid("Doesn't start with dns://") from None
     address: str = url[6:]  # strip the dns:// off
     try:
         ipaddress.ip_address(address)  # matches ipv4 or ipv6 addresses
     except ValueError:
-        raise vol.Invalid(f"Invalid DNS URL: {url}")
+        raise vol.Invalid(f"Invalid DNS URL: {url}") from None
     return url


@@ -87,7 +87,7 @@ def validate_repository(repository: str) -> str:
     """Validate a valid repository."""
     data = RE_REPOSITORY.match(repository)
     if not data:
-        raise vol.Invalid("No valid repository format!")
+        raise vol.Invalid("No valid repository format!") from None

     # Validate URL
     # pylint: disable=no-value-for-parameter