This commit is contained in:
Franck Nijhof 2024-05-06 19:49:06 +02:00 committed by GitHub
commit a8f3b699b3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
33 changed files with 304 additions and 137 deletions

View File

@ -16,7 +16,7 @@ from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.util.unit_system import METRIC_SYSTEM
from .const import DEFAULT_SCAN_INTERVAL, DOMAIN
from .const import DEFAULT_SCAN_INTERVAL, DOMAIN, MAX_RETRIES_AFTER_STARTUP
PLATFORMS: list[Platform] = [Platform.SENSOR]
@ -61,6 +61,12 @@ async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
await coordinator.async_config_entry_first_refresh()
# Once it's set up and we know we are not going to delay
# the startup of Home Assistant, we can set the max attempts
# to a higher value. If the first connection attempt fails,
# Home Assistant's built-in retry logic will take over.
airthings.set_max_attempts(MAX_RETRIES_AFTER_STARTUP)
hass.data[DOMAIN][entry.entry_id] = coordinator
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)

View File

@ -7,3 +7,5 @@ VOLUME_BECQUEREL = "Bq/m³"
VOLUME_PICOCURIE = "pCi/L"
DEFAULT_SCAN_INTERVAL = 300
MAX_RETRIES_AFTER_STARTUP = 5

View File

@ -24,5 +24,5 @@
"dependencies": ["bluetooth_adapters"],
"documentation": "https://www.home-assistant.io/integrations/airthings_ble",
"iot_class": "local_polling",
"requirements": ["airthings-ble==0.8.0"]
"requirements": ["airthings-ble==0.9.0"]
}

View File

@ -35,7 +35,7 @@ from homeassistant.const import (
HTTP_BASIC_AUTHENTICATION,
Platform,
)
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.exceptions import Unauthorized, UnknownUser
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
@ -177,7 +177,8 @@ class AmcrestChecker(ApiWrapper):
"""Return event flag that indicates if camera's API is responding."""
return self._async_wrap_event_flag
def _start_recovery(self) -> None:
@callback
def _async_start_recovery(self) -> None:
self.available_flag.clear()
self.async_available_flag.clear()
async_dispatcher_send(
@ -222,50 +223,98 @@ class AmcrestChecker(ApiWrapper):
yield
except LoginError as ex:
async with self._async_wrap_lock:
self._handle_offline(ex)
self._async_handle_offline(ex)
raise
except AmcrestError:
async with self._async_wrap_lock:
self._handle_error()
self._async_handle_error()
raise
async with self._async_wrap_lock:
self._set_online()
self._async_set_online()
def _handle_offline(self, ex: Exception) -> None:
def _handle_offline_thread_safe(self, ex: Exception) -> bool:
"""Handle camera offline status shared between threads and event loop.
Returns if the camera was online as a bool.
"""
with self._wrap_lock:
was_online = self.available
was_login_err = self._wrap_login_err
self._wrap_login_err = True
if not was_login_err:
_LOGGER.error("%s camera offline: Login error: %s", self._wrap_name, ex)
if was_online:
self._start_recovery()
return was_online
def _handle_error(self) -> None:
def _handle_offline(self, ex: Exception) -> None:
"""Handle camera offline status from a thread."""
if self._handle_offline_thread_safe(ex):
self._hass.loop.call_soon_threadsafe(self._async_start_recovery)
@callback
def _async_handle_offline(self, ex: Exception) -> None:
if self._handle_offline_thread_safe(ex):
self._async_start_recovery()
def _handle_error_thread_safe(self) -> bool:
"""Handle camera error status shared between threads and event loop.
Returns if the camera was online and is now offline as
a bool.
"""
with self._wrap_lock:
was_online = self.available
errs = self._wrap_errors = self._wrap_errors + 1
offline = not self.available
_LOGGER.debug("%s camera errs: %i", self._wrap_name, errs)
if was_online and offline:
_LOGGER.error("%s camera offline: Too many errors", self._wrap_name)
self._start_recovery()
return was_online and offline
def _set_online(self) -> None:
def _handle_error(self) -> None:
"""Handle camera error status from a thread."""
if self._handle_error_thread_safe():
_LOGGER.error("%s camera offline: Too many errors", self._wrap_name)
self._hass.loop.call_soon_threadsafe(self._async_start_recovery)
@callback
def _async_handle_error(self) -> None:
"""Handle camera error status from the event loop."""
if self._handle_error_thread_safe():
_LOGGER.error("%s camera offline: Too many errors", self._wrap_name)
self._async_start_recovery()
def _set_online_thread_safe(self) -> bool:
"""Set camera online status shared between threads and event loop.
Returns if the camera was offline as a bool.
"""
with self._wrap_lock:
was_offline = not self.available
self._wrap_errors = 0
self._wrap_login_err = False
if was_offline:
assert self._unsub_recheck is not None
self._unsub_recheck()
self._unsub_recheck = None
_LOGGER.error("%s camera back online", self._wrap_name)
self.available_flag.set()
self.async_available_flag.set()
async_dispatcher_send(
self._hass, service_signal(SERVICE_UPDATE, self._wrap_name)
)
return was_offline
def _set_online(self) -> None:
"""Set camera online status from a thread."""
if self._set_online_thread_safe():
self._hass.loop.call_soon_threadsafe(self._async_signal_online)
@callback
def _async_set_online(self) -> None:
"""Set camera online status from the event loop."""
if self._set_online_thread_safe():
self._async_signal_online()
@callback
def _async_signal_online(self) -> None:
"""Signal that camera is back online."""
assert self._unsub_recheck is not None
self._unsub_recheck()
self._unsub_recheck = None
_LOGGER.error("%s camera back online", self._wrap_name)
self.available_flag.set()
self.async_available_flag.set()
async_dispatcher_send(
self._hass, service_signal(SERVICE_UPDATE, self._wrap_name)
)
async def _wrap_test_online(self, now: datetime) -> None:
"""Test if camera is back online."""

View File

@ -16,7 +16,7 @@ import voluptuous as vol
from homeassistant.components.camera import Camera, CameraEntityFeature
from homeassistant.components.ffmpeg import FFmpegManager, get_ffmpeg_manager
from homeassistant.const import ATTR_ENTITY_ID, CONF_NAME, STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import (
async_aiohttp_proxy_stream,
@ -325,7 +325,8 @@ class AmcrestCam(Camera):
# Other Entity method overrides
async def async_on_demand_update(self) -> None:
@callback
def async_on_demand_update(self) -> None:
"""Update state."""
self.async_schedule_update_ha_state(True)

View File

@ -8,6 +8,6 @@
"iot_class": "local_push",
"loggers": ["androidtvremote2"],
"quality_scale": "platinum",
"requirements": ["androidtvremote2==0.0.14"],
"requirements": ["androidtvremote2==0.0.15"],
"zeroconf": ["_androidtvremote2._tcp.local."]
}

View File

@ -16,7 +16,7 @@
"requirements": [
"bleak==0.21.1",
"bleak-retry-connector==3.5.0",
"bluetooth-adapters==0.19.1",
"bluetooth-adapters==0.19.2",
"bluetooth-auto-recovery==1.4.2",
"bluetooth-data-tools==1.19.0",
"dbus-fast==2.21.1",

View File

@ -43,21 +43,21 @@ SWITCH_TYPES: dict[str, SHCSwitchEntityDescription] = {
"smartplug": SHCSwitchEntityDescription(
key="smartplug",
device_class=SwitchDeviceClass.OUTLET,
on_key="state",
on_key="switchstate",
on_value=SHCSmartPlug.PowerSwitchService.State.ON,
should_poll=False,
),
"smartplugcompact": SHCSwitchEntityDescription(
key="smartplugcompact",
device_class=SwitchDeviceClass.OUTLET,
on_key="state",
on_key="switchstate",
on_value=SHCSmartPlugCompact.PowerSwitchService.State.ON,
should_poll=False,
),
"lightswitch": SHCSwitchEntityDescription(
key="lightswitch",
device_class=SwitchDeviceClass.SWITCH,
on_key="state",
on_key="switchstate",
on_value=SHCLightSwitch.PowerSwitchService.State.ON,
should_poll=False,
),

View File

@ -142,6 +142,9 @@ async def websocket_list_agents(
agent = manager.async_get_agent(agent_info.id)
assert agent is not None
if isinstance(agent, ConversationEntity):
continue
supported_languages = agent.supported_languages
if language and supported_languages != MATCH_ALL:
supported_languages = language_util.matches(

View File

@ -20,5 +20,5 @@
"documentation": "https://www.home-assistant.io/integrations/frontend",
"integration_type": "system",
"quality_scale": "internal",
"requirements": ["home-assistant-frontend==20240501.0"]
"requirements": ["home-assistant-frontend==20240501.1"]
}

View File

@ -6,5 +6,5 @@
"documentation": "https://www.home-assistant.io/integrations/goodwe",
"iot_class": "local_polling",
"loggers": ["goodwe"],
"requirements": ["goodwe==0.3.2"]
"requirements": ["goodwe==0.3.4"]
}

View File

@ -75,7 +75,7 @@ CONFIG_SCHEMA = vol.Schema(
vol.Optional(CONF_FOLDER, default="INBOX"): str,
vol.Optional(CONF_SEARCH, default="UnSeen UnDeleted"): str,
# The default for new entries is to not include text and headers
vol.Optional(CONF_EVENT_MESSAGE_DATA, default=[]): cv.ensure_list,
vol.Optional(CONF_EVENT_MESSAGE_DATA, default=[]): EVENT_MESSAGE_DATA_SELECTOR,
}
)
CONFIG_SCHEMA_ADVANCED = {

View File

@ -106,4 +106,4 @@ class LutronEventEntity(LutronKeypad, EventEntity):
}
self.hass.bus.fire("lutron_event", data)
self._trigger_event(action)
self.async_write_ha_state()
self.schedule_update_ha_state()

View File

@ -84,7 +84,7 @@ if TYPE_CHECKING:
_LOGGER = logging.getLogger(__name__)
DISCOVERY_COOLDOWN = 5
INITIAL_SUBSCRIBE_COOLDOWN = 1.0
INITIAL_SUBSCRIBE_COOLDOWN = 3.0
SUBSCRIBE_COOLDOWN = 0.1
UNSUBSCRIBE_COOLDOWN = 0.1
TIMEOUT_ACK = 10
@ -891,6 +891,7 @@ class MQTT:
qos=birth_message.qos,
retain=birth_message.retain,
)
_LOGGER.info("MQTT client initialized, birth message sent")
@callback
def _async_mqtt_on_connect(
@ -950,6 +951,7 @@ class MQTT:
name="mqtt re-subscribe",
)
self._subscribe_debouncer.set_timeout(SUBSCRIBE_COOLDOWN)
_LOGGER.info("MQTT client initialized")
self._async_connection_result(True)

View File

@ -69,7 +69,6 @@ ELEC_SENSORS: tuple[OpowerEntityDescription, ...] = (
name="Current bill electric cost to date",
device_class=SensorDeviceClass.MONETARY,
native_unit_of_measurement="USD",
suggested_unit_of_measurement="USD",
state_class=SensorStateClass.TOTAL,
suggested_display_precision=0,
value_fn=lambda data: data.cost_to_date,
@ -79,7 +78,6 @@ ELEC_SENSORS: tuple[OpowerEntityDescription, ...] = (
name="Current bill electric forecasted cost",
device_class=SensorDeviceClass.MONETARY,
native_unit_of_measurement="USD",
suggested_unit_of_measurement="USD",
state_class=SensorStateClass.TOTAL,
suggested_display_precision=0,
value_fn=lambda data: data.forecasted_cost,
@ -89,7 +87,6 @@ ELEC_SENSORS: tuple[OpowerEntityDescription, ...] = (
name="Typical monthly electric cost",
device_class=SensorDeviceClass.MONETARY,
native_unit_of_measurement="USD",
suggested_unit_of_measurement="USD",
state_class=SensorStateClass.TOTAL,
suggested_display_precision=0,
value_fn=lambda data: data.typical_cost,
@ -101,7 +98,6 @@ GAS_SENSORS: tuple[OpowerEntityDescription, ...] = (
name="Current bill gas usage to date",
device_class=SensorDeviceClass.GAS,
native_unit_of_measurement=UnitOfVolume.CENTUM_CUBIC_FEET,
suggested_unit_of_measurement=UnitOfVolume.CENTUM_CUBIC_FEET,
state_class=SensorStateClass.TOTAL,
suggested_display_precision=0,
value_fn=lambda data: data.usage_to_date,
@ -111,7 +107,6 @@ GAS_SENSORS: tuple[OpowerEntityDescription, ...] = (
name="Current bill gas forecasted usage",
device_class=SensorDeviceClass.GAS,
native_unit_of_measurement=UnitOfVolume.CENTUM_CUBIC_FEET,
suggested_unit_of_measurement=UnitOfVolume.CENTUM_CUBIC_FEET,
state_class=SensorStateClass.TOTAL,
suggested_display_precision=0,
value_fn=lambda data: data.forecasted_usage,
@ -121,7 +116,6 @@ GAS_SENSORS: tuple[OpowerEntityDescription, ...] = (
name="Typical monthly gas usage",
device_class=SensorDeviceClass.GAS,
native_unit_of_measurement=UnitOfVolume.CENTUM_CUBIC_FEET,
suggested_unit_of_measurement=UnitOfVolume.CENTUM_CUBIC_FEET,
state_class=SensorStateClass.TOTAL,
suggested_display_precision=0,
value_fn=lambda data: data.typical_usage,
@ -131,7 +125,6 @@ GAS_SENSORS: tuple[OpowerEntityDescription, ...] = (
name="Current bill gas cost to date",
device_class=SensorDeviceClass.MONETARY,
native_unit_of_measurement="USD",
suggested_unit_of_measurement="USD",
state_class=SensorStateClass.TOTAL,
suggested_display_precision=0,
value_fn=lambda data: data.cost_to_date,
@ -141,7 +134,6 @@ GAS_SENSORS: tuple[OpowerEntityDescription, ...] = (
name="Current bill gas forecasted cost",
device_class=SensorDeviceClass.MONETARY,
native_unit_of_measurement="USD",
suggested_unit_of_measurement="USD",
state_class=SensorStateClass.TOTAL,
suggested_display_precision=0,
value_fn=lambda data: data.forecasted_cost,
@ -151,7 +143,6 @@ GAS_SENSORS: tuple[OpowerEntityDescription, ...] = (
name="Typical monthly gas cost",
device_class=SensorDeviceClass.MONETARY,
native_unit_of_measurement="USD",
suggested_unit_of_measurement="USD",
state_class=SensorStateClass.TOTAL,
suggested_display_precision=0,
value_fn=lambda data: data.typical_cost,

View File

@ -46,7 +46,7 @@ class RadarrDataUpdateCoordinator(DataUpdateCoordinator[T], Generic[T], ABC):
"""Data update coordinator for the Radarr integration."""
config_entry: ConfigEntry
update_interval = timedelta(seconds=30)
_update_interval = timedelta(seconds=30)
def __init__(
self,
@ -59,7 +59,7 @@ class RadarrDataUpdateCoordinator(DataUpdateCoordinator[T], Generic[T], ABC):
hass=hass,
logger=LOGGER,
name=DOMAIN,
update_interval=self.update_interval,
update_interval=self._update_interval,
)
self.api_client = api_client
self.host_configuration = host_configuration
@ -133,7 +133,7 @@ class QueueDataUpdateCoordinator(RadarrDataUpdateCoordinator):
class CalendarUpdateCoordinator(RadarrDataUpdateCoordinator[None]):
"""Calendar update coordinator."""
update_interval = timedelta(hours=1)
_update_interval = timedelta(hours=1)
def __init__(
self,

View File

@ -485,6 +485,12 @@ def compile_statistics(instance: Recorder, start: datetime, fire_events: bool) -
The actual calculation is delegated to the platforms.
"""
# Define modified_statistic_ids outside of the "with" statement as
# _compile_statistics may raise and be trapped by
# filter_unique_constraint_integrity_error which would make
# modified_statistic_ids unbound.
modified_statistic_ids: set[str] | None = None
# Return if we already have 5-minute statistics for the requested period
with session_scope(
session=instance.get_session(),

View File

@ -285,6 +285,9 @@ async def async_setup_platform(
class StatisticsSensor(SensorEntity):
"""Representation of a Statistics sensor."""
_attr_should_poll = False
_attr_icon = ICON
def __init__(
self,
source_entity_id: str,
@ -298,9 +301,7 @@ class StatisticsSensor(SensorEntity):
percentile: int,
) -> None:
"""Initialize the Statistics sensor."""
self._attr_icon: str = ICON
self._attr_name: str = name
self._attr_should_poll: bool = False
self._attr_unique_id: str | None = unique_id
self._source_entity_id: str = source_entity_id
self.is_binary: bool = (
@ -326,35 +327,37 @@ class StatisticsSensor(SensorEntity):
self._update_listener: CALLBACK_TYPE | None = None
@callback
def _async_stats_sensor_state_listener(
self,
event: Event[EventStateChangedData],
) -> None:
"""Handle the sensor state changes."""
if (new_state := event.data["new_state"]) is None:
return
self._add_state_to_queue(new_state)
self._async_purge_update_and_schedule()
self.async_write_ha_state()
@callback
def _async_stats_sensor_startup(self, _: HomeAssistant) -> None:
"""Add listener and get recorded state."""
_LOGGER.debug("Startup for %s", self.entity_id)
self.async_on_remove(
async_track_state_change_event(
self.hass,
[self._source_entity_id],
self._async_stats_sensor_state_listener,
)
)
if "recorder" in self.hass.config.components:
self.hass.async_create_task(self._initialize_from_database())
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
@callback
def async_stats_sensor_state_listener(
event: Event[EventStateChangedData],
) -> None:
"""Handle the sensor state changes."""
if (new_state := event.data["new_state"]) is None:
return
self._add_state_to_queue(new_state)
self.async_schedule_update_ha_state(True)
async def async_stats_sensor_startup(_: HomeAssistant) -> None:
"""Add listener and get recorded state."""
_LOGGER.debug("Startup for %s", self.entity_id)
self.async_on_remove(
async_track_state_change_event(
self.hass,
[self._source_entity_id],
async_stats_sensor_state_listener,
)
)
if "recorder" in self.hass.config.components:
self.hass.async_create_task(self._initialize_from_database())
self.async_on_remove(async_at_start(self.hass, async_stats_sensor_startup))
self.async_on_remove(
async_at_start(self.hass, self._async_stats_sensor_startup)
)
def _add_state_to_queue(self, new_state: State) -> None:
"""Add the state to the queue."""
@ -499,7 +502,8 @@ class StatisticsSensor(SensorEntity):
self.ages.popleft()
self.states.popleft()
def _next_to_purge_timestamp(self) -> datetime | None:
@callback
def _async_next_to_purge_timestamp(self) -> datetime | None:
"""Find the timestamp when the next purge would occur."""
if self.ages and self._samples_max_age:
if self.samples_keep_last and len(self.ages) == 1:
@ -521,6 +525,10 @@ class StatisticsSensor(SensorEntity):
async def async_update(self) -> None:
"""Get the latest data and updates the states."""
self._async_purge_update_and_schedule()
def _async_purge_update_and_schedule(self) -> None:
"""Purge old states, update the sensor and schedule the next update."""
_LOGGER.debug("%s: updating statistics", self.entity_id)
if self._samples_max_age is not None:
self._purge_old_states(self._samples_max_age)
@ -531,23 +539,28 @@ class StatisticsSensor(SensorEntity):
# If max_age is set, ensure to update again after the defined interval.
# By basing updates off the timestamps of sampled data we avoid updating
# when none of the observed entities change.
if timestamp := self._next_to_purge_timestamp():
if timestamp := self._async_next_to_purge_timestamp():
_LOGGER.debug("%s: scheduling update at %s", self.entity_id, timestamp)
if self._update_listener:
self._update_listener()
self._update_listener = None
@callback
def _scheduled_update(now: datetime) -> None:
"""Timer callback for sensor update."""
_LOGGER.debug("%s: executing scheduled update", self.entity_id)
self.async_schedule_update_ha_state(True)
self._update_listener = None
self._async_cancel_update_listener()
self._update_listener = async_track_point_in_utc_time(
self.hass, _scheduled_update, timestamp
self.hass, self._async_scheduled_update, timestamp
)
@callback
def _async_cancel_update_listener(self) -> None:
"""Cancel the scheduled update listener."""
if self._update_listener:
self._update_listener()
self._update_listener = None
@callback
def _async_scheduled_update(self, now: datetime) -> None:
"""Timer callback for sensor update."""
_LOGGER.debug("%s: executing scheduled update", self.entity_id)
self._async_cancel_update_listener()
self._async_purge_update_and_schedule()
self.async_write_ha_state()
def _fetch_states_from_database(self) -> list[State]:
"""Fetch the states from the database."""
_LOGGER.debug("%s: initializing values from the database", self.entity_id)
@ -589,8 +602,8 @@ class StatisticsSensor(SensorEntity):
for state in reversed(states):
self._add_state_to_queue(state)
self.async_schedule_update_ha_state(True)
self._async_purge_update_and_schedule()
self.async_write_ha_state()
_LOGGER.debug("%s: initializing from database completed", self.entity_id)
def _update_attributes(self) -> None:

View File

@ -7,6 +7,7 @@ import logging
from synology_dsm.api.surveillance_station import SynoSurveillanceStation
from synology_dsm.api.surveillance_station.camera import SynoCamera
from synology_dsm.exceptions import SynologyDSMNotLoggedInException
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_MAC, CONF_VERIFY_SSL
@ -69,7 +70,10 @@ async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
await api.async_setup()
except SYNOLOGY_AUTH_FAILED_EXCEPTIONS as err:
raise_config_entry_auth_error(err)
except SYNOLOGY_CONNECTION_EXCEPTIONS as err:
except (*SYNOLOGY_CONNECTION_EXCEPTIONS, SynologyDSMNotLoggedInException) as err:
# SynologyDSMNotLoggedInException may be raised even if the user is
# logged in because the session may have expired, and we need to retry
# the login later.
if err.args[0] and isinstance(err.args[0], dict):
details = err.args[0].get(EXCEPTION_DETAILS, EXCEPTION_UNKNOWN)
else:
@ -86,12 +90,9 @@ async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
)
coordinator_central = SynologyDSMCentralUpdateCoordinator(hass, entry, api)
await coordinator_central.async_config_entry_first_refresh()
available_apis = api.dsm.apis
# The central coordinator needs to be refreshed first since
# the next two rely on data from it
coordinator_cameras: SynologyDSMCameraUpdateCoordinator | None = None
if api.surveillance_station is not None:
coordinator_cameras = SynologyDSMCameraUpdateCoordinator(hass, entry, api)

View File

@ -2,6 +2,7 @@
from __future__ import annotations
import asyncio
from collections.abc import Callable
from contextlib import suppress
import logging
@ -38,6 +39,7 @@ from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import (
CONF_DEVICE_TOKEN,
DEFAULT_TIMEOUT,
EXCEPTION_DETAILS,
EXCEPTION_UNKNOWN,
SYNOLOGY_CONNECTION_EXCEPTIONS,
@ -82,6 +84,31 @@ class SynoApi:
self._with_upgrade = True
self._with_utilisation = True
self._login_future: asyncio.Future[None] | None = None
async def async_login(self) -> None:
"""Login to the Synology DSM API.
This function will only login once if called multiple times
by multiple different callers.
If a login is already in progress, the function will await the
login to complete before returning.
"""
if self._login_future:
return await self._login_future
self._login_future = self._hass.loop.create_future()
try:
await self.dsm.login()
self._login_future.set_result(None)
except BaseException as err:
if not self._login_future.done():
self._login_future.set_exception(err)
raise
finally:
self._login_future = None
async def async_setup(self) -> None:
"""Start interacting with the NAS."""
session = async_get_clientsession(self._hass, self._entry.data[CONF_VERIFY_SSL])
@ -92,10 +119,10 @@ class SynoApi:
self._entry.data[CONF_USERNAME],
self._entry.data[CONF_PASSWORD],
self._entry.data[CONF_SSL],
timeout=self._entry.options.get(CONF_TIMEOUT) or 10,
timeout=self._entry.options.get(CONF_TIMEOUT) or DEFAULT_TIMEOUT,
device_token=self._entry.data.get(CONF_DEVICE_TOKEN),
)
await self.dsm.login()
await self.async_login()
# check if surveillance station is used
self._with_surveillance_station = bool(

View File

@ -179,7 +179,9 @@ class SynologyDSMFlowHandler(ConfigFlow, domain=DOMAIN):
port = DEFAULT_PORT
session = async_get_clientsession(self.hass, verify_ssl)
api = SynologyDSM(session, host, port, username, password, use_ssl, timeout=30)
api = SynologyDSM(
session, host, port, username, password, use_ssl, timeout=DEFAULT_TIMEOUT
)
errors = {}
try:

View File

@ -40,7 +40,7 @@ DEFAULT_PORT = 5000
DEFAULT_PORT_SSL = 5001
# Options
DEFAULT_SCAN_INTERVAL = 15 # min
DEFAULT_TIMEOUT = 10 # sec
DEFAULT_TIMEOUT = 30 # sec
DEFAULT_SNAPSHOT_QUALITY = SNAPSHOT_PROFILE_BALANCED
ENTITY_UNIT_LOAD = "load"

View File

@ -2,9 +2,10 @@
from __future__ import annotations
from collections.abc import Awaitable, Callable, Coroutine
from datetime import timedelta
import logging
from typing import Any, TypeVar
from typing import Any, Concatenate, ParamSpec, TypeVar
from synology_dsm.api.surveillance_station.camera import SynoCamera
from synology_dsm.exceptions import (
@ -30,6 +31,36 @@ _LOGGER = logging.getLogger(__name__)
_DataT = TypeVar("_DataT")
_T = TypeVar("_T", bound="SynologyDSMUpdateCoordinator")
_P = ParamSpec("_P")
def async_re_login_on_expired(
func: Callable[Concatenate[_T, _P], Awaitable[_DataT]],
) -> Callable[Concatenate[_T, _P], Coroutine[Any, Any, _DataT]]:
"""Define a wrapper to re-login when expired."""
async def _async_wrap(self: _T, *args: _P.args, **kwargs: _P.kwargs) -> _DataT:
for attempts in range(2):
try:
return await func(self, *args, **kwargs)
except SynologyDSMNotLoggedInException:
# If login is expired, try to login again
_LOGGER.debug("login is expired, try to login again")
try:
await self.api.async_login()
except SYNOLOGY_AUTH_FAILED_EXCEPTIONS as err:
raise_config_entry_auth_error(err)
if attempts == 0:
continue
except SYNOLOGY_CONNECTION_EXCEPTIONS as err:
raise UpdateFailed(f"Error communicating with API: {err}") from err
raise UpdateFailed("Unknown error when communicating with API")
return _async_wrap
class SynologyDSMUpdateCoordinator(DataUpdateCoordinator[_DataT]):
"""DataUpdateCoordinator base class for synology_dsm."""
@ -72,6 +103,7 @@ class SynologyDSMSwitchUpdateCoordinator(
assert info is not None
self.version = info["data"]["CMSMinVersion"]
@async_re_login_on_expired
async def _async_update_data(self) -> dict[str, dict[str, Any]]:
"""Fetch all data from api."""
surveillance_station = self.api.surveillance_station
@ -102,21 +134,10 @@ class SynologyDSMCentralUpdateCoordinator(SynologyDSMUpdateCoordinator[None]):
),
)
@async_re_login_on_expired
async def _async_update_data(self) -> None:
"""Fetch all data from api."""
for attempts in range(2):
try:
await self.api.async_update()
except SynologyDSMNotLoggedInException:
# If login is expired, try to login again
try:
await self.api.dsm.login()
except SYNOLOGY_AUTH_FAILED_EXCEPTIONS as err:
raise_config_entry_auth_error(err)
if attempts == 0:
continue
except SYNOLOGY_CONNECTION_EXCEPTIONS as err:
raise UpdateFailed(f"Error communicating with API: {err}") from err
await self.api.async_update()
class SynologyDSMCameraUpdateCoordinator(
@ -133,6 +154,7 @@ class SynologyDSMCameraUpdateCoordinator(
"""Initialize DataUpdateCoordinator for cameras."""
super().__init__(hass, entry, api, timedelta(seconds=30))
@async_re_login_on_expired
async def _async_update_data(self) -> dict[str, dict[int, SynoCamera]]:
"""Fetch all camera data from api."""
surveillance_station = self.api.surveillance_station

View File

@ -23,7 +23,7 @@ if TYPE_CHECKING:
APPLICATION_NAME: Final = "HomeAssistant"
MAJOR_VERSION: Final = 2024
MINOR_VERSION: Final = 5
PATCH_VERSION: Final = "1"
PATCH_VERSION: Final = "2"
__short_version__: Final = f"{MAJOR_VERSION}.{MINOR_VERSION}"
__version__: Final = f"{__short_version__}.{PATCH_VERSION}"
REQUIRED_PYTHON_VER: Final[tuple[int, int, int]] = (3, 12, 0)

View File

@ -352,6 +352,18 @@ class FlowManager(abc.ABC, Generic[_FlowResultT, _HandlerT]):
) -> _FlowResultT:
"""Continue a data entry flow."""
result: _FlowResultT | None = None
# Workaround for flow handlers which have not been upgraded to pass a show
# progress task, needed because of the change to eager tasks in HA Core 2024.5,
# can be removed in HA Core 2024.8.
flow = self._progress.get(flow_id)
if flow and flow.deprecated_show_progress:
if (cur_step := flow.cur_step) and cur_step[
"type"
] == FlowResultType.SHOW_PROGRESS:
# Allow the progress task to finish before we call the flow handler
await asyncio.sleep(0)
while not result or result["type"] == FlowResultType.SHOW_PROGRESS_DONE:
result = await self._async_configure(flow_id, user_input)
flow = self._progress.get(flow_id)

View File

@ -1436,12 +1436,18 @@ class _TrackPointUTCTime:
"""Initialize track job."""
loop = self.hass.loop
self._cancel_callback = loop.call_at(
loop.time() + self.expected_fire_timestamp - time.time(), self._run_action
loop.time() + self.expected_fire_timestamp - time.time(), self
)
@callback
def _run_action(self) -> None:
"""Call the action."""
def __call__(self) -> None:
"""Call the action.
We implement this as __call__ so when debug logging logs the object
it shows the name of the job. This is especially helpful when asyncio
debug logging is enabled as we can see the name of the job that is
being called that is blocking the event loop.
"""
# Depending on the available clock support (including timer hardware
# and the OS kernel) it can happen that we fire a little bit too early
# as measured by utcnow(). That is bad when callbacks have assumptions
@ -1450,7 +1456,7 @@ class _TrackPointUTCTime:
if (delta := (self.expected_fire_timestamp - time_tracker_timestamp())) > 0:
_LOGGER.debug("Called %f seconds too early, rearming", delta)
loop = self.hass.loop
self._cancel_callback = loop.call_at(loop.time() + delta, self._run_action)
self._cancel_callback = loop.call_at(loop.time() + delta, self)
return
self.hass.async_run_hass_job(self.job, self.utc_point_in_time)

View File

@ -4,7 +4,7 @@ aiodhcpwatcher==1.0.0
aiodiscover==2.1.0
aiodns==3.2.0
aiohttp-fast-url-dispatcher==0.3.0
aiohttp-isal==0.2.0
aiohttp-isal==0.3.1
aiohttp==3.9.5
aiohttp_cors==0.7.0
aiohttp_session==2.12.0
@ -17,7 +17,7 @@ awesomeversion==24.2.0
bcrypt==4.1.2
bleak-retry-connector==3.5.0
bleak==0.21.1
bluetooth-adapters==0.19.1
bluetooth-adapters==0.19.2
bluetooth-auto-recovery==1.4.2
bluetooth-data-tools==1.19.0
cached_ipaddress==0.3.0
@ -32,7 +32,7 @@ habluetooth==2.8.1
hass-nabucasa==0.78.0
hassil==1.6.1
home-assistant-bluetooth==1.12.0
home-assistant-frontend==20240501.0
home-assistant-frontend==20240501.1
home-assistant-intents==2024.4.24
httpx==0.27.0
ifaddr==0.2.0

View File

@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "homeassistant"
version = "2024.5.1"
version = "2024.5.2"
license = {text = "Apache-2.0"}
description = "Open-source home automation platform running on Python 3."
readme = "README.rst"
@ -28,7 +28,7 @@ dependencies = [
"aiohttp_cors==0.7.0",
"aiohttp_session==2.12.0",
"aiohttp-fast-url-dispatcher==0.3.0",
"aiohttp-isal==0.2.0",
"aiohttp-isal==0.3.1",
"astral==2.2",
"async-interrupt==1.1.1",
"attrs==23.2.0",

View File

@ -8,7 +8,7 @@ aiohttp==3.9.5
aiohttp_cors==0.7.0
aiohttp_session==2.12.0
aiohttp-fast-url-dispatcher==0.3.0
aiohttp-isal==0.2.0
aiohttp-isal==0.3.1
astral==2.2
async-interrupt==1.1.1
attrs==23.2.0

View File

@ -413,7 +413,7 @@ aioymaps==1.2.2
airly==1.1.0
# homeassistant.components.airthings_ble
airthings-ble==0.8.0
airthings-ble==0.9.0
# homeassistant.components.airthings
airthings-cloud==0.2.0
@ -437,7 +437,7 @@ amcrest==1.9.8
androidtv[async]==0.0.73
# homeassistant.components.androidtv_remote
androidtvremote2==0.0.14
androidtvremote2==0.0.15
# homeassistant.components.anel_pwrctrl
anel-pwrctrl-homeassistant==0.0.1.dev2
@ -579,7 +579,7 @@ bluemaestro-ble==0.2.3
# bluepy==1.3.0
# homeassistant.components.bluetooth
bluetooth-adapters==0.19.1
bluetooth-adapters==0.19.2
# homeassistant.components.bluetooth
bluetooth-auto-recovery==1.4.2
@ -952,7 +952,7 @@ glances-api==0.6.0
goalzero==0.2.2
# homeassistant.components.goodwe
goodwe==0.3.2
goodwe==0.3.4
# homeassistant.components.google_mail
# homeassistant.components.google_tasks
@ -1078,7 +1078,7 @@ hole==0.8.0
holidays==0.47
# homeassistant.components.frontend
home-assistant-frontend==20240501.0
home-assistant-frontend==20240501.1
# homeassistant.components.conversation
home-assistant-intents==2024.4.24

View File

@ -386,7 +386,7 @@ aioymaps==1.2.2
airly==1.1.0
# homeassistant.components.airthings_ble
airthings-ble==0.8.0
airthings-ble==0.9.0
# homeassistant.components.airthings
airthings-cloud==0.2.0
@ -404,7 +404,7 @@ amberelectric==1.1.0
androidtv[async]==0.0.73
# homeassistant.components.androidtv_remote
androidtvremote2==0.0.14
androidtvremote2==0.0.15
# homeassistant.components.anova
anova-wifi==0.10.0
@ -494,7 +494,7 @@ bluecurrent-api==1.2.3
bluemaestro-ble==0.2.3
# homeassistant.components.bluetooth
bluetooth-adapters==0.19.1
bluetooth-adapters==0.19.2
# homeassistant.components.bluetooth
bluetooth-auto-recovery==1.4.2
@ -781,7 +781,7 @@ glances-api==0.6.0
goalzero==0.2.2
# homeassistant.components.goodwe
goodwe==0.3.2
goodwe==0.3.4
# homeassistant.components.google_mail
# homeassistant.components.google_tasks
@ -880,7 +880,7 @@ hole==0.8.0
holidays==0.47
# homeassistant.components.frontend
home-assistant-frontend==20240501.0
home-assistant-frontend==20240501.1
# homeassistant.components.conversation
home-assistant-intents==2024.4.24

View File

@ -4819,3 +4819,21 @@ async def test_track_state_change_deprecated(
"of `async_track_state_change_event` which is deprecated and "
"will be removed in Home Assistant 2025.5. Please report this issue."
) in caplog.text
async def test_track_point_in_time_repr(
hass: HomeAssistant, caplog: pytest.LogCaptureFixture
) -> None:
"""Test track point in time."""
@ha.callback
def _raise_exception(_):
raise RuntimeError("something happened and its poorly described")
async_track_point_in_utc_time(hass, _raise_exception, dt_util.utcnow())
async_fire_time_changed(hass)
await hass.async_block_till_done(wait_background_tasks=True)
assert "Exception in callback _TrackPointUTCTime" in caplog.text
assert "._raise_exception" in caplog.text
await hass.async_block_till_done(wait_background_tasks=True)

View File

@ -7,6 +7,12 @@ IGNORE_UNCAUGHT_EXCEPTIONS = [
"tests.test_runner",
"test_unhandled_exception_traceback",
),
(
# This test explicitly throws an uncaught exception
# and should not be removed.
"tests.helpers.test_event",
"test_track_point_in_time_repr",
),
(
"test_homeassistant_bridge",
"test_homeassistant_bridge_fan_setup",