Mirror of https://github.com/home-assistant/core.git, synced 2025-07-21 20:27:08 +00:00
Collection of code styling tweaks (#87344)
parent 3ccd0ef013
commit fcb612cd6f
@@ -20,7 +20,7 @@ class AcmedaBase(entity.Entity):
 self.roller = roller

 async def async_remove_and_unregister(self) -> None:
-"""Unregister from entity and device registry and call entity remove function."""
+"""Unregister from registries and call entity remove function."""
 LOGGER.error("Removing %s %s", self.__class__.__name__, self.unique_id)

 ent_registry = er.async_get(self.hass)
@@ -54,7 +54,10 @@ async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
 )

 def error_handle_factory(func):
-"""Return the provided API function wrapped in an error handler and coordinator refresh."""
+"""Return the provided API function wrapped.
+
+Adds an error handler and coordinator refresh.
+"""

 async def error_handle(param):
 try:
@@ -228,7 +228,8 @@ class GrowattData:
 )

 mix_detail = self.api.mix_detail(self.device_id, self.plant_id)
-# Get the chart data and work out the time of the last entry, use this as the last time data was published to the Growatt Server
+# Get the chart data and work out the time of the last entry, use this
+# as the last time data was published to the Growatt Server
 mix_chart_entries = mix_detail["chartData"]
 sorted_keys = sorted(mix_chart_entries)

@@ -239,12 +240,15 @@ class GrowattData:
 date_now, last_updated_time, dt.DEFAULT_TIME_ZONE
 )

-# Dashboard data is largely inaccurate for mix system but it is the only call with the ability to return the combined
-# imported from grid value that is the combination of charging AND load consumption
+# Dashboard data is largely inaccurate for mix system but it is the only
+# call with the ability to return the combined imported from grid value
+# that is the combination of charging AND load consumption
 dashboard_data = self.api.dashboard_data(self.plant_id)
-# Dashboard values have units e.g. "kWh" as part of their returned string, so we remove it
+# Dashboard values have units e.g. "kWh" as part of their returned
+# string, so we remove it
 dashboard_values_for_mix = {
-# etouser is already used by the results from 'mix_detail' so we rebrand it as 'etouser_combined'
+# etouser is already used by the results from 'mix_detail' so we
+# rebrand it as 'etouser_combined'
 "etouser_combined": float(
 dashboard_data["etouser"].replace("kWh", "")
 )
@@ -297,10 +301,11 @@ class GrowattData:
 )
 diff = float(api_value) - float(previous_value)

-# Check if the value has dropped (negative value i.e. < 0) and it has only dropped by a
-# small amount, if so, use the previous value.
-# Note - The energy dashboard takes care of drops within 10% of the current value,
-# however if the value is low e.g. 0.2 and drops by 0.1 it classes as a reset.
+# Check if the value has dropped (negative value i.e. < 0) and it has only
+# dropped by a small amount, if so, use the previous value.
+# Note - The energy dashboard takes care of drops within 10%
+# of the current value, however if the value is low e.g. 0.2
+# and drops by 0.1 it classes as a reset.
 if -(entity_description.previous_value_drop_threshold) <= diff < 0:
 _LOGGER.debug(
 (
@@ -317,18 +322,19 @@ class GrowattData:
 "%s - No drop detected, using API value", entity_description.name
 )

-# Lifetime total values should always be increasing, they will never reset, however
-# the API sometimes returns 0 values when the clock turns to 00:00 local time
-# in that scenario we should just return the previous value
+# Lifetime total values should always be increasing, they will never reset,
+# however the API sometimes returns 0 values when the clock turns to 00:00
+# local time in that scenario we should just return the previous value
 # Scenarios:
 # 1 - System has a genuine 0 value when it it first commissioned:
 # - will return 0 until a non-zero value is registered
-# 2 - System has been running fine but temporarily resets to 0 briefly at midnight:
+# 2 - System has been running fine but temporarily resets to 0 briefly
+# at midnight:
 # - will return the previous value
 # 3 - HA is restarted during the midnight 'outage' - Not handled:
 # - Previous value will not exist meaning 0 will be returned
-# - This is an edge case that would be better handled by looking up the previous
-# value of the entity from the recorder
+# - This is an edge case that would be better handled by looking
+# up the previous value of the entity from the recorder
 if entity_description.never_resets and api_value == 0 and previous_value:
 _LOGGER.debug(
 (
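For illustration, a minimal sketch of the guard logic described in the comments above (simplified; attribute names follow the surrounding diff, the rest is assumed):

def guarded_value(api_value, previous_value, drop_threshold, never_resets):
    # Small negative dips within the threshold are treated as API noise,
    # so the previous value is kept.
    if previous_value is not None:
        diff = float(api_value) - float(previous_value)
        if -drop_threshold <= diff < 0:
            return previous_value
        # Lifetime totals never reset; a sudden 0 (e.g. at midnight)
        # keeps the old value instead of registering a reset.
        if never_resets and float(api_value) == 0:
            return previous_value
    return api_value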
@@ -200,7 +200,8 @@ class ImageServeView(HomeAssistantView):

 if not target_file.is_file():
 async with self.transform_lock:
-# Another check in case another request already finished it while waiting
+# Another check in case another request already
+# finished it while waiting
 if not target_file.is_file():
 await hass.async_add_executor_job(
 _generate_thumbnail,
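For illustration, the check, lock, recheck pattern used above as a self-contained sketch (lock scope and generator callable are assumed):

import asyncio
from pathlib import Path

transform_lock = asyncio.Lock()

async def ensure_thumbnail(target_file: Path, generate) -> None:
    # Fast path: another request may already have produced the file.
    if target_file.is_file():
        return
    async with transform_lock:
        # Re-check after acquiring the lock, in case a concurrent
        # request finished the work while we were waiting.
        if not target_file.is_file():
            await asyncio.get_running_loop().run_in_executor(None, generate)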
@@ -85,9 +85,9 @@ class MinecraftServerConfigFlow(ConfigFlow, domain=DOMAIN):
 unique_id = ""
 title = f"{host}:{port}"
 if ip_address is not None:
-# Since IP addresses can change and therefore are not allowed in a
-# unique_id, fall back to the MAC address and port (to support
-# servers with same MAC address but different ports).
+# Since IP addresses can change and therefore are not allowed
+# in a unique_id, fall back to the MAC address and port (to
+# support servers with same MAC address but different ports).
 unique_id = f"{mac_address}-{port}"
 if ip_address.version == 6:
 title = f"[{host}]:{port}"
@@ -101,15 +101,16 @@ class MinecraftServerConfigFlow(ConfigFlow, domain=DOMAIN):
 unique_id = f"{host}-srv"
 title = host
 else:
-# Use host name and port in unique_id (to support servers with
-# same host name but different ports).
+# Use host name and port in unique_id (to support servers
+# with same host name but different ports).
 unique_id = f"{host}-{port}"

 # Abort in case the host was already configured before.
 await self.async_set_unique_id(unique_id)
 self._abort_if_unique_id_configured()

-# Configuration data are available and no error was detected, create configuration entry.
+# Configuration data are available and no error was detected,
+# create configuration entry.
 return self.async_create_entry(title=title, data=config_data)

 # Show configuration form (default form in case of no user_input,
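For illustration, a minimal sketch of the unique_id and title selection described in the comments of the two hunks above (simplified; the SRV-branch condition and helper signature are assumed, the id/title formats follow the surrounding diff):

def build_unique_id_and_title(host, port, ip_address, mac_address, is_srv_record):
    # IP addresses can change, so fall back to MAC address + port.
    if ip_address is not None:
        unique_id = f"{mac_address}-{port}"
        title = f"[{host}]:{port}" if ip_address.version == 6 else f"{host}:{port}"
    elif is_srv_record:
        # An SRV record already pins the port, so the host alone is enough.
        unique_id = f"{host}-srv"
        title = host
    else:
        # Host name + port supports servers sharing a host name.
        unique_id = f"{host}-{port}"
        title = f"{host}:{port}"
    return unique_id, title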
@@ -117,9 +117,10 @@ CONF_MODE_LIST = "modes"
 CONF_MODE_STATE_TEMPLATE = "mode_state_template"
 CONF_MODE_STATE_TOPIC = "mode_state_topic"

-# CONF_POWER_COMMAND_TOPIC, CONF_POWER_STATE_TOPIC and CONF_POWER_STATE_TEMPLATE are deprecated,
-# support for CONF_POWER_STATE_TOPIC and CONF_POWER_STATE_TEMPLATE was already removed or never added
-# support was deprecated with release 2023.2 and will be removed with release 2023.8
+# CONF_POWER_COMMAND_TOPIC, CONF_POWER_STATE_TOPIC and CONF_POWER_STATE_TEMPLATE
+# are deprecated, support for CONF_POWER_STATE_TOPIC and CONF_POWER_STATE_TEMPLATE
+# was already removed or never added support was deprecated with release 2023.2
+# and will be removed with release 2023.8
 CONF_POWER_COMMAND_TOPIC = "power_command_topic"
 CONF_POWER_STATE_TEMPLATE = "power_state_template"
 CONF_POWER_STATE_TOPIC = "power_state_topic"
@@ -375,9 +376,10 @@ PLATFORM_SCHEMA_MODERN = vol.All(
 cv.removed(CONF_HOLD_STATE_TEMPLATE),
 cv.removed(CONF_HOLD_STATE_TOPIC),
 cv.removed(CONF_HOLD_LIST),
-# CONF_POWER_COMMAND_TOPIC, CONF_POWER_STATE_TOPIC and CONF_POWER_STATE_TEMPLATE are deprecated,
-# support for CONF_POWER_STATE_TOPIC and CONF_POWER_STATE_TEMPLATE was already removed or never added
-# support was deprecated with release 2023.2 and will be removed with release 2023.8
+# CONF_POWER_COMMAND_TOPIC, CONF_POWER_STATE_TOPIC and CONF_POWER_STATE_TEMPLATE
+# are deprecated, support for CONF_POWER_STATE_TOPIC and CONF_POWER_STATE_TEMPLATE
+# was already removed or never added support was deprecated with release 2023.2
+# and will be removed with release 2023.8
 cv.deprecated(CONF_POWER_COMMAND_TOPIC),
 cv.deprecated(CONF_POWER_STATE_TEMPLATE),
 cv.deprecated(CONF_POWER_STATE_TOPIC),
@@ -252,7 +252,8 @@ async def async_import_config(hass: HomeAssistant, entry: ConfigEntry) -> None:
 new_data.update(
 {
 CONF_SUBSCRIBER_ID: config[CONF_SUBSCRIBER_ID],
-CONF_SUBSCRIBER_ID_IMPORTED: True, # Don't delete user managed subscriber
+# Don't delete user managed subscriber
+CONF_SUBSCRIBER_ID_IMPORTED: True,
 }
 )
 hass.config_entries.async_update_entry(
@@ -106,8 +106,8 @@ CONFIG_SCHEMA = vol.Schema(
 vol.Optional(CONF_SSL, default=False): cv.boolean,
 vol.Optional(CONF_PORT, default=80): cv.port,
 vol.Optional(CONF_PATH, default="/"): ensure_valid_path,
-# Following values are not longer used in the configuration of the integration
-# and are here for historical purposes
+# Following values are not longer used in the configuration
+# of the integration and are here for historical purposes
 vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
 vol.Optional(
 CONF_NUMBER_OF_TOOLS, default=0
@@ -96,9 +96,10 @@ class OmniLogicSwitch(OmniLogicEntity, SwitchEntity):
 """Return the on/off state of the switch."""
 state_int = 0

-# The Omnilogic API has a significant delay in state reporting after calling for a
-# change. This state delay will ensure that HA keeps an optimistic value of state
-# during this period to improve the user experience and avoid confusion.
+# The Omnilogic API has a significant delay in state reporting after
+# calling for a change. This state delay will ensure that HA keeps an
+# optimistic value of state during this period to improve the user
+# experience and avoid confusion.
 if self._last_action < (time.time() - self._state_delay):
 state_int = int(self.coordinator.data[self._item_id][self._state_key])

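For illustration, a minimal sketch of the optimistic-state window described above (attribute names are taken from the surrounding diff, the helper shape is assumed):

import time

def reported_state(last_action: float, state_delay: float,
                   optimistic_value: int, coordinator_value: int) -> int:
    # Within state_delay seconds of the last command, keep the optimistic
    # value; after that, trust what the coordinator reports.
    if last_action < (time.time() - state_delay):
        return coordinator_value
    return optimistic_value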
@@ -66,7 +66,8 @@ class OncueEntity(
 return False
 # If the cloud is reporting that the generator is not connected
 # this also indicates the data is not available.
-# The battery voltage sensor reports 0.0 rather than -- hence the purpose of this check.
+# The battery voltage sensor reports 0.0 rather than
+# -- hence the purpose of this check.
 device: OncueDevice = self.coordinator.data[self._device_id]
 conn_established: OncueSensor = device.sensors[CONNECTION_ESTABLISHED_KEY]
 if (
@@ -84,7 +84,8 @@ def async_setup_block_attribute_entities(
 if getattr(block, sensor_id, None) in (-1, None):
 continue

-# Filter and remove entities that according to settings should not create an entity
+# Filter and remove entities that according to settings
+# should not create an entity
 if description.removal_condition and description.removal_condition(
 coordinator.device.settings, block
 ):
@@ -192,7 +193,8 @@ def async_setup_rpc_attribute_entities(
 ] and not description.supported(coordinator.device.status[key]):
 continue

-# Filter and remove entities that according to settings/status should not create an entity
+# Filter and remove entities that according to settings/status
+# should not create an entity
 if description.removal_condition and description.removal_condition(
 coordinator.device.config, coordinator.device.status, key
 ):
@@ -298,7 +298,8 @@ class BlockShellyLight(ShellyBlockEntity, LightEntity):
 ATTR_COLOR_TEMP_KELVIN in kwargs
 and ColorMode.COLOR_TEMP in supported_color_modes
 ):
-# Color temperature change - used only in white mode, switch device mode to white
+# Color temperature change - used only in white mode,
+# switch device mode to white
 color_temp = kwargs[ATTR_COLOR_TEMP_KELVIN]
 set_mode = "white"
 params["temp"] = int(
@@ -309,12 +310,14 @@ class BlockShellyLight(ShellyBlockEntity, LightEntity):
 )

 if ATTR_RGB_COLOR in kwargs and ColorMode.RGB in supported_color_modes:
-# Color channels change - used only in color mode, switch device mode to color
+# Color channels change - used only in color mode,
+# switch device mode to color
 set_mode = "color"
 (params["red"], params["green"], params["blue"]) = kwargs[ATTR_RGB_COLOR]

 if ATTR_RGBW_COLOR in kwargs and ColorMode.RGBW in supported_color_modes:
-# Color channels change - used only in color mode, switch device mode to color
+# Color channels change - used only in color mode,
+# switch device mode to color
 set_mode = "color"
 (params["red"], params["green"], params["blue"], params["white"]) = kwargs[
 ATTR_RGBW_COLOR
@@ -160,12 +160,16 @@ class StreamMuxer:
 format=SEGMENT_CONTAINER_FORMAT,
 container_options={
 **{
-# Removed skip_sidx - see https://github.com/home-assistant/core/pull/39970
-# "cmaf" flag replaces several of the movflags used, but too recent to use for now
+# Removed skip_sidx - see:
+# https://github.com/home-assistant/core/pull/39970
+# "cmaf" flag replaces several of the movflags used,
+# but too recent to use for now
 "movflags": "frag_custom+empty_moov+default_base_moof+frag_discont+negative_cts_offsets+skip_trailer+delay_moov",
-# Sometimes the first segment begins with negative timestamps, and this setting just
-# adjusts the timestamps in the output from that segment to start from 0. Helps from
-# having to make some adjustments in test_durations
+# Sometimes the first segment begins with negative timestamps,
+# and this setting just
+# adjusts the timestamps in the output from that segment to start
+# from 0. Helps from having to make some adjustments
+# in test_durations
 "avoid_negative_ts": "make_non_negative",
 "fragment_index": str(sequence + 1),
 "video_track_timescale": str(int(1 / input_vstream.time_base)),
@@ -176,22 +180,31 @@ class StreamMuxer:
 **(
 {
 "movflags": "empty_moov+default_base_moof+frag_discont+negative_cts_offsets+skip_trailer+delay_moov",
-# Create a fragment every TARGET_PART_DURATION. The data from each fragment is stored in
-# a "Part" that can be combined with the data from all the other "Part"s, plus an init
-# section, to reconstitute the data in a "Segment".
-# The LL-HLS spec allows for a fragment's duration to be within the range [0.85x,1.0x]
-# of the part target duration. We use the frag_duration option to tell ffmpeg to try to
-# cut the fragments when they reach frag_duration. However, the resulting fragments can
-# have variability in their durations and can end up being too short or too long. With a
-# video track with no audio, the discrete nature of frames means that the frame at the
-# end of a fragment will sometimes extend slightly beyond the desired frag_duration.
-# If there are two tracks, as in the case of a video feed with audio, there is an added
-# wrinkle as the fragment cut seems to be done on the first track that crosses the desired
-# threshold, and cutting on the audio track may also result in a shorter video fragment
-# than desired.
-# Given this, our approach is to give ffmpeg a frag_duration somewhere in the middle
-# of the range, hoping that the parts stay pretty well bounded, and we adjust the part
-# durations a bit in the hls metadata so that everything "looks" ok.
+# Create a fragment every TARGET_PART_DURATION. The data from
+# each fragment is stored in a "Part" that can be combined with
+# the data from all the other "Part"s, plus an init section,
+# to reconstitute the data in a "Segment".
+#
+# The LL-HLS spec allows for a fragment's duration to be within
+# the range [0.85x,1.0x] of the part target duration. We use the
+# frag_duration option to tell ffmpeg to try to cut the
+# fragments when they reach frag_duration. However,
+# the resulting fragments can have variability in their
+# durations and can end up being too short or too long. With a
+# video track with no audio, the discrete nature of frames means
+# that the frame at the end of a fragment will sometimes extend
+# slightly beyond the desired frag_duration.
+#
+# If there are two tracks, as in the case of a video feed with
+# audio, there is an added wrinkle as the fragment cut seems to
+# be done on the first track that crosses the desired threshold,
+# and cutting on the audio track may also result in a shorter
+# video fragment than desired.
+#
+# Given this, our approach is to give ffmpeg a frag_duration
+# somewhere in the middle of the range, hoping that the parts
+# stay pretty well bounded, and we adjust the part durations
+# a bit in the hls metadata so that everything "looks" ok.
 "frag_duration": str(
 int(self._stream_settings.part_target_duration * 9e5)
 ),
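For illustration, the arithmetic behind the frag_duration value above as a minimal sketch (ffmpeg's frag_duration is in microseconds; reading 9e5 as a 0.9 factor in the middle of the LL-HLS range is the assumption here):

def frag_duration_us(part_target_duration_s: float) -> int:
    # 0.9 x part target, expressed in microseconds, sits inside the
    # LL-HLS [0.85x, 1.0x] window for a part duration.
    return int(part_target_duration_s * 0.9 * 1e6)  # same value as * 9e5

# e.g. a 1.0 s part target -> 900000 us requested fragment duration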
@@ -425,10 +438,10 @@ class TimestampValidator:
 )
 # Number of consecutive missing decompression timestamps
 self._missing_dts = 0
-# For the bounds, just use the larger of the two values. If the error is not flagged
-# by one stream, it should just get flagged by the other stream. Either value should
-# result in a value which is much less than a 32 bit INT_MAX, which helps avoid the
-# assertion error from FFmpeg.
+# For the bounds, just use the larger of the two values. If the error
+# is not flagged by one stream, it should just get flagged by the other
+# stream. Either value should result in a value which is much less than
+# a 32 bit INT_MAX, which helps avoid the assertion error from FFmpeg.
 self._max_dts_gap = MAX_TIMESTAMP_GAP * max(
 inv_video_time_base, inv_audio_time_base
 )
@@ -268,7 +268,8 @@ async def _async_migrate_entries(
 all_sensors.extend(API_GEN_2_SENSORS)
 all_sensors.extend(SAFETY_SENSORS)

-# Old unique_id is (previously title-cased) sensor name (e.g. "VIN_Avg Fuel Consumption")
+# Old unique_id is (previously title-cased) sensor name
+# (e.g. "VIN_Avg Fuel Consumption")
 replacements = {str(s.name).upper(): s.key for s in all_sensors}

 @callback
@@ -104,11 +104,13 @@ class SwitchBeeLightEntity(SwitchBeeDeviceEntity[SwitchBeeDimmer], LightEntity):
 ) from exp

 if not isinstance(state, int):
-# We just turned the light on, still don't know the last brightness known the Central Unit (yet)
-# the brightness will be learned and updated in the next coordinator refresh
+# We just turned the light on, still don't know the last brightness
+# known the Central Unit (yet) the brightness will be learned
+# and updated in the next coordinator refresh
 return

-# update the coordinator data manually we already know the Central Unit brightness data for this light
+# update the coordinator data manually we already know the Central Unit
+# brightness data for this light
 self._get_coordinator_device().brightness = state
 self.coordinator.async_set_updated_data(self.coordinator.data)

@@ -87,5 +87,6 @@ class FuelPriceSensor(TankerkoenigCoordinatorEntity, SensorEntity):
 @property
 def native_value(self):
 """Return the state of the device."""
-# key Fuel_type is not available when the fuel station is closed, use "get" instead of "[]" to avoid exceptions
+# key Fuel_type is not available when the fuel station is closed,
+# use "get" instead of "[]" to avoid exceptions
 return self.coordinator.data[self._station_id].get(self._fuel_type)
@@ -469,12 +469,15 @@ class TibberSensorRT(TibberSensor, CoordinatorEntity["TibberRtDataCoordinator"])
 "accumulatedConsumption",
 "accumulatedProduction",
 ):
-# Value is reset to 0 at midnight, but not always strictly increasing due to hourly corrections
-# If device is offline, last_reset should be updated when it comes back online if the value has decreased
+# Value is reset to 0 at midnight, but not always strictly increasing
+# due to hourly corrections.
+# If device is offline, last_reset should be updated when it comes
+# back online if the value has decreased
 ts_local = dt_util.parse_datetime(live_measurement["timestamp"])
 if ts_local is not None:
 if self.last_reset is None or (
-state < 0.5 * self.native_value # type: ignore[operator] # native_value is float
+# native_value is float
+state < 0.5 * self.native_value # type: ignore[operator]
 and (
 ts_local.hour == 0
 or (ts_local - self.last_reset) > timedelta(hours=24)
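For illustration, a minimal sketch of the last_reset decision the comments above describe (simplified; the 0.5 drop factor and the midnight/24-hour conditions follow the surrounding diff, the None handling is assumed):

from datetime import datetime, timedelta
from typing import Optional

def should_update_last_reset(state: float, native_value: Optional[float],
                             ts_local: datetime,
                             last_reset: Optional[datetime]) -> bool:
    # First value ever seen: start a new reset period.
    if last_reset is None or native_value is None:
        return True
    # A large drop around midnight, or after more than a day offline,
    # is treated as the daily reset of the accumulated value.
    dropped = state < 0.5 * native_value
    return dropped and (ts_local.hour == 0 or ts_local - last_reset > timedelta(hours=24))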
@@ -39,8 +39,14 @@ class ToonElectricityMeterDeviceEntity(ToonEntity):
 agreement_id = self.coordinator.data.agreement.agreement_id
 return DeviceInfo(
 name="Electricity Meter",
-identifiers={(DOMAIN, agreement_id, "electricity")}, # type: ignore[arg-type]
-via_device=(DOMAIN, agreement_id, "meter_adapter"), # type: ignore[typeddict-item]
+identifiers={
+(DOMAIN, agreement_id, "electricity"), # type: ignore[arg-type]
+},
+via_device=(
+DOMAIN,
+agreement_id, # type: ignore[typeddict-item]
+"meter_adapter",
+),
 )


@@ -53,8 +59,14 @@ class ToonGasMeterDeviceEntity(ToonEntity):
 agreement_id = self.coordinator.data.agreement.agreement_id
 return DeviceInfo(
 name="Gas Meter",
-identifiers={(DOMAIN, agreement_id, "gas")}, # type: ignore[arg-type]
-via_device=(DOMAIN, agreement_id, "electricity"), # type: ignore[typeddict-item]
+identifiers={
+(DOMAIN, agreement_id, "gas"), # type: ignore[arg-type]
+},
+via_device=(
+DOMAIN,
+agreement_id, # type: ignore[typeddict-item]
+"electricity",
+),
 )


@@ -67,8 +79,14 @@ class ToonWaterMeterDeviceEntity(ToonEntity):
 agreement_id = self.coordinator.data.agreement.agreement_id
 return DeviceInfo(
 name="Water Meter",
-identifiers={(DOMAIN, agreement_id, "water")}, # type: ignore[arg-type]
-via_device=(DOMAIN, agreement_id, "electricity"), # type: ignore[typeddict-item]
+identifiers={
+(DOMAIN, agreement_id, "water"), # type: ignore[arg-type]
+},
+via_device=(
+DOMAIN,
+agreement_id, # type: ignore[typeddict-item]
+"electricity",
+),
 )


@@ -81,8 +99,14 @@ class ToonSolarDeviceEntity(ToonEntity):
 agreement_id = self.coordinator.data.agreement.agreement_id
 return DeviceInfo(
 name="Solar Panels",
-identifiers={(DOMAIN, agreement_id, "solar")}, # type: ignore[arg-type]
-via_device=(DOMAIN, agreement_id, "meter_adapter"), # type: ignore[typeddict-item]
+identifiers={
+(DOMAIN, agreement_id, "solar"), # type: ignore[arg-type]
+},
+via_device=(
+DOMAIN,
+agreement_id, # type: ignore[typeddict-item]
+"meter_adapter",
+),
 )


@@ -96,7 +120,13 @@ class ToonBoilerModuleDeviceEntity(ToonEntity):
 return DeviceInfo(
 name="Boiler Module",
 manufacturer="Eneco",
-identifiers={(DOMAIN, agreement_id, "boiler_module")}, # type: ignore[arg-type]
+identifiers={
+(
+DOMAIN,
+agreement_id, # type: ignore[arg-type]
+"boiler_module",
+)
+},
 via_device=(DOMAIN, agreement_id),
 )

@@ -110,8 +140,14 @@ class ToonBoilerDeviceEntity(ToonEntity):
 agreement_id = self.coordinator.data.agreement.agreement_id
 return DeviceInfo(
 name="Boiler",
-identifiers={(DOMAIN, agreement_id, "boiler")}, # type: ignore[arg-type]
-via_device=(DOMAIN, agreement_id, "boiler_module"), # type: ignore[typeddict-item]
+identifiers={
+(DOMAIN, agreement_id, "boiler"), # type: ignore[arg-type]
+},
+via_device=(
+DOMAIN,
+agreement_id, # type: ignore[typeddict-item]
+"boiler_module",
+),
 )


@@ -283,7 +283,8 @@ class TwinklyLight(LightEntity):
 self._model = device_info[DEV_MODEL]

 # If the name has changed, persist it in conf entry,
-# so we will be able to restore this new name if hass is started while the LED string is offline.
+# so we will be able to restore this new name if hass
+# is started while the LED string is offline.
 self.hass.config_entries.async_update_entry(
 self._conf,
 data={
@@ -301,11 +302,12 @@ class TwinklyLight(LightEntity):
 if not self._is_available:
 _LOGGER.info("Twinkly '%s' is now available", self._client.host)

-# We don't use the echo API to track the availability since we already have to pull
-# the device to get its state.
+# We don't use the echo API to track the availability since
+# we already have to pull the device to get its state.
 self._is_available = True
 except (asyncio.TimeoutError, ClientError):
-# We log this as "info" as it's pretty common that the christmas light are not reachable in july
+# We log this as "info" as it's pretty common that the Christmas
+# light are not reachable in July
 if self._is_available:
 _LOGGER.info(
 "Twinkly '%s' is not reachable (client error)", self._client.host
@@ -384,7 +384,8 @@ class ProtectSelects(ProtectDeviceEntity, SelectEntity):
 def _async_update_device_from_protect(self, device: ProtectModelWithId) -> None:
 super()._async_update_device_from_protect(device)

-# entities with categories are not exposed for voice and safe to update dynamically
+# entities with categories are not exposed for voice
+# and safe to update dynamically
 if (
 self.entity_description.entity_category is not None
 and self.entity_description.ufp_options_fn is not None
@@ -443,7 +444,10 @@ class ProtectSelects(ProtectDeviceEntity, SelectEntity):
 is_persistent=True,
 severity=ir.IssueSeverity.WARNING,
 translation_placeholders={
-"link": "https://www.home-assistant.io/integrations/text#service-textset_value"
+"link": (
+"https://www.home-assistant.io/integrations"
+"/text#service-textset_value"
+)
 },
 translation_key="deprecated_service_set_doorbell_message",
 )
@@ -195,8 +195,8 @@ CAMERA_SENSORS: tuple[ProtectSensorEntityDescription, ...] = (
 entity_category=EntityCategory.DIAGNOSTIC,
 state_class=SensorStateClass.MEASUREMENT,
 ufp_value="voltage",
-# no feature flag, but voltage will be null if device does not have voltage sensor
-# (i.e. is not G4 Doorbell or not on 1.20.1+)
+# no feature flag, but voltage will be null if device does not have
+# voltage sensor (i.e. is not G4 Doorbell or not on 1.20.1+)
 ufp_required_field="voltage",
 precision=2,
 ),
@@ -77,7 +77,8 @@ class UpnpFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
 VERSION = 1

 # Paths:
-# - ssdp(discovery_info) --> ssdp_confirm(None) --> ssdp_confirm({}) --> create_entry()
+# - ssdp(discovery_info) --> ssdp_confirm(None)
+# --> ssdp_confirm({}) --> create_entry()
 # - user(None): scan --> user({...}) --> create_entry()

 @property
@@ -173,7 +174,8 @@ class UpnpFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
 host = discovery_info.ssdp_headers["_host"]
 self._abort_if_unique_id_configured(
 # Store mac address and other data for older entries.
-# The location is stored in the config entry such that when the location changes, the entry is reloaded.
+# The location is stored in the config entry such that
+# when the location changes, the entry is reloaded.
 updates={
 CONFIG_ENTRY_MAC_ADDRESS: mac_address,
 CONFIG_ENTRY_LOCATION: discovery_info.ssdp_location,
@@ -205,8 +207,9 @@ class UpnpFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
 data={**entry.data, CONFIG_ENTRY_UDN: discovery_info.ssdp_udn},
 )
 if entry.state == config_entries.ConfigEntryState.LOADED:
-# Only reload when entry has state LOADED; when entry has state SETUP_RETRY,
-# another load is started, causing the entry to be loaded twice.
+# Only reload when entry has state LOADED; when entry has state
+# SETUP_RETRY, another load is started,
+# causing the entry to be loaded twice.
 LOGGER.debug("Reloading entry: %s", entry.entry_id)
 self.hass.async_create_task(
 self.hass.config_entries.async_reload(entry.entry_id)
@@ -121,7 +121,8 @@ class VeSyncBaseLight(VeSyncDevice, LightEntity):
 brightness = max(1, min(brightness, 100))
 # call pyvesync library api method to set brightness
 self.device.set_brightness(brightness)
-# flag attribute_adjustment_only, so it doesn't turn_on the device redundantly
+# flag attribute_adjustment_only, so it doesn't
+# turn_on the device redundantly
 attribute_adjustment_only = True
 # check flag if should skip sending the turn_on command
 if attribute_adjustment_only:
@@ -71,7 +71,8 @@ async def async_setup_entry(
 name = config_entry.data[CONF_NAME]
 device_class = config_entry.data[CONF_DEVICE_CLASS]

-# If config entry options not set up, set them up, otherwise assign values managed in options
+# If config entry options not set up, set them up,
+# otherwise assign values managed in options
 volume_step = config_entry.options.get(
 CONF_VOLUME_STEP, config_entry.data.get(CONF_VOLUME_STEP, DEFAULT_VOLUME_STEP)
 )
@@ -1,4 +1,7 @@
-"""Home Assistant component for accessing the Wallbox Portal API. The number component allows control of charging current."""
+"""Home Assistant component for accessing the Wallbox Portal API.
+
+The number component allows control of charging current.
+"""
 from __future__ import annotations

 from dataclasses import dataclass
@@ -104,10 +104,14 @@ WITHINGS_MEASURE_TYPE_MAP: dict[
 MeasureType.SP02: Measurement.SPO2_PCT,
 MeasureType.HYDRATION: Measurement.HYDRATION,
 MeasureType.PULSE_WAVE_VELOCITY: Measurement.PWV,
-GetSleepSummaryField.BREATHING_DISTURBANCES_INTENSITY: Measurement.SLEEP_BREATHING_DISTURBANCES_INTENSITY,
+GetSleepSummaryField.BREATHING_DISTURBANCES_INTENSITY: (
+Measurement.SLEEP_BREATHING_DISTURBANCES_INTENSITY
+),
 GetSleepSummaryField.DEEP_SLEEP_DURATION: Measurement.SLEEP_DEEP_DURATION_SECONDS,
 GetSleepSummaryField.DURATION_TO_SLEEP: Measurement.SLEEP_TOSLEEP_DURATION_SECONDS,
-GetSleepSummaryField.DURATION_TO_WAKEUP: Measurement.SLEEP_TOWAKEUP_DURATION_SECONDS,
+GetSleepSummaryField.DURATION_TO_WAKEUP: (
+Measurement.SLEEP_TOWAKEUP_DURATION_SECONDS
+),
 GetSleepSummaryField.HR_AVERAGE: Measurement.SLEEP_HEART_RATE_AVERAGE,
 GetSleepSummaryField.HR_MAX: Measurement.SLEEP_HEART_RATE_MAX,
 GetSleepSummaryField.HR_MIN: Measurement.SLEEP_HEART_RATE_MIN,
@@ -292,7 +296,8 @@ class DataManager:
 async def _do_retry(self, func, attempts=3) -> Any:
 """Retry a function call.

-Withings' API occasionally and incorrectly throws errors. Retrying the call tends to work.
+Withings' API occasionally and incorrectly throws errors.
+Retrying the call tends to work.
 """
 exception = None
 for attempt in range(1, attempts + 1):
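For illustration, a minimal async retry helper in the shape of the method above (the back-off, the caught exception type and the callable signature are assumptions, not the integration's actual code):

import asyncio
from typing import Any, Awaitable, Callable

async def do_retry(func: Callable[[], Awaitable[Any]], attempts: int = 3) -> Any:
    exception: Exception | None = None
    for attempt in range(1, attempts + 1):
        try:
            return await func()
        except Exception as err:  # sketch only; narrow the exception type in real code
            exception = err
            # Brief pause before retrying the occasional spurious API error.
            await asyncio.sleep(0.1 * attempt)
    raise exception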
@@ -375,8 +380,8 @@ class DataManager:
 profile.appli,
 self._notify_unsubscribe_delay.total_seconds(),
 )
-# Quick calls to Withings can result in the service returning errors. Give them
-# some time to cool down.
+# Quick calls to Withings can result in the service returning errors.
+# Give them some time to cool down.
 await asyncio.sleep(self._notify_subscribe_delay.total_seconds())
 await self._hass.async_add_executor_job(
 self._api.notify_revoke, profile.callbackurl, profile.appli
@@ -50,11 +50,12 @@ class XboxBaseSensorEntity(CoordinatorEntity[XboxUpdateCoordinator]):
 if not self.data:
 return None

-# Xbox sometimes returns a domain that uses a wrong certificate which creates issues
-# with loading the image.
+# Xbox sometimes returns a domain that uses a wrong certificate which
+# creates issues with loading the image.
 # The correct domain is images-eds-ssl which can just be replaced
 # to point to the correct image, with the correct domain and certificate.
-# We need to also remove the 'mode=Padding' query because with it, it results in an error 400.
+# We need to also remove the 'mode=Padding' query because with it,
+# it results in an error 400.
 url = URL(self.data.display_pic)
 if url.host == "images-eds.xboxlive.com":
 url = url.with_host("images-eds-ssl.xboxlive.com").with_scheme("https")
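For illustration, the URL rewrite described above as a standalone sketch using yarl (the way the 'mode' query is stripped here is an assumption; hosts and scheme follow the surrounding diff):

from yarl import URL

def safe_display_pic(raw: str) -> str:
    url = URL(raw)
    # Swap the mis-certificated host for its images-eds-ssl equivalent.
    if url.host == "images-eds.xboxlive.com":
        url = url.with_host("images-eds-ssl.xboxlive.com").with_scheme("https")
    # Drop the 'mode=Padding' query, which otherwise yields HTTP 400.
    query = {k: v for k, v in url.query.items() if k != "mode"}
    return str(url.with_query(query))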
@@ -54,7 +54,10 @@ class XiaomiConfigFlow(ConfigFlow, domain=DOMAIN):
 async def _async_wait_for_full_advertisement(
 self, discovery_info: BluetoothServiceInfo, device: DeviceData
 ) -> BluetoothServiceInfo:
-"""Sometimes first advertisement we receive is blank or incomplete. Wait until we get a useful one."""
+"""Sometimes first advertisement we receive is blank or incomplete.
+
+Wait until we get a useful one.
+"""
 if not device.pending:
 return discovery_info

@@ -87,15 +90,16 @@ class XiaomiConfigFlow(ConfigFlow, domain=DOMAIN):

 self._discovered_device = device

-# Wait until we have received enough information about this device to detect its encryption type
+# Wait until we have received enough information about
+# this device to detect its encryption type
 try:
 self._discovery_info = await self._async_wait_for_full_advertisement(
 discovery_info, device
 )
 except asyncio.TimeoutError:
 # This device might have a really long advertising interval
-# So create a config entry for it, and if we discover it has encryption later
-# We can do a reauth
+# So create a config entry for it, and if we discover it has
+# encryption later, we can do a reauth
 return await self.async_step_confirm_slow()

 if device.encryption_scheme == EncryptionScheme.MIBEACON_LEGACY:
@@ -210,15 +214,16 @@ class XiaomiConfigFlow(ConfigFlow, domain=DOMAIN):

 self.context["title_placeholders"] = {"name": discovery.title}

-# Wait until we have received enough information about this device to detect its encryption type
+# Wait until we have received enough information about
+# this device to detect its encryption type
 try:
 self._discovery_info = await self._async_wait_for_full_advertisement(
 discovery.discovery_info, discovery.device
 )
 except asyncio.TimeoutError:
 # This device might have a really long advertising interval
-# So create a config entry for it, and if we discover it has encryption later
-# We can do a reauth
+# So create a config entry for it, and if we discover
+# it has encryption later, we can do a reauth
 return await self.async_step_confirm_slow()

 self._discovered_device = discovery.device
@@ -351,12 +351,14 @@ async def async_create_miio_device_and_coordinator(
 return

 if migrate:
-# Removing fan platform entity for humidifiers and migrate the name to the config entry for migration
+# Removing fan platform entity for humidifiers and migrate the name
+# to the config entry for migration
 entity_registry = er.async_get(hass)
 assert entry.unique_id
 entity_id = entity_registry.async_get_entity_id("fan", DOMAIN, entry.unique_id)
 if entity_id:
-# This check is entities that have a platform migration only and should be removed in the future
+# This check is entities that have a platform migration only
+# and should be removed in the future
 if (entity := entity_registry.async_get(entity_id)) and (
 migrate_entity_name := entity.name
 ):
@@ -769,10 +769,13 @@ class MusicCastMediaPlayer(MusicCastDeviceEntity, MediaPlayerEntity):
 async def async_client_join(self, group_id, server) -> bool:
 """Let the client join a group.

-If this client is a server, the server will stop distributing. If the client is part of a different group,
-it will leave that group first. Returns True, if the server has to add the client on his side.
+If this client is a server, the server will stop distributing.
+If the client is part of a different group,
+it will leave that group first. Returns True, if the server has to
+add the client on his side.
 """
-# If we should join the group, which is served by the main zone, we can simply select main_sync as input.
+# If we should join the group, which is served by the main zone,
+# we can simply select main_sync as input.
 _LOGGER.debug("%s called service client join", self.entity_id)
 if self.state == MediaPlayerState.OFF:
 await self.async_turn_on()
@@ -24,7 +24,8 @@ class YoLinkCoordinator(DataUpdateCoordinator[dict]):
 """Init YoLink DataUpdateCoordinator.

 fetch state every 30 minutes base on yolink device heartbeat interval
-data is None before the first successful update, but we need to use data at first update
+data is None before the first successful update, but we need to use
+data at first update
 """
 super().__init__(
 hass, _LOGGER, name=DOMAIN, update_interval=timedelta(minutes=30)
@@ -1,4 +1,4 @@
-"""Config Flow for zamg the Austrian "Zentralanstalt für Meteorologie und Geodynamik" integration."""
+"""Config Flow for the zamg integration."""
 from __future__ import annotations

 from typing import Any
@@ -1,4 +1,4 @@
-"""Constants for zamg the Austrian "Zentralanstalt für Meteorologie und Geodynamik" integration."""
+"""Constants for the zamg integration."""

 from datetime import timedelta
 import logging
@@ -1,4 +1,4 @@
-"""Sensor for zamg the Austrian "Zentralanstalt für Meteorologie und Geodynamik" integration."""
+"""Sensor for the zamg integration."""
 from __future__ import annotations

 from collections.abc import Mapping
@@ -1,4 +1,4 @@
-"""Sensor for zamg the Austrian "Zentralanstalt für Meteorologie und Geodynamik" integration."""
+"""Sensor for the zamg integration."""
 from __future__ import annotations

 from homeassistant.components.weather import WeatherEntity
@@ -99,7 +99,8 @@ async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> b
 if config.get(CONF_ENABLE_QUIRKS, True):
 setup_quirks(config)

-# temporary code to remove the ZHA storage file from disk. this will be removed in 2022.10.0
+# temporary code to remove the ZHA storage file from disk.
+# this will be removed in 2022.10.0
 storage_path = hass.config.path(STORAGE_DIR, "zha.storage")
 if os.path.isfile(storage_path):
 _LOGGER.debug("removing ZHA storage file")
@@ -276,9 +276,9 @@ class IasWd(ZigbeeChannel):
 ):
 """Issue a squawk command.

-This command uses the WD capabilities to emit a quick audible/visible pulse called a
-"squawk". The squawk command has no effect if the WD is currently active
-(warning in progress).
+This command uses the WD capabilities to emit a quick audible/visible
+pulse called a "squawk". The squawk command has no effect if the WD
+is currently active (warning in progress).
 """
 value = 0
 value = IasWd.set_bit(value, 0, squawk_level, 0)
@@ -304,16 +304,18 @@ class IasWd(ZigbeeChannel):
 ):
 """Issue a start warning command.

-This command starts the WD operation. The WD alerts the surrounding area by audible
-(siren) and visual (strobe) signals.
+This command starts the WD operation. The WD alerts the surrounding area
+by audible (siren) and visual (strobe) signals.

 strobe_duty_cycle indicates the length of the flash cycle. This provides a means
-of varying the flash duration for different alarm types (e.g., fire, police, burglar).
-Valid range is 0-100 in increments of 10. All other values SHALL be rounded to the
-nearest valid value. Strobe SHALL calculate duty cycle over a duration of one second.
-The ON state SHALL precede the OFF state. For example, if Strobe Duty Cycle Field specifies
-“40,” then the strobe SHALL flash ON for 4/10ths of a second and then turn OFF for
-6/10ths of a second.
+of varying the flash duration for different alarm types (e.g., fire, police,
+burglar). Valid range is 0-100 in increments of 10. All other values SHALL
+be rounded to the nearest valid value. Strobe SHALL calculate duty cycle over
+a duration of one second.
+
+The ON state SHALL precede the OFF state. For example, if Strobe Duty Cycle
+Field specifies “40,” then the strobe SHALL flash ON for 4/10ths of a second
+and then turn OFF for 6/10ths of a second.
 """
 value = 0
 value = IasWd.set_bit(value, 0, siren_level, 0)
@@ -210,7 +210,8 @@ class ProbeEndpoint:
 for component, ent_n_chan_list in matches.items():
 for entity_and_channel in ent_n_chan_list:
 if component == cmpt_by_dev_type:
-# for well known device types, like thermostats we'll take only 1st class
+# for well known device types, like thermostats
+# we'll take only 1st class
 channel_pool.async_new_entity(
 component,
 entity_and_channel.entity_class,
@ -230,7 +230,8 @@ class ZHAGateway:
|
|||||||
for group_id in self.application_controller.groups:
|
for group_id in self.application_controller.groups:
|
||||||
group = self.application_controller.groups[group_id]
|
group = self.application_controller.groups[group_id]
|
||||||
zha_group = self._async_get_or_create_group(group)
|
zha_group = self._async_get_or_create_group(group)
|
||||||
# we can do this here because the entities are in the entity registry tied to the devices
|
# we can do this here because the entities are in the
|
||||||
|
# entity registry tied to the devices
|
||||||
discovery.GROUP_PROBE.discover_group_entities(zha_group)
|
discovery.GROUP_PROBE.discover_group_entities(zha_group)
|
||||||
|
|
||||||
async def async_initialize_devices_and_entities(self) -> None:
|
async def async_initialize_devices_and_entities(self) -> None:
|
||||||
@ -325,7 +326,8 @@ class ZHAGateway:
|
|||||||
self._hass, f"{SIGNAL_GROUP_MEMBERSHIP_CHANGE}_0x{zigpy_group.group_id:04x}"
|
self._hass, f"{SIGNAL_GROUP_MEMBERSHIP_CHANGE}_0x{zigpy_group.group_id:04x}"
|
||||||
)
|
)
|
||||||
if len(zha_group.members) == 2:
|
if len(zha_group.members) == 2:
|
||||||
# we need to do this because there wasn't already a group entity to remove and re-add
|
# we need to do this because there wasn't already
|
||||||
|
# a group entity to remove and re-add
|
||||||
discovery.GROUP_PROBE.discover_group_entities(zha_group)
|
discovery.GROUP_PROBE.discover_group_entities(zha_group)
|
||||||
|
|
||||||
def group_added(self, zigpy_group: zigpy.group.Group) -> None:
|
def group_added(self, zigpy_group: zigpy.group.Group) -> None:
|
||||||
@ -419,7 +421,9 @@ class ZHAGateway:
|
|||||||
if entity.zha_device.ieee in self.device_registry:
|
if entity.zha_device.ieee in self.device_registry:
|
||||||
entity_refs = self.device_registry.get(entity.zha_device.ieee)
|
entity_refs = self.device_registry.get(entity.zha_device.ieee)
|
||||||
self.device_registry[entity.zha_device.ieee] = [
|
self.device_registry[entity.zha_device.ieee] = [
|
||||||
e for e in entity_refs if e.reference_id != entity.entity_id # type: ignore[union-attr]
|
e
|
||||||
|
for e in entity_refs # type: ignore[union-attr]
|
||||||
|
if e.reference_id != entity.entity_id
|
||||||
]
|
]
|
||||||
|
|
||||||
def _cleanup_group_entity_registry_entries(
|
def _cleanup_group_entity_registry_entries(
|
||||||
@ -440,7 +444,8 @@ class ZHAGateway:
|
|||||||
include_disabled_entities=True,
|
include_disabled_entities=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
# then we get the entity entries for this specific group by getting the entries that match
|
# then we get the entity entries for this specific group
|
||||||
|
# by getting the entries that match
|
||||||
entries_to_remove = [
|
entries_to_remove = [
|
||||||
entry
|
entry
|
||||||
for entry in all_group_entity_entries
|
for entry in all_group_entity_entries
|
||||||
@ -619,7 +624,8 @@ class ZHAGateway:
|
|||||||
zha_device.nwk,
|
zha_device.nwk,
|
||||||
zha_device.ieee,
|
zha_device.ieee,
|
||||||
)
|
)
|
||||||
# we don't have to do this on a nwk swap but we don't have a way to tell currently
|
# we don't have to do this on a nwk swap
|
||||||
|
# but we don't have a way to tell currently
|
||||||
await zha_device.async_configure()
|
await zha_device.async_configure()
|
||||||
device_info = zha_device.device_info
|
device_info = zha_device.device_info
|
||||||
device_info[DEVICE_PAIRING_STATUS] = DevicePairingStatus.CONFIGURED.name
|
device_info[DEVICE_PAIRING_STATUS] = DevicePairingStatus.CONFIGURED.name
|
||||||
|
@ -93,7 +93,9 @@ DEVICE_CLASS = {
|
|||||||
zigpy.profiles.zha.DeviceType.ON_OFF_PLUG_IN_UNIT: Platform.SWITCH,
|
zigpy.profiles.zha.DeviceType.ON_OFF_PLUG_IN_UNIT: Platform.SWITCH,
|
||||||
zigpy.profiles.zha.DeviceType.SHADE: Platform.COVER,
|
zigpy.profiles.zha.DeviceType.SHADE: Platform.COVER,
|
||||||
zigpy.profiles.zha.DeviceType.SMART_PLUG: Platform.SWITCH,
|
zigpy.profiles.zha.DeviceType.SMART_PLUG: Platform.SWITCH,
|
||||||
zigpy.profiles.zha.DeviceType.IAS_ANCILLARY_CONTROL: Platform.ALARM_CONTROL_PANEL,
|
zigpy.profiles.zha.DeviceType.IAS_ANCILLARY_CONTROL: (
|
||||||
|
Platform.ALARM_CONTROL_PANEL
|
||||||
|
),
|
||||||
zigpy.profiles.zha.DeviceType.IAS_WARNING_DEVICE: Platform.SIREN,
|
zigpy.profiles.zha.DeviceType.IAS_WARNING_DEVICE: Platform.SIREN,
|
||||||
},
|
},
|
||||||
zigpy.profiles.zll.PROFILE_ID: {
|
zigpy.profiles.zll.PROFILE_ID: {
|
||||||
|
@ -180,7 +180,10 @@ class BaseLight(LogMixin, light.LightEntity):
|
|||||||
transition * 10
|
transition * 10
|
||||||
if transition is not None
|
if transition is not None
|
||||||
else self._zha_config_transition * 10
|
else self._zha_config_transition * 10
|
||||||
) or self._DEFAULT_MIN_TRANSITION_TIME # if 0 is passed in some devices still need the minimum default
|
) or (
|
||||||
|
# if 0 is passed in some devices still need the minimum default
|
||||||
|
self._DEFAULT_MIN_TRANSITION_TIME
|
||||||
|
)
|
||||||
brightness = kwargs.get(light.ATTR_BRIGHTNESS)
|
brightness = kwargs.get(light.ATTR_BRIGHTNESS)
|
||||||
effect = kwargs.get(light.ATTR_EFFECT)
|
effect = kwargs.get(light.ATTR_EFFECT)
|
||||||
flash = kwargs.get(light.ATTR_FLASH)
|
flash = kwargs.get(light.ATTR_FLASH)
|
||||||
@ -218,20 +221,26 @@ class BaseLight(LogMixin, light.LightEntity):
|
|||||||
)
|
)
|
||||||
|
|
||||||
# If we need to pause attribute report parsing, we'll do so here.
|
# If we need to pause attribute report parsing, we'll do so here.
|
||||||
# After successful calls, we later start a timer to unset the flag after transition_time.
|
# After successful calls, we later start a timer to unset the flag after
|
||||||
# On an error on the first move to level call, we unset the flag immediately if no previous timer is running.
|
# transition_time.
|
||||||
# On an error on subsequent calls, we start the transition timer, as a brightness call might have come through.
|
# - On an error on the first move to level call, we unset the flag immediately
|
||||||
|
# if no previous timer is running.
|
||||||
|
# - On an error on subsequent calls, we start the transition timer,
|
||||||
|
# as a brightness call might have come through.
|
||||||
if set_transition_flag:
|
if set_transition_flag:
|
||||||
self.async_transition_set_flag()
|
self.async_transition_set_flag()
|
||||||
|
|
||||||
# If the light is currently off but a turn_on call with a color/temperature is sent,
|
# If the light is currently off but a turn_on call with a color/temperature is
|
||||||
# the light needs to be turned on first at a low brightness level where the light is immediately transitioned
|
# sent, the light needs to be turned on first at a low brightness level where
|
||||||
# to the correct color. Afterwards, the transition is only from the low brightness to the new brightness.
|
# the light is immediately transitioned to the correct color. Afterwards, the
|
||||||
# Otherwise, the transition is from the color the light had before being turned on to the new color.
|
# transition is only from the low brightness to the new brightness.
|
||||||
# This can look especially bad with transitions longer than a second. We do not want to do this for
|
# Otherwise, the transition is from the color the light had before being turned
|
||||||
# devices that need to be forced to use the on command because we would end up with 4 commands sent:
|
# on to the new color. This can look especially bad with transitions longer than
|
||||||
# move to level, on, color, move to level... We also will not set this if the bulb is already in the
|
# a second. We do not want to do this for devices that need to be forced to use
|
||||||
# desired color mode with the desired color or color temperature.
|
# the on command because we would end up with 4 commands sent:
|
||||||
|
# move to level, on, color, move to level... We also will not set this
|
||||||
|
# if the bulb is already in the desired color mode with the desired color
|
||||||
|
# or color temperature.
|
||||||
new_color_provided_while_off = (
|
new_color_provided_while_off = (
|
||||||
self._zha_config_enhanced_light_transition
|
self._zha_config_enhanced_light_transition
|
||||||
and not self._FORCE_ON
|
and not self._FORCE_ON
|
||||||
@ -278,7 +287,8 @@ class BaseLight(LogMixin, light.LightEntity):
|
|||||||
t_log = {}
|
t_log = {}
|
||||||
|
|
||||||
if new_color_provided_while_off:
|
if new_color_provided_while_off:
|
||||||
# If the light is currently off, we first need to turn it on at a low brightness level with no transition.
|
# If the light is currently off, we first need to turn it on at a low
|
||||||
|
# brightness level with no transition.
|
||||||
# After that, we set it to the desired color/temperature with no transition.
|
# After that, we set it to the desired color/temperature with no transition.
|
||||||
result = await self._level_channel.move_to_level_with_on_off(
|
result = await self._level_channel.move_to_level_with_on_off(
|
||||||
level=DEFAULT_MIN_BRIGHTNESS,
|
level=DEFAULT_MIN_BRIGHTNESS,
|
||||||
@ -286,13 +296,15 @@ class BaseLight(LogMixin, light.LightEntity):
|
|||||||
)
|
)
|
||||||
t_log["move_to_level_with_on_off"] = result
|
t_log["move_to_level_with_on_off"] = result
|
||||||
if isinstance(result, Exception) or result[1] is not Status.SUCCESS:
|
if isinstance(result, Exception) or result[1] is not Status.SUCCESS:
|
||||||
# First 'move to level' call failed, so if the transitioning delay isn't running from a previous call,
|
# First 'move to level' call failed, so if the transitioning delay
|
||||||
|
# isn't running from a previous call,
|
||||||
# the flag can be unset immediately
|
# the flag can be unset immediately
|
||||||
if set_transition_flag and not self._transition_listener:
|
if set_transition_flag and not self._transition_listener:
|
||||||
self.async_transition_complete()
|
self.async_transition_complete()
|
||||||
self.debug("turned on: %s", t_log)
|
self.debug("turned on: %s", t_log)
|
||||||
return
|
return
|
||||||
# Currently only setting it to "on", as the correct level state will be set at the second move_to_level call
|
# Currently only setting it to "on", as the correct level state will
|
||||||
|
# be set at the second move_to_level call
|
||||||
self._attr_state = True
|
self._attr_state = True
|
||||||
|
|
||||||
if execute_if_off_supported:
|
if execute_if_off_supported:
|
||||||
@ -306,7 +318,8 @@ class BaseLight(LogMixin, light.LightEntity):
|
|||||||
t_log,
|
t_log,
|
||||||
):
|
):
|
||||||
# Color calls before on/level calls failed,
|
# Color calls before on/level calls failed,
|
||||||
# so if the transitioning delay isn't running from a previous call, the flag can be unset immediately
|
# so if the transitioning delay isn't running from a previous call,
|
||||||
|
# the flag can be unset immediately
|
||||||
if set_transition_flag and not self._transition_listener:
|
if set_transition_flag and not self._transition_listener:
|
||||||
self.async_transition_complete()
|
self.async_transition_complete()
|
||||||
self.debug("turned on: %s", t_log)
|
self.debug("turned on: %s", t_log)
|
||||||
@ -323,8 +336,8 @@ class BaseLight(LogMixin, light.LightEntity):
|
|||||||
)
|
)
|
||||||
t_log["move_to_level_with_on_off"] = result
|
t_log["move_to_level_with_on_off"] = result
|
||||||
if isinstance(result, Exception) or result[1] is not Status.SUCCESS:
|
if isinstance(result, Exception) or result[1] is not Status.SUCCESS:
|
||||||
# First 'move to level' call failed, so if the transitioning delay isn't running from a previous call,
|
# First 'move to level' call failed, so if the transitioning delay
|
||||||
# the flag can be unset immediately
|
# isn't running from a previous call, the flag can be unset immediately
|
||||||
if set_transition_flag and not self._transition_listener:
|
if set_transition_flag and not self._transition_listener:
|
||||||
self.async_transition_complete()
|
self.async_transition_complete()
|
||||||
self.debug("turned on: %s", t_log)
|
self.debug("turned on: %s", t_log)
|
||||||
@ -339,12 +352,14 @@ class BaseLight(LogMixin, light.LightEntity):
|
|||||||
or (self._FORCE_ON and brightness)
|
or (self._FORCE_ON and brightness)
|
||||||
):
|
):
|
||||||
# since some lights don't always turn on with move_to_level_with_on_off,
|
# since some lights don't always turn on with move_to_level_with_on_off,
|
||||||
# we should call the on command on the on_off cluster if brightness is not 0.
|
# we should call the on command on the on_off cluster
|
||||||
|
# if brightness is not 0.
|
||||||
result = await self._on_off_channel.on()
|
result = await self._on_off_channel.on()
|
||||||
t_log["on_off"] = result
|
t_log["on_off"] = result
|
||||||
if isinstance(result, Exception) or result[1] is not Status.SUCCESS:
|
if isinstance(result, Exception) or result[1] is not Status.SUCCESS:
|
||||||
# 'On' call failed, but as brightness may still transition (for FORCE_ON lights),
|
# 'On' call failed, but as brightness may still transition
|
||||||
# we start the timer to unset the flag after the transition_time if necessary.
|
# (for FORCE_ON lights), we start the timer to unset the flag after
|
||||||
|
# the transition_time if necessary.
|
||||||
self.async_transition_start_timer(transition_time)
|
self.async_transition_start_timer(transition_time)
|
||||||
self.debug("turned on: %s", t_log)
|
self.debug("turned on: %s", t_log)
|
||||||
return
|
return
|
||||||
@ -360,13 +375,15 @@ class BaseLight(LogMixin, light.LightEntity):
|
|||||||
new_color_provided_while_off,
|
new_color_provided_while_off,
|
||||||
t_log,
|
t_log,
|
||||||
):
|
):
|
||||||
# Color calls failed, but as brightness may still transition, we start the timer to unset the flag
|
# Color calls failed, but as brightness may still transition,
|
||||||
|
# we start the timer to unset the flag
|
||||||
self.async_transition_start_timer(transition_time)
|
self.async_transition_start_timer(transition_time)
|
||||||
self.debug("turned on: %s", t_log)
|
self.debug("turned on: %s", t_log)
|
||||||
return
|
return
|
||||||
|
|
||||||
if new_color_provided_while_off:
|
if new_color_provided_while_off:
|
||||||
# The light has the correct color, so we can now transition it to the correct brightness level.
|
# The light has the correct color, so we can now transition
|
||||||
|
# it to the correct brightness level.
|
||||||
result = await self._level_channel.move_to_level(
|
result = await self._level_channel.move_to_level(
|
||||||
level=level, transition_time=duration
|
level=level, transition_time=duration
|
||||||
)
|
)
|
||||||
@ -378,8 +395,9 @@ class BaseLight(LogMixin, light.LightEntity):
|
|||||||
if level:
|
if level:
|
||||||
self._attr_brightness = level
|
self._attr_brightness = level
|
||||||
|
|
||||||
# Our light is guaranteed to have just started the transitioning process if necessary,
|
# Our light is guaranteed to have just started the transitioning process
|
||||||
# so we start the delay for the transition (to stop parsing attribute reports after the completed transition).
|
# if necessary, so we start the delay for the transition (to stop parsing
|
||||||
|
# attribute reports after the completed transition).
|
||||||
self.async_transition_start_timer(transition_time)
|
self.async_transition_start_timer(transition_time)
|
||||||
|
|
||||||
if effect == light.EFFECT_COLORLOOP:
|
if effect == light.EFFECT_COLORLOOP:
|
||||||
@ -437,7 +455,8 @@ class BaseLight(LogMixin, light.LightEntity):
|
|||||||
if self._zha_config_enable_light_transitioning_flag:
|
if self._zha_config_enable_light_transitioning_flag:
|
||||||
self.async_transition_set_flag()
|
self.async_transition_set_flag()
|
||||||
|
|
||||||
# is not none looks odd here, but it will override built in bulb transition times if we pass 0 in here
|
# is not none looks odd here, but it will override built in bulb
|
||||||
|
# transition times if we pass 0 in here
|
||||||
if transition is not None and supports_level:
|
if transition is not None and supports_level:
|
||||||
result = await self._level_channel.move_to_level_with_on_off(
|
result = await self._level_channel.move_to_level_with_on_off(
|
||||||
level=0,
|
level=0,
|
||||||
@ -460,7 +479,8 @@ class BaseLight(LogMixin, light.LightEntity):
|
|||||||
self._off_brightness = self._attr_brightness
|
self._off_brightness = self._attr_brightness
|
||||||
if transition is not None:
|
if transition is not None:
|
||||||
# save for when calling turn_on without a brightness:
|
# save for when calling turn_on without a brightness:
|
||||||
# current_level is set to 1 after transitioning to level 0, needed for correct state with light groups
|
# current_level is set to 1 after transitioning to level 0,
|
||||||
|
# needed for correct state with light groups
|
||||||
self._attr_brightness = 1
|
self._attr_brightness = 1
|
||||||
self._off_with_transition = transition is not None
|
self._off_with_transition = transition is not None
|
||||||
|
|
||||||
@ -560,7 +580,10 @@ class BaseLight(LogMixin, light.LightEntity):
|
|||||||
|
|
||||||
@callback
|
@callback
|
||||||
def async_transition_start_timer(self, transition_time) -> None:
|
def async_transition_start_timer(self, transition_time) -> None:
|
||||||
"""Start a timer to unset _transitioning_individual after transition_time if necessary."""
|
"""Start a timer to unset _transitioning_individual after transition_time.
|
||||||
|
|
||||||
|
If necessary.
|
||||||
|
"""
|
||||||
if not transition_time:
|
if not transition_time:
|
||||||
return
|
return
|
||||||
# For longer transitions, we want to extend the timer a bit more
|
# For longer transitions, we want to extend the timer a bit more
|
||||||
@ -869,8 +892,9 @@ class Light(BaseLight, ZhaEntity):
|
|||||||
attributes, from_cache=False, only_cache=False
|
attributes, from_cache=False, only_cache=False
|
||||||
)
|
)
|
||||||
|
|
||||||
# although rare, a transition might have been started while we were waiting for the polled attributes,
|
# although rare, a transition might have been started while we were waiting
|
||||||
# so abort if we are transitioning, as that state will not be accurate
|
# for the polled attributes, so abort if we are transitioning,
|
||||||
|
# as that state will not be accurate
|
||||||
if self.is_transitioning:
|
if self.is_transitioning:
|
||||||
return
|
return
|
||||||
|
|
||||||
@ -957,31 +981,37 @@ class Light(BaseLight, ZhaEntity):
|
|||||||
|
|
||||||
supported_modes = self._attr_supported_color_modes
|
supported_modes = self._attr_supported_color_modes
|
||||||
|
|
||||||
# unset "off brightness" and "off with transition" if group turned on this light
|
# unset "off brightness" and "off with transition"
|
||||||
|
# if group turned on this light
|
||||||
if state and not self._attr_state:
|
if state and not self._attr_state:
|
||||||
self._off_with_transition = False
|
self._off_with_transition = False
|
||||||
self._off_brightness = None
|
self._off_brightness = None
|
||||||
|
|
||||||
# set "off brightness" and "off with transition" if group turned off this light
|
# set "off brightness" and "off with transition"
|
||||||
|
# if group turned off this light, and the light was not already off
|
||||||
|
# (to not override _off_with_transition)
|
||||||
elif (
|
elif (
|
||||||
not state # group is turning the light off
|
not state and self._attr_state and brightness_supported(supported_modes)
|
||||||
and self._attr_state # check the light was not already off (to not override _off_with_transition)
|
|
||||||
and brightness_supported(supported_modes)
|
|
||||||
):
|
):
|
||||||
# use individual brightness, instead of possibly averaged brightness from group
|
# use individual brightness, instead of possibly averaged
|
||||||
|
# brightness from group
|
||||||
self._off_brightness = self._attr_brightness
|
self._off_brightness = self._attr_brightness
|
||||||
self._off_with_transition = update_params["off_with_transition"]
|
self._off_with_transition = update_params["off_with_transition"]
|
||||||
|
|
||||||
# Note: If individual lights have off_with_transition set, but not the group,
|
# Note: If individual lights have off_with_transition set, but not the
|
||||||
# and the group is then turned on without a level, individual lights might fall back to brightness level 1.
|
# group, and the group is then turned on without a level, individual lights
|
||||||
# Since all lights might need different brightness levels to be turned on, we can't use one group call.
|
# might fall back to brightness level 1.
|
||||||
# And making individual calls when turning on a ZHA group would cause a lot of traffic.
|
# Since all lights might need different brightness levels to be turned on,
|
||||||
# In this case, turn_on should either just be called with a level or individual turn_on calls can be used.
|
# we can't use one group call. And making individual calls when turning on
|
||||||
|
# a ZHA group would cause a lot of traffic. In this case,
|
||||||
|
# turn_on should either just be called with a level or individual turn_on
|
||||||
|
# calls can be used.
|
||||||
|
|
||||||
# state is always set (light.turn_on/light.turn_off)
|
# state is always set (light.turn_on/light.turn_off)
|
||||||
self._attr_state = state
|
self._attr_state = state
|
||||||
|
|
||||||
# before assuming a group state attribute, check if the attribute was actually set in that call
|
# before assuming a group state attribute, check if the attribute
|
||||||
|
# was actually set in that call
|
||||||
if brightness is not None and brightness_supported(supported_modes):
|
if brightness is not None and brightness_supported(supported_modes):
|
||||||
self._attr_brightness = brightness
|
self._attr_brightness = brightness
|
||||||
if color_mode is not None and color_mode in supported_modes:
|
if color_mode is not None and color_mode in supported_modes:
|
||||||
@ -1052,7 +1082,8 @@ class LightGroup(BaseLight, ZhaGroupEntity):
|
|||||||
|
|
||||||
self._GROUP_SUPPORTS_EXECUTE_IF_OFF = True # pylint: disable=invalid-name
|
self._GROUP_SUPPORTS_EXECUTE_IF_OFF = True # pylint: disable=invalid-name
|
||||||
# Check all group members to see if they support execute_if_off.
|
# Check all group members to see if they support execute_if_off.
|
||||||
# If at least one member has a color cluster and doesn't support it, it's not used.
|
# If at least one member has a color cluster and doesn't support it,
|
||||||
|
# it's not used.
|
||||||
for member in group.members:
|
for member in group.members:
|
||||||
for pool in member.device.channels.pools:
|
for pool in member.device.channels.pools:
|
||||||
for channel in pool.all_channels.values():
|
for channel in pool.all_channels.values():
|
||||||
@ -1122,8 +1153,9 @@ class LightGroup(BaseLight, ZhaGroupEntity):
|
|||||||
|
|
||||||
async def async_turn_on(self, **kwargs: Any) -> None:
|
async def async_turn_on(self, **kwargs: Any) -> None:
|
||||||
"""Turn the entity on."""
|
"""Turn the entity on."""
|
||||||
# "off with transition" and "off brightness" will get overridden when turning on the group,
|
# "off with transition" and "off brightness" will get overridden when
|
||||||
# but they are needed for setting the assumed member state correctly, so save them here
|
# turning on the group, but they are needed for setting the assumed
|
||||||
|
# member state correctly, so save them here
|
||||||
off_brightness = self._off_brightness if self._off_with_transition else None
|
off_brightness = self._off_brightness if self._off_with_transition else None
|
||||||
await super().async_turn_on(**kwargs)
|
await super().async_turn_on(**kwargs)
|
||||||
if self._zha_config_group_members_assume_state:
|
if self._zha_config_group_members_assume_state:
|
||||||
@ -1251,11 +1283,13 @@ class LightGroup(BaseLight, ZhaGroupEntity):
|
|||||||
"off_with_transition": self._off_with_transition,
|
"off_with_transition": self._off_with_transition,
|
||||||
}
|
}
|
||||||
|
|
||||||
# check if the parameters were actually updated in the service call before updating members
|
# check if the parameters were actually updated
|
||||||
|
# in the service call before updating members
|
||||||
if light.ATTR_BRIGHTNESS in service_kwargs: # or off brightness
|
if light.ATTR_BRIGHTNESS in service_kwargs: # or off brightness
|
||||||
update_params[light.ATTR_BRIGHTNESS] = self._attr_brightness
|
update_params[light.ATTR_BRIGHTNESS] = self._attr_brightness
|
||||||
elif off_brightness is not None:
|
elif off_brightness is not None:
|
||||||
# if we turn on the group light with "off brightness", pass that to the members
|
# if we turn on the group light with "off brightness",
|
||||||
|
# pass that to the members
|
||||||
update_params[light.ATTR_BRIGHTNESS] = off_brightness
|
update_params[light.ATTR_BRIGHTNESS] = off_brightness
|
||||||
|
|
||||||
if light.ATTR_COLOR_TEMP in service_kwargs:
|
if light.ATTR_COLOR_TEMP in service_kwargs:
|
||||||
|
@ -484,7 +484,9 @@ async def websocket_network_status(
|
|||||||
"type": controller.controller_type,
|
"type": controller.controller_type,
|
||||||
"own_node_id": controller.own_node_id,
|
"own_node_id": controller.own_node_id,
|
||||||
"is_primary": controller.is_primary,
|
"is_primary": controller.is_primary,
|
||||||
"is_using_home_id_from_other_network": controller.is_using_home_id_from_other_network,
|
"is_using_home_id_from_other_network": (
|
||||||
|
controller.is_using_home_id_from_other_network
|
||||||
|
),
|
||||||
"is_sis_present": controller.is_SIS_present,
|
"is_sis_present": controller.is_SIS_present,
|
||||||
"was_real_primary": controller.was_real_primary,
|
"was_real_primary": controller.was_real_primary,
|
||||||
"is_suc": controller.is_suc,
|
"is_suc": controller.is_suc,
|
||||||
|
@ -220,7 +220,8 @@ class ZWaveClimate(ZWaveBaseEntity, ClimateEntity):
|
|||||||
all_presets: dict[str, int | None] = {PRESET_NONE: None}
|
all_presets: dict[str, int | None] = {PRESET_NONE: None}
|
||||||
|
|
||||||
# Z-Wave uses one list for both modes and presets.
|
# Z-Wave uses one list for both modes and presets.
|
||||||
# Iterate over all Z-Wave ThermostatModes and extract the hvac modes and presets.
|
# Iterate over all Z-Wave ThermostatModes
|
||||||
|
# and extract the hvac modes and presets.
|
||||||
if self._current_mode is None:
|
if self._current_mode is None:
|
||||||
self._hvac_modes = {
|
self._hvac_modes = {
|
||||||
ZW_HVAC_MODE_MAP[ThermostatMode.HEAT]: ThermostatMode.HEAT
|
ZW_HVAC_MODE_MAP[ThermostatMode.HEAT]: ThermostatMode.HEAT
|
||||||
@ -242,7 +243,8 @@ class ZWaveClimate(ZWaveBaseEntity, ClimateEntity):
|
|||||||
def _current_mode_setpoint_enums(self) -> list[ThermostatSetpointType]:
|
def _current_mode_setpoint_enums(self) -> list[ThermostatSetpointType]:
|
||||||
"""Return the list of enums that are relevant to the current thermostat mode."""
|
"""Return the list of enums that are relevant to the current thermostat mode."""
|
||||||
if self._current_mode is None or self._current_mode.value is None:
|
if self._current_mode is None or self._current_mode.value is None:
|
||||||
# Thermostat(valve) with no support for setting a mode is considered heating-only
|
# Thermostat(valve) with no support for setting a mode
|
||||||
|
# is considered heating-only
|
||||||
return [ThermostatSetpointType.HEATING]
|
return [ThermostatSetpointType.HEATING]
|
||||||
return THERMOSTAT_MODE_SETPOINT_MAP.get(int(self._current_mode.value), [])
|
return THERMOSTAT_MODE_SETPOINT_MAP.get(int(self._current_mode.value), [])
|
||||||
|
|
||||||
@ -261,7 +263,8 @@ class ZWaveClimate(ZWaveBaseEntity, ClimateEntity):
|
|||||||
def hvac_mode(self) -> HVACMode:
|
def hvac_mode(self) -> HVACMode:
|
||||||
"""Return hvac operation ie. heat, cool mode."""
|
"""Return hvac operation ie. heat, cool mode."""
|
||||||
if self._current_mode is None:
|
if self._current_mode is None:
|
||||||
# Thermostat(valve) with no support for setting a mode is considered heating-only
|
# Thermostat(valve) with no support for setting
|
||||||
|
# a mode is considered heating-only
|
||||||
return HVACMode.HEAT
|
return HVACMode.HEAT
|
||||||
if self._current_mode.value is None:
|
if self._current_mode.value is None:
|
||||||
# guard missing value
|
# guard missing value
|
||||||
|
@ -179,13 +179,17 @@ class ZWaveDiscoverySchema:
|
|||||||
device_class_generic: set[str | int] | None = None
|
device_class_generic: set[str | int] | None = None
|
||||||
# [optional] the node's specific device class must match ANY of these values
|
# [optional] the node's specific device class must match ANY of these values
|
||||||
device_class_specific: set[str | int] | None = None
|
device_class_specific: set[str | int] | None = None
|
||||||
# [optional] additional values that ALL need to be present on the node for this scheme to pass
|
# [optional] additional values that ALL need to be present
|
||||||
|
# on the node for this scheme to pass
|
||||||
required_values: list[ZWaveValueDiscoverySchema] | None = None
|
required_values: list[ZWaveValueDiscoverySchema] | None = None
|
||||||
# [optional] additional values that MAY NOT be present on the node for this scheme to pass
|
# [optional] additional values that MAY NOT be present
|
||||||
|
# on the node for this scheme to pass
|
||||||
absent_values: list[ZWaveValueDiscoverySchema] | None = None
|
absent_values: list[ZWaveValueDiscoverySchema] | None = None
|
||||||
# [optional] bool to specify if this primary value may be discovered by multiple platforms
|
# [optional] bool to specify if this primary value may be discovered
|
||||||
|
# by multiple platforms
|
||||||
allow_multi: bool = False
|
allow_multi: bool = False
|
||||||
# [optional] bool to specify whether state is assumed and events should be fired on value update
|
# [optional] bool to specify whether state is assumed
|
||||||
|
# and events should be fired on value update
|
||||||
assumed_state: bool = False
|
assumed_state: bool = False
|
||||||
# [optional] bool to specify whether entity should be enabled by default
|
# [optional] bool to specify whether entity should be enabled by default
|
||||||
entity_registry_enabled_default: bool = True
|
entity_registry_enabled_default: bool = True
|
||||||
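The ALL / MAY NOT semantics spelled out in these comments amount to: every entry in required_values must match some value on the node, and no entry in absent_values may match any. A hedged sketch using a hypothetical matches() predicate (the integration's real matching logic lives in its discovery code and differs in detail):

def schema_passes(node_values, required_values, absent_values) -> bool:
    # ALL required_values must be present on the node ...
    required_ok = all(
        any(req.matches(value) for value in node_values)
        for req in (required_values or [])
    )
    # ... and NONE of the absent_values may be present.
    absent_ok = not any(
        any(absent.matches(value) for value in node_values)
        for absent in (absent_values or [])
    )
    return required_ok and absent_ok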
@ -977,7 +981,8 @@ def async_discover_single_value(
|
|||||||
)
|
)
|
||||||
|
|
||||||
if not schema.allow_multi:
|
if not schema.allow_multi:
|
||||||
# return early since this value may not be discovered by other schemas/platforms
|
# return early since this value may not be discovered
|
||||||
|
# by other schemas/platforms
|
||||||
return
|
return
|
||||||
|
|
||||||
|
|
||||||
|
@ -444,7 +444,8 @@ class ZwaveLight(ZWaveBaseEntity, LightEntity):
|
|||||||
class ZwaveBlackIsOffLight(ZwaveLight):
|
class ZwaveBlackIsOffLight(ZwaveLight):
|
||||||
"""Representation of a Z-Wave light where setting the color to black turns it off.
|
"""Representation of a Z-Wave light where setting the color to black turns it off.
|
||||||
|
|
||||||
Currently only supports lights with RGB, no color temperature, and no white channels.
|
Currently only supports lights with RGB, no color temperature,
|
||||||
|
and no white channels.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
|
@ -36,8 +36,8 @@ class ValueID:
|
|||||||
def from_unique_id(unique_id: str) -> ValueID:
|
def from_unique_id(unique_id: str) -> ValueID:
|
||||||
"""Get a ValueID from a unique ID.
|
"""Get a ValueID from a unique ID.
|
||||||
|
|
||||||
This also works for Notification CC Binary Sensors which have their own unique ID
|
This also works for Notification CC Binary Sensors which have their
|
||||||
format.
|
own unique ID format.
|
||||||
"""
|
"""
|
||||||
return ValueID.from_string_id(unique_id.split(".")[1])
|
return ValueID.from_string_id(unique_id.split(".")[1])
|
||||||
|
|
||||||
|
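Purely as an illustration of the parsing step above (the unique ID below is made up, not a documented format): splitting on "." and taking the second element drops the leading home-ID portion before the remainder is handed to ValueID.from_string_id.

unique_id = "3245146787.52-49-0-currentValue"  # hypothetical "<home_id>.<value_id_string>"
value_id_string = unique_id.split(".")[1]  # -> "52-49-0-currentValue"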
@ -1,4 +1,4 @@
|
|||||||
"""Methods and classes related to executing Z-Wave commands and publishing these to hass."""
|
"""Methods and classes related to executing Z-Wave commands."""
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import asyncio
|
import asyncio
|
||||||
@ -100,7 +100,10 @@ def raise_exceptions_from_results(
|
|||||||
|
|
||||||
|
|
||||||
class ZWaveServices:
|
class ZWaveServices:
|
||||||
"""Class that holds our services (Zwave Commands) that should be published to hass."""
|
"""Class that holds our services (Zwave Commands).
|
||||||
|
|
||||||
|
Services that should be published to hass.
|
||||||
|
"""
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
@ -157,8 +160,8 @@ class ZWaveServices:
|
|||||||
if first_node and not all(node.client.driver is not None for node in nodes):
|
if first_node and not all(node.client.driver is not None for node in nodes):
|
||||||
raise vol.Invalid(f"Driver not ready for all nodes: {nodes}")
|
raise vol.Invalid(f"Driver not ready for all nodes: {nodes}")
|
||||||
|
|
||||||
# If any nodes don't have matching home IDs, we can't run the command because
|
# If any nodes don't have matching home IDs, we can't run the command
|
||||||
# we can't multicast across multiple networks
|
# because we can't multicast across multiple networks
|
||||||
if (
|
if (
|
||||||
first_node
|
first_node
|
||||||
and first_node.client.driver # We checked the driver was ready above.
|
and first_node.client.driver # We checked the driver was ready above.
|
||||||
|
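The home-ID restriction described above reduces to: a multicast command may only target nodes on one network. A minimal sketch of such a check, assuming each node exposes its controller's home ID as node.client.driver.controller.home_id (an assumption here, not shown in this diff):

def nodes_share_network(nodes) -> bool:
    # All nodes must report the same home ID before multicasting across them.
    home_ids = {node.client.driver.controller.home_id for node in nodes}
    return len(home_ids) <= 1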
@ -86,7 +86,7 @@ class ZWaveSwitch(ZWaveBaseEntity, SwitchEntity):
|
|||||||
|
|
||||||
|
|
||||||
class ZWaveBarrierEventSignalingSwitch(ZWaveBaseEntity, SwitchEntity):
|
class ZWaveBarrierEventSignalingSwitch(ZWaveBaseEntity, SwitchEntity):
|
||||||
"""This switch is used to turn on or off a barrier device's event signaling subsystem."""
|
"""Switch is used to turn on/off a barrier device's event signaling subsystem."""
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
|
@ -232,7 +232,8 @@ class ZWaveNodeFirmwareUpdate(UpdateEntity):
|
|||||||
await self._finished_event.wait()
|
await self._finished_event.wait()
|
||||||
assert self._result is not None
|
assert self._result is not None
|
||||||
|
|
||||||
# If the update was not successful, we should throw an error to let the user know
|
# If the update was not successful, we should throw an error
|
||||||
|
# to let the user know
|
||||||
if not self._result.success:
|
if not self._result.success:
|
||||||
error_msg = self._result.status.name.replace("_", " ").title()
|
error_msg = self._result.status.name.replace("_", " ").title()
|
||||||
self._unsub_firmware_events_and_reset_progress()
|
self._unsub_firmware_events_and_reset_progress()
|
||||||
|
@ -33,7 +33,10 @@ class ZWaveMeConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
|
|||||||
errors = {}
|
errors = {}
|
||||||
placeholders = {
|
placeholders = {
|
||||||
"local_token": "/112f7a4a-0051-cc2b-3b61-1898181b9950",
|
"local_token": "/112f7a4a-0051-cc2b-3b61-1898181b9950",
|
||||||
"find_token": "0481effe8a5c6f757b455babb678dc0e764feae279/112f7a4a-0051-cc2b-3b61-1898181b9950",
|
"find_token": (
|
||||||
|
"0481effe8a5c6f757b455babb678dc0e764feae279/112f7a4a-0051"
|
||||||
|
"-cc2b-3b61-1898181b9950"
|
||||||
|
),
|
||||||
"local_url": "ws://192.168.1.39:8083",
|
"local_url": "ws://192.168.1.39:8083",
|
||||||
"add_on_url": "ws://127.0.0.1:8083",
|
"add_on_url": "ws://127.0.0.1:8083",
|
||||||
"find_url": "wss://find.z-wave.me",
|
"find_url": "wss://find.z-wave.me",
|
||||||
|
@ -74,7 +74,8 @@ class ZWaveMeCover(ZWaveMeEntity, CoverEntity):
|
|||||||
|
|
||||||
None is unknown, 0 is closed, 100 is fully open.
|
None is unknown, 0 is closed, 100 is fully open.
|
||||||
|
|
||||||
Allow small calibration errors (some devices after a long time become not well calibrated)
|
Allow small calibration errors (some devices after a long time
|
||||||
|
become not well calibrated).
|
||||||
"""
|
"""
|
||||||
if self.device.level > 95:
|
if self.device.level > 95:
|
||||||
return 100
|
return 100
|
||||||
@ -87,7 +88,8 @@ class ZWaveMeCover(ZWaveMeEntity, CoverEntity):
|
|||||||
|
|
||||||
None is unknown.
|
None is unknown.
|
||||||
|
|
||||||
Allow small calibration errors (some devices after a long time become not well calibrated)
|
Allow small calibration errors (some devices after a long time
|
||||||
|
become not well calibrated).
|
||||||
"""
|
"""
|
||||||
if self.device.level is None:
|
if self.device.level is None:
|
||||||
return None
|
return None
|
||||||
|
@ -438,7 +438,10 @@ class OAuth2AuthorizeCallbackView(http.HomeAssistantView):
|
|||||||
|
|
||||||
if state is None:
|
if state is None:
|
||||||
return web.Response(
|
return web.Response(
|
||||||
text="Invalid state. Is My Home Assistant configured to go to the right instance?",
|
text=(
|
||||||
|
"Invalid state. Is My Home Assistant configured "
|
||||||
|
"to go to the right instance?"
|
||||||
|
),
|
||||||
status=400,
|
status=400,
|
||||||
)
|
)
|
||||||
|
|
||||||
|