React to state report events to increase sample size of statistics (#129211)

* react to state reported events to increase sample size

* added test case for timing and minor corrections
unfug-at-github 2024-10-28 14:41:48 +01:00 committed by GitHub
parent 40b561ea69
commit 50ccce7387
2 changed files with 91 additions and 10 deletions
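
For context (a minimal sketch, not part of the diff below): Home Assistant fires EVENT_STATE_CHANGED only when a value actually changes; writing the same value again fires EVENT_STATE_REPORTED instead. A sensor that subscribes only with async_track_state_change_event therefore never sees repeated values, which is what kept the statistics sample small. The sketch assumes it runs inside a test coroutine with the standard hass fixture; the entity id and variable names are illustrative.

from homeassistant.core import Event, EventStateReportedData, HomeAssistant, callback
from homeassistant.helpers.event import async_track_state_report_event


async def collect_repeats(hass: HomeAssistant) -> list[str]:
    """Sketch: repeated (unchanged) values only arrive via state report events."""
    samples: list[str] = []

    @callback
    def _on_report(event: Event[EventStateReportedData]) -> None:
        if (new_state := event.data["new_state"]) is not None:
            samples.append(new_state.state)

    unsub = async_track_state_report_event(hass, ["sensor.test_monitored"], _on_report)

    hass.states.async_set("sensor.test_monitored", "5.0")  # change -> state_changed event
    hass.states.async_set("sensor.test_monitored", "5.0")  # repeat -> state_reported event
    await hass.async_block_till_done()

    unsub()
    return samples  # expected: ["5.0"] -- only the repeat; the first write was a change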

homeassistant/components/statistics/sensor.py

@@ -37,6 +37,7 @@ from homeassistant.core import (
     CALLBACK_TYPE,
     Event,
     EventStateChangedData,
+    EventStateReportedData,
     HomeAssistant,
     State,
     callback,
@@ -48,6 +49,7 @@ from homeassistant.helpers.entity_platform import AddEntitiesCallback
 from homeassistant.helpers.event import (
     async_track_point_in_utc_time,
     async_track_state_change_event,
+    async_track_state_report_event,
 )
 from homeassistant.helpers.reload import async_setup_reload_service
 from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType, StateType
@@ -393,13 +395,12 @@ class StatisticsSensor(SensorEntity):
         await self._async_stats_sensor_startup()
         return self._call_on_remove_callbacks

-    @callback
-    def _async_stats_sensor_state_listener(
+    def _async_handle_new_state(
         self,
-        event: Event[EventStateChangedData],
+        reported_state: State | None,
     ) -> None:
         """Handle the sensor state changes."""
-        if (new_state := event.data["new_state"]) is None:
+        if (new_state := reported_state) is None:
             return
         self._add_state_to_queue(new_state)
         self._async_purge_update_and_schedule()
@@ -411,6 +412,20 @@ class StatisticsSensor(SensorEntity):
         if not self._preview_callback:
             self.async_write_ha_state()

+    @callback
+    def _async_stats_sensor_state_change_listener(
+        self,
+        event: Event[EventStateChangedData],
+    ) -> None:
+        self._async_handle_new_state(event.data["new_state"])
+
+    @callback
+    def _async_stats_sensor_state_report_listener(
+        self,
+        event: Event[EventStateReportedData],
+    ) -> None:
+        self._async_handle_new_state(event.data["new_state"])
+
     async def _async_stats_sensor_startup(self) -> None:
         """Add listener and get recorded state.
@@ -425,7 +440,14 @@ class StatisticsSensor(SensorEntity):
             async_track_state_change_event(
                 self.hass,
                 [self._source_entity_id],
-                self._async_stats_sensor_state_listener,
+                self._async_stats_sensor_state_change_listener,
+            )
+        )
+        self.async_on_remove(
+            async_track_state_report_event(
+                self.hass,
+                [self._source_entity_id],
+                self._async_stats_sensor_state_report_listener,
             )
         )
@@ -435,6 +457,10 @@ class StatisticsSensor(SensorEntity):
     def _add_state_to_queue(self, new_state: State) -> None:
         """Add the state to the queue."""
+        # Attention: it is not safe to store the new_state object,
+        # since the "last_reported" value will be updated over time.
+        # Here we make a copy of the current value, which is okay.
         self._available = new_state.state != STATE_UNAVAILABLE
         if new_state.state == STATE_UNAVAILABLE:
             self.attributes[STAT_SOURCE_VALUE_VALID] = None
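
The comment added above deserves a concrete illustration: the State object held in the state machine is reused, and (as the comment notes) its last_reported value is updated in place on every later report, so queueing the object itself would silently rewrite ages already in the buffer, while appending the timestamp value copies it. A plain-Python stand-in (FakeState is hypothetical, not homeassistant.core.State):

from dataclasses import dataclass
from datetime import datetime, timedelta, timezone


@dataclass
class FakeState:
    """Hypothetical stand-in for a mutable state object."""

    state: str
    last_reported: datetime


t0 = datetime(2024, 10, 28, 12, 0, tzinfo=timezone.utc)
shared = FakeState("5.0", t0)

ages = [shared.last_reported]                      # copy the timestamp value now
shared.last_reported = t0 + timedelta(seconds=30)  # the shared object is mutated later

assert ages[0] == t0                               # the queued age keeps the original report time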
@@ -449,7 +475,7 @@ class StatisticsSensor(SensorEntity):
                 self.states.append(new_state.state == "on")
             else:
                 self.states.append(float(new_state.state))
-            self.ages.append(new_state.last_updated)
+            self.ages.append(new_state.last_reported)
             self.attributes[STAT_SOURCE_VALUE_VALID] = True
         except ValueError:
             self.attributes[STAT_SOURCE_VALUE_VALID] = False

tests/components/statistics/test_sensor.py

@@ -250,8 +250,15 @@ async def test_sensor_defaults_binary(hass: HomeAssistant) -> None:
     assert "age_coverage_ratio" not in state.attributes


-async def test_sensor_source_with_force_update(hass: HomeAssistant) -> None:
-    """Test the behavior of the sensor when the source sensor force-updates with same value."""
+async def test_sensor_state_reported(hass: HomeAssistant) -> None:
+    """Test the behavior of the sensor with a sequence of identical values.
+
+    Forced updates no longer make a difference, since the statistics are now reacting not
+    only to state change events but also to state report events (EVENT_STATE_REPORTED).
+    This means repeating values will be added to the buffer repeatedly in both cases.
+    This fixes problems with time based averages and some other functions that behave
+    differently when repeating values are reported.
+    """
     repeating_values = [18, 0, 0, 0, 0, 0, 0, 0, 9]
     assert await async_setup_component(
         hass,
@@ -294,9 +301,9 @@ async def test_sensor_state_reported(hass: HomeAssistant) -> None:
     state_normal = hass.states.get("sensor.test_normal")
     state_force = hass.states.get("sensor.test_force")
     assert state_normal and state_force
-    assert state_normal.state == str(round(sum(repeating_values) / 3, 2))
+    assert state_normal.state == str(round(sum(repeating_values) / 9, 2))
     assert state_force.state == str(round(sum(repeating_values) / 9, 2))
-    assert state_normal.attributes.get("buffer_usage_ratio") == round(3 / 20, 2)
+    assert state_normal.attributes.get("buffer_usage_ratio") == round(9 / 20, 2)
     assert state_force.attributes.get("buffer_usage_ratio") == round(9 / 20, 2)
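
The new assertion values follow directly from the larger sample. With repeating_values = [18, 0, 0, 0, 0, 0, 0, 0, 9], only the three distinct values (18, 0, 9) used to reach the "normal" sensor's buffer, whereas all nine reports reach it now, so its mean drops from 27 / 3 = 9.0 to 27 / 9 = 3.0 and its buffer usage rises from 3/20 to 9/20, matching the force-update sensor. A quick sanity check of the arithmetic:

repeating_values = [18, 0, 0, 0, 0, 0, 0, 0, 9]

# old behaviour: only state changes (18 -> 0 -> 9) were buffered
assert round(sum([18, 0, 9]) / 3, 2) == 9.0
# new behaviour: every report is buffered, identical to the force_update source
assert round(sum(repeating_values) / 9, 2) == 3.0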
@@ -1777,3 +1784,51 @@ async def test_update_before_load(recorder_mock: Recorder, hass: HomeAssistant)
     # we will end up with a buffer of [1 .. 9] (10 wasn't added)
     # so the computed average_step is 1+2+3+4+5+6+7+8/8 = 4.5
     assert float(hass.states.get("sensor.test").state) == pytest.approx(4.5)
+
+
+async def test_average_linear_unevenly_timed(hass: HomeAssistant) -> None:
+    """Test the average_linear state characteristic with unevenly distributed values.
+
+    This also implicitly tests the correct timing of repeating values.
+    """
+    values_and_times = [[5.0, 2], [10.0, 1], [10.0, 1], [10.0, 2], [5.0, 1]]
+    current_time = dt_util.utcnow()
+
+    with (
+        freeze_time(current_time) as freezer,
+    ):
+        assert await async_setup_component(
+            hass,
+            "sensor",
+            {
+                "sensor": [
+                    {
+                        "platform": "statistics",
+                        "name": "test_sensor_average_linear",
+                        "entity_id": "sensor.test_monitored",
+                        "state_characteristic": "average_linear",
+                        "max_age": {"seconds": 10},
+                    },
+                ]
+            },
+        )
+        await hass.async_block_till_done()
+
+        for value_and_time in values_and_times:
+            hass.states.async_set(
+                "sensor.test_monitored",
+                str(value_and_time[0]),
+                {ATTR_UNIT_OF_MEASUREMENT: DEGREE},
+            )
+            current_time += timedelta(seconds=value_and_time[1])
+            freezer.move_to(current_time)
+            await hass.async_block_till_done()
+
+    state = hass.states.get("sensor.test_sensor_average_linear")
+    assert state is not None
+    assert state.state == "8.33", (
+        "value mismatch for characteristic 'sensor/average_linear' - "
+        f"assert {state.state} == 8.33"
+    )
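
The expected 8.33 can be reproduced by hand, assuming average_linear time-weights the buffer by linear interpolation between consecutive samples (trapezoidal integration): the source reports 5.0 at t=0 s, 10.0 at t=2, 3 and 4 s, and 5.0 at t=6 s, giving an area of 15 + 10 + 10 + 15 = 50 over a 6 s span, i.e. 50 / 6 ≈ 8.33. The two repeated 10.0 values only get their own timestamps because the sensor now reacts to state report events. A stand-alone check:

# (seconds since the first write, reported value) for the five writes in the test above
samples = [(0, 5.0), (2, 10.0), (3, 10.0), (4, 10.0), (6, 5.0)]

area = sum(
    (t1 - t0) * (v0 + v1) / 2  # trapezoid between consecutive samples
    for (t0, v0), (t1, v1) in zip(samples, samples[1:])
)
assert round(area / (samples[-1][0] - samples[0][0]), 2) == 8.33  # 50 / 6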