Small cleanups to recorder history (#89774)

* Small cleanups to recorder history

* Small cleanups to recorder history

* fixes

* flake8 cannot figure it out
Author: J. Nick Koston, 2023-03-15 17:44:33 -10:00 (committed by GitHub)
Parent: 99d6b1fa57
Commit: ed27dae173


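Note on the change below: the substantive rewrite replaces two per-row ent_results.append(...) loops with a single list.extend(...) over a generator expression, threading the previous state through a walrus assignment. A minimal sketch of the pattern (illustrative names, not the recorder's API); the `# noqa: F841` mirrors the diff's workaround for flake8 falsely flagging the walrus target as unused:

def dedupe(rows: list[str]) -> list[str]:
    """Old shape: explicit loop with append."""
    out: list[str] = []
    prev: str | None = None
    for row in rows:
        if row != prev:
            out.append(row)
            prev = row
    return out


def dedupe_extend(rows: list[str]) -> list[str]:
    """New shape: extend() over a generator with a walrus assignment."""
    out: list[str] = []
    prev: str | None = None  # must be bound before the generator reads it
    out.extend(
        (prev := row)  # noqa: F841 -- flake8 reports the target as unused
        for row in rows
        if row != prev
    )
    return out


assert dedupe(["a", "a", "b"]) == dedupe_extend(["a", "a", "b"]) == ["a", "b"]

Both forms drop consecutive duplicates; the extend() form avoids the per-iteration method call and attribute lookup.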
@@ -5,7 +5,6 @@ from collections import defaultdict
 from collections.abc import Callable, Iterable, Iterator, MutableMapping
 from datetime import datetime
 from itertools import groupby
-import logging
 from operator import itemgetter
 from typing import Any, cast
@@ -24,12 +23,7 @@ import homeassistant.util.dt as dt_util
 from ... import recorder
 from ..db_schema import RecorderRuns, StateAttributes, States, StatesMeta
 from ..filters import Filters
-from ..models import (
-    LazyState,
-    process_timestamp,
-    process_timestamp_to_utc_isoformat,
-    row_to_compressed_state,
-)
+from ..models import LazyState, process_timestamp, row_to_compressed_state
 from ..util import execute_stmt_lambda_element, session_scope
 from .const import (
     IGNORE_DOMAINS_ENTITY_ID_LIKE,
@@ -40,9 +34,6 @@ from .const import (
     STATE_KEY,
 )
 
-_LOGGER = logging.getLogger(__name__)
-
-
 _BASE_STATES = (
     States.metadata_id,
     States.state,
@@ -710,7 +701,7 @@ def _sorted_states_to_dict(
     # Append all changes to it
     for metadata_id, group in states_iter:
         attr_cache: dict[str, dict[str, Any]] = {}
-        prev_state: Column | str
+        prev_state: Column | str | None = None
         if not (entity_id := metadata_id_to_entity_id.get(metadata_id)):
             continue
         ent_results = result[entity_id]
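Why the `= None` in the hunk above matters: once the loops become generator expressions (later hunks), the `if ... != prev_state` filter reads prev_state before the walrus ever assigns it, so a bare annotation would leave the name unbound on the first row; None also never equals a real state, so the first row in each group is always emitted. A small sketch with illustrative names:

prev_state: str | None = None  # a bare annotation alone would leave it unbound
out: list[str] = []
out.extend(
    (prev_state := state)
    for state in ["on", "on", "off"]
    if state != prev_state  # the first comparison already reads prev_state
)
assert out == ["on", "off"]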
@@ -741,6 +732,7 @@ def _sorted_states_to_dict(
             )
         state_idx = field_map["state"]
+        last_updated_ts_idx = field_map["last_updated_ts"]
 
         #
         # minimal_response only makes sense with last_updated == last_changed
@@ -749,29 +741,28 @@ def _sorted_states_to_dict(
         #
         # With minimal response we do not care about attribute
         # changes so we can filter out duplicate states
-        last_updated_ts_idx = field_map["last_updated_ts"]
         if compressed_state_format:
-            for row in group:
-                if (state := row[state_idx]) != prev_state:
-                    ent_results.append(
-                        {
-                            attr_state: state,
-                            attr_time: row[last_updated_ts_idx],
-                        }
-                    )
-                    prev_state = state
+            # Compressed state format uses the timestamp directly
+            ent_results.extend(
+                {
+                    attr_state: (prev_state := state),
+                    attr_time: row[last_updated_ts_idx],
+                }
+                for row in group
+                if (state := row[state_idx]) != prev_state
+            )
             continue
-        for row in group:
-            if (state := row[state_idx]) != prev_state:
-                ent_results.append(
-                    {
-                        attr_state: state,
-                        attr_time: process_timestamp_to_utc_isoformat(
-                            dt_util.utc_from_timestamp(row[last_updated_ts_idx])
-                        ),
-                    }
-                )
-                prev_state = state
+
+        # Non-compressed state format returns an ISO formatted string
+        _utc_from_timestamp = dt_util.utc_from_timestamp
+        ent_results.extend(
+            {
+                attr_state: (prev_state := state),  # noqa: F841
+                attr_time: _utc_from_timestamp(row[last_updated_ts_idx]).isoformat(),
+            }
+            for row in group
+            if (state := row[state_idx]) != prev_state
+        )
     # If there are no states beyond the initial state,
     # the state was never popped from initial_states
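For reference, a self-contained sketch of the two rewritten branches above, using stand-in rows and dict keys (the tuples, field_map, and key names here are illustrative; the real code takes them from the recorder's row factories and constants):

from datetime import datetime, timezone
from typing import Any

# Stand-in data: (state, last_updated_ts) tuples plus an index map,
# mirroring the diff's field_map lookups.
rows = [("on", 1678900000.0), ("on", 1678900060.0), ("off", 1678900120.0)]
field_map = {"state": 0, "last_updated_ts": 1}
state_idx = field_map["state"]
last_updated_ts_idx = field_map["last_updated_ts"]

ent_results: list[dict[str, Any]] = []
prev_state: str | None = None
compressed_state_format = False

if compressed_state_format:
    # Compressed format keeps the raw timestamp.
    ent_results.extend(
        {"s": (prev_state := state), "lu": row[last_updated_ts_idx]}
        for row in rows
        if (state := row[state_idx]) != prev_state
    )
else:
    # Non-compressed format emits an ISO 8601 string; binding the
    # converter to a local name mirrors the diff's micro-optimization.
    _utc_from_timestamp = datetime.fromtimestamp  # stand-in for the dt_util helper
    ent_results.extend(
        {
            "state": (prev_state := state),
            "last_changed": _utc_from_timestamp(
                row[last_updated_ts_idx], tz=timezone.utc
            ).isoformat(),
        }
        for row in rows
        if (state := row[state_idx]) != prev_state
    )

# Consecutive duplicate rows collapse: one entry per state change.
assert [r["state"] for r in ent_results] == ["on", "off"]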