mirror of https://github.com/home-assistant/core.git
synced 2025-09-25 21:09:38 +00:00

Compare commits
18 Commits

4c052643ca
b7a071b23f
f0a8e8ea04
caf306799b
d9a2cc93ba
dbdd4f0e39
edc44230b4
4d7a468c0e
a06595c08d
ff13b4c6b3
8a755e790f
6a6dfdff4d
b9c233f013
7418011d6d
3a6a439c02
34c4dc2e80
56853787e7
976cbdd2aa

@@ -2,7 +2,9 @@
   "domain": "frontend",
   "name": "Home Assistant Frontend",
   "documentation": "https://www.home-assistant.io/integrations/frontend",
-  "requirements": ["home-assistant-frontend==20200626.0"],
+  "requirements": [
+    "home-assistant-frontend==20200629.0"
+  ],
   "dependencies": [
     "api",
     "auth",
@@ -15,6 +17,8 @@
     "system_log",
     "websocket_api"
   ],
-  "codeowners": ["@home-assistant/frontend"],
+  "codeowners": [
+    "@home-assistant/frontend"
+  ],
   "quality_scale": "internal"
 }

@@ -2,6 +2,6 @@
   "domain": "insteon",
   "name": "Insteon",
   "documentation": "https://www.home-assistant.io/integrations/insteon",
-  "requirements": ["pyinsteon==1.0.4"],
+  "requirements": ["pyinsteon==1.0.5"],
   "codeowners": ["@teharris1"]
 }

@@ -994,6 +994,9 @@ class MqttAvailability(Entity):
         await self._availability_subscribe_topics()
+        async_dispatcher_connect(self.hass, MQTT_CONNECTED, self.async_mqtt_connect)
+        async_dispatcher_connect(self.hass, MQTT_DISCONNECTED, self.async_mqtt_connect)
         self.async_on_remove(
             async_dispatcher_connect(self.hass, MQTT_CONNECTED, self.async_mqtt_connect)
         )

     async def availability_discovery_update(self, config: dict):
         """Handle updated discovery message."""
@@ -1029,7 +1032,8 @@
     @callback
     def async_mqtt_connect(self):
         """Update state on connection/disconnection to MQTT broker."""
-        self.async_write_ha_state()
+        if self.hass.is_running:
+            self.async_write_ha_state()

     async def async_will_remove_from_hass(self):
         """Unsubscribe when removed."""
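
The first hunk wires one callback to both the broker connect and disconnect signals; the second makes that callback a no-op until Home Assistant has finished starting. A standalone sketch of the pattern, with a toy dispatcher and entity standing in for the HA helpers (none of these names are the real implementation):

from collections import defaultdict

signals = defaultdict(list)

def async_dispatcher_connect(signal, target):
    """Toy stand-in for homeassistant.helpers.dispatcher."""
    signals[signal].append(target)

class FakeEntity:
    """Illustrative entity; not the HA implementation."""

    def __init__(self, hass_is_running):
        self.hass_is_running = hass_is_running
        self.state_writes = 0

    def async_mqtt_connect(self):
        # The guard added in the second hunk: availability signals that
        # arrive while Home Assistant is still starting are ignored.
        if self.hass_is_running:
            self.state_writes += 1

entity = FakeEntity(hass_is_running=False)
# One callback serves both signals, as in the first hunk.
async_dispatcher_connect("MQTT_CONNECTED", entity.async_mqtt_connect)
async_dispatcher_connect("MQTT_DISCONNECTED", entity.async_mqtt_connect)
for callback in signals["MQTT_CONNECTED"]:
    callback()
assert entity.state_writes == 0  # ignored during startup
entity.hass_is_running = True
for callback in signals["MQTT_DISCONNECTED"]:
    callback()
assert entity.state_writes == 1  # disconnects update state too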

@@ -190,17 +190,16 @@ class NWSWeather(WeatherEntity):
     @property
     def wind_speed(self):
         """Return the current windspeed."""
-        wind_m_s = None
+        wind_km_hr = None
         if self.observation:
-            wind_m_s = self.observation.get("windSpeed")
-        if wind_m_s is None:
+            wind_km_hr = self.observation.get("windSpeed")
+        if wind_km_hr is None:
             return None
-        wind_m_hr = wind_m_s * 3600

         if self.is_metric:
-            wind = convert_distance(wind_m_hr, LENGTH_METERS, LENGTH_KILOMETERS)
+            wind = wind_km_hr
         else:
-            wind = convert_distance(wind_m_hr, LENGTH_METERS, LENGTH_MILES)
+            wind = convert_distance(wind_km_hr, LENGTH_KILOMETERS, LENGTH_MILES)
         return round(wind)

     @property
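
Behind this hunk: the NWS API reports "windSpeed" in km/h, while the old code read it as m/s and scaled by 3600 into meters per hour before converting. A quick standalone check of the arithmetic, with conversion factors inlined rather than taken from homeassistant.util.distance:

KM_PER_MILE = 1.609344
METERS_PER_MILE = 1609.344

def wind_speed_mph(wind_km_hr):
    """The fixed imperial branch: km/h converts straight to mph."""
    return wind_km_hr / KM_PER_MILE

def wind_speed_mph_old(wind_km_hr):
    """The old path: km/h misread as m/s, scaled to m/h, then to miles."""
    return wind_km_hr * 3600 / METERS_PER_MILE

# A 10 km/h observation is about 6 mph; the old path inflated it 3.6x.
assert round(wind_speed_mph(10)) == 6
assert round(wind_speed_mph_old(10)) == 22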

@@ -24,6 +24,19 @@ _LOGGER = logging.getLogger(__name__)

 async def async_setup_entry(hass, entry, async_add_entities):
     """Set up OwnTracks based off an entry."""
+    # Restore previously loaded devices
+    dev_reg = await device_registry.async_get_registry(hass)
+    dev_ids = {
+        identifier[1]
+        for device in dev_reg.devices.values()
+        for identifier in device.identifiers
+        if identifier[0] == OT_DOMAIN
+    }
+
+    entities = []
+    for dev_id in dev_ids:
+        entity = hass.data[OT_DOMAIN]["devices"][dev_id] = OwnTracksEntity(dev_id)
+        entities.append(entity)

     @callback
     def _receive_data(dev_id, **data):
@@ -39,24 +52,8 @@ async def async_setup_entry(hass, entry, async_add_entities):

     hass.data[OT_DOMAIN]["context"].set_async_see(_receive_data)

-    # Restore previously loaded devices
-    dev_reg = await device_registry.async_get_registry(hass)
-    dev_ids = {
-        identifier[1]
-        for device in dev_reg.devices.values()
-        for identifier in device.identifiers
-        if identifier[0] == OT_DOMAIN
-    }
-
-    if not dev_ids:
-        return True
-
-    entities = []
-    for dev_id in dev_ids:
-        entity = hass.data[OT_DOMAIN]["devices"][dev_id] = OwnTracksEntity(dev_id)
-        entities.append(entity)
-
-    async_add_entities(entities)
+    if entities:
+        async_add_entities(entities)

     return True
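
The restore block that moved to the top of async_setup_entry collects this integration's device ids from registry identifiers, which are (domain, id) tuples. A minimal sketch of that comprehension over an assumed registry shape (plain dicts here, not the real DeviceRegistry):

OT_DOMAIN = "owntracks"

def restored_device_ids(devices):
    """Collect OwnTracks device ids from (domain, id) identifier tuples."""
    return {
        identifier[1]
        for device in devices
        for identifier in device["identifiers"]
        if identifier[0] == OT_DOMAIN
    }

registry = [
    {"identifiers": {(OT_DOMAIN, "phone_anna")}},
    {"identifiers": {("zha", "00:11:22:33")}},  # other integrations are skipped
]
assert restored_device_ids(registry) == {"phone_anna"}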

@@ -164,7 +164,7 @@ async def async_setup_entry(hass, entry):
     def get_plex_account(plex_server):
         try:
             return plex_server.account
-        except plexapi.exceptions.Unauthorized:
+        except (plexapi.exceptions.BadRequest, plexapi.exceptions.Unauthorized):
             return None

     plex_account = await hass.async_add_executor_job(get_plex_account, plex_server)

@@ -4,7 +4,7 @@ import ssl
 import time
 from urllib.parse import urlparse

-from plexapi.exceptions import NotFound, Unauthorized
+from plexapi.exceptions import BadRequest, NotFound, Unauthorized
 import plexapi.myplex
 import plexapi.playqueue
 import plexapi.server
@@ -98,7 +98,7 @@ class PlexServer:
         if not self._plex_account and self._use_plex_tv:
             try:
                 self._plex_account = plexapi.myplex.MyPlexAccount(token=self._token)
-            except Unauthorized:
+            except (BadRequest, Unauthorized):
                 self._use_plex_tv = False
                 _LOGGER.error("Not authorized to access plex.tv with provided token")
                 raise
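
Both Plex hunks apply the same idiom: listing several exception types in one except clause when they share a recovery path. A toy illustration with stub classes standing in for plexapi.exceptions:

class BadRequest(Exception):
    """Stub for plexapi.exceptions.BadRequest."""

class Unauthorized(Exception):
    """Stub for plexapi.exceptions.Unauthorized."""

def raise_(exc):
    raise exc

def get_plex_account(fetch):
    """Return the account, or None if the token is rejected either way."""
    try:
        return fetch()
    except (BadRequest, Unauthorized):
        # plexapi can signal a rejected token as either exception type,
        # so both map to "no plex.tv account available".
        return None

assert get_plex_account(lambda: "account") == "account"
assert get_plex_account(lambda: raise_(BadRequest())) is None
assert get_plex_account(lambda: raise_(Unauthorized())) is None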

@@ -335,7 +335,7 @@ class Recorder(threading.Thread):
         self.event_session = self.get_session()
         # Use a session for the event read loop
         # with a commit every time the event time
         # has changed. This reduces the disk io.
         while True:
             event = self.queue.get()
             if event is None:
@@ -344,7 +344,9 @@
                 self.queue.task_done()
                 return
             if isinstance(event, PurgeTask):
-                purge.purge_old_data(self, event.keep_days, event.repack)
+                # Schedule a new purge task if this one didn't finish
+                if not purge.purge_old_data(self, event.keep_days, event.repack):
+                    self.queue.put(PurgeTask(event.keep_days, event.repack))
                 self.queue.task_done()
                 continue
             if event.event_type == EVENT_TIME_CHANGED:

@@ -64,7 +64,7 @@ class Events(Base):  # type: ignore
             context_parent_id=event.context.parent_id,
         )

-    def to_native(self):
+    def to_native(self, validate_entity_id=True):
         """Convert to a natve HA Event."""
         context = Context(
             id=self.context_id,
@@ -183,7 +183,7 @@ class RecorderRuns(Base):  # type: ignore

         return [row[0] for row in query]

-    def to_native(self):
+    def to_native(self, validate_entity_id=True):
         """Return self, native format is this model."""
         return self

@@ -1,38 +1,68 @@
 """Purge old data helper."""
 from datetime import timedelta
 import logging
+import time

-from sqlalchemy.exc import SQLAlchemyError
+from sqlalchemy.exc import OperationalError, SQLAlchemyError

 import homeassistant.util.dt as dt_util

 from .models import Events, RecorderRuns, States
-from .util import session_scope
+from .util import execute, session_scope

 _LOGGER = logging.getLogger(__name__)


-def purge_old_data(instance, purge_days, repack):
-    """Purge events and states older than purge_days ago."""
+def purge_old_data(instance, purge_days: int, repack: bool) -> bool:
+    """Purge events and states older than purge_days ago.
+
+    Cleans up an timeframe of an hour, based on the oldest record.
+    """
     purge_before = dt_util.utcnow() - timedelta(days=purge_days)
-    _LOGGER.debug("Purging events before %s", purge_before)
+    _LOGGER.debug("Purging states and events before target %s", purge_before)

     try:
         with session_scope(session=instance.get_session()) as session:
+            # Purge a max of 1 hour, based on the oldest states or events record
+            batch_purge_before = purge_before
+
+            query = session.query(States).order_by(States.last_updated.asc()).limit(1)
+            states = execute(query, to_native=True, validate_entity_ids=False)
+            if states:
+                batch_purge_before = min(
+                    batch_purge_before, states[0].last_updated + timedelta(hours=1),
+                )
+
+            query = session.query(Events).order_by(Events.time_fired.asc()).limit(1)
+            events = execute(query, to_native=True)
+            if events:
+                batch_purge_before = min(
+                    batch_purge_before, events[0].time_fired + timedelta(hours=1),
+                )
+
+            _LOGGER.debug("Purging states and events before %s", batch_purge_before)
+
             deleted_rows = (
                 session.query(States)
-                .filter(States.last_updated < purge_before)
+                .filter(States.last_updated < batch_purge_before)
                 .delete(synchronize_session=False)
             )
             _LOGGER.debug("Deleted %s states", deleted_rows)

             deleted_rows = (
                 session.query(Events)
-                .filter(Events.time_fired < purge_before)
+                .filter(Events.time_fired < batch_purge_before)
                 .delete(synchronize_session=False)
             )
             _LOGGER.debug("Deleted %s events", deleted_rows)

+            # If states or events purging isn't processing the purge_before yet,
+            # return false, as we are not done yet.
+            if batch_purge_before != purge_before:
+                _LOGGER.debug("Purging hasn't fully completed yet.")
+                return False
+
+            # Recorder runs is small, no need to batch run it
             deleted_rows = (
                 session.query(RecorderRuns)
                 .filter(RecorderRuns.start < purge_before)
@@ -46,9 +76,25 @@ def purge_old_data(instance, purge_days, repack):
             _LOGGER.debug("Vacuuming SQL DB to free space")
             instance.engine.execute("VACUUM")
         # Optimize mysql / mariadb tables to free up space on disk
-        elif instance.engine.driver == "mysqldb":
+        elif instance.engine.driver in ("mysqldb", "pymysql"):
             _LOGGER.debug("Optimizing SQL DB to free space")
             instance.engine.execute("OPTIMIZE TABLE states, events, recorder_runs")

+    except OperationalError as err:
+        # Retry when one of the following MySQL errors occurred:
+        # 1205: Lock wait timeout exceeded; try restarting transaction
+        # 1206: The total number of locks exceeds the lock table size
+        # 1213: Deadlock found when trying to get lock; try restarting transaction
+        if instance.engine.driver in ("mysqldb", "pymysql") and err.orig.args[0] in (
+            1205,
+            1206,
+            1213,
+        ):
+            _LOGGER.info("%s; purge not completed, retrying", err.orig.args[1])
+            time.sleep(instance.db_retry_wait)
+            return False
+
+        _LOGGER.warning("Error purging history: %s.", err)
     except SQLAlchemyError as err:
         _LOGGER.warning("Error purging history: %s.", err)
+    return True
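
Together with the Recorder hunk above, the new contract is: each purge_old_data call deletes at most one hour of history, measured from the oldest row, and returns True only once everything before the cutoff is gone; False makes the caller re-queue the task (including after a MySQL lock error 1205/1206/1213). A minimal standalone model of that batch-until-done loop, with a plain list of timestamps standing in for the states and events tables:

from datetime import datetime, timedelta
from typing import List

def purge_batch(timestamps: List[datetime], purge_before: datetime) -> bool:
    """Delete up to one hour of rows, oldest first; True once caught up."""
    if not timestamps:
        return True
    batch_before = min(purge_before, min(timestamps) + timedelta(hours=1))
    timestamps[:] = [ts for ts in timestamps if ts >= batch_before]
    return batch_before == purge_before

if __name__ == "__main__":
    now = datetime(2020, 7, 1)
    rows = [now - timedelta(hours=h) for h in range(1, 10)]
    cutoff = now - timedelta(hours=5)
    while not purge_batch(rows, cutoff):
        pass  # the Recorder re-queues a PurgeTask here instead of looping
    assert all(ts >= cutoff for ts in rows)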

@@ -129,11 +129,10 @@ class SpeedTestDataCoordinator(DataUpdateCoordinator):
             server_id = self.config_entry.options.get(CONF_SERVER_ID)
             self.api.closest.clear()
             self.api.get_servers(servers=[server_id])
+        self.api.get_best_server()
         _LOGGER.debug(
             "Executing speedtest.net speed test with server_id: %s", self.api.best["id"]
         )
-
-        self.api.get_best_server()
         self.api.download()
         self.api.upload()
         return self.api.results.dict()

@@ -94,7 +94,7 @@ class SpeedTestOptionsFlowHandler(config_entries.OptionsFlow):
             for (key, value) in self._servers.items()
             if value.get("id") == self.config_entry.options[CONF_SERVER_ID]
         ]
-        server_name = server[0]
+        server_name = server[0] if server else ""

         options = {
             vol.Optional(

@@ -3,6 +3,6 @@
   "name": "Tesla",
   "config_flow": true,
   "documentation": "https://www.home-assistant.io/integrations/tesla",
-  "requirements": ["teslajsonpy==0.8.1"],
+  "requirements": ["teslajsonpy==0.9.0"],
   "codeowners": ["@zabuldon", "@alandtse"]
 }

@@ -3,7 +3,7 @@ import asyncio
 from datetime import timedelta

 from pytile import async_login
-from pytile.errors import TileError
+from pytile.errors import SessionExpiredError, TileError

 from homeassistant.const import ATTR_ATTRIBUTION, CONF_PASSWORD, CONF_USERNAME
 from homeassistant.core import callback
@@ -44,6 +44,9 @@ async def async_setup_entry(hass, config_entry):
         """Get new data from the API."""
         try:
             return await client.tiles.all()
+        except SessionExpiredError:
+            LOGGER.info("Tile session expired; creating a new one")
+            await client.async_init()
         except TileError as err:
             raise UpdateFailed(f"Error while retrieving data: {err}")
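
The hunk treats an expired pytile session as recoverable: re-initialize the client and let the next poll succeed, while any other TileError still fails the update. A standalone sketch with a fake client (the real call is client.tiles.all(); everything below is illustrative):

import asyncio

class SessionExpiredError(Exception):
    """Stub for pytile.errors.SessionExpiredError."""

class TileError(Exception):
    """Stub for pytile.errors.TileError."""

class FakeClient:
    """Illustrative stand-in for the pytile client."""

    def __init__(self):
        self.session_expired = True

    async def async_init(self):
        self.session_expired = False

    async def all_tiles(self):
        if self.session_expired:
            raise SessionExpiredError
        return {"tile-1": {"name": "keys"}}

async def async_update_data(client):
    try:
        return await client.all_tiles()
    except SessionExpiredError:
        # Recoverable: rebuild the session; this poll yields no data,
        # but the next scheduled update succeeds.
        await client.async_init()
    except TileError as err:
        raise RuntimeError(f"Error while retrieving data: {err}")

client = FakeClient()
assert asyncio.run(async_update_data(client)) is None
assert asyncio.run(async_update_data(client)) == {"tile-1": {"name": "keys"}}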

@@ -84,13 +84,26 @@ class TileDeviceTracker(TileEntity, TrackerEntity):

         Value in meters.
         """
-        return round(
-            (
-                self._tile["last_tile_state"]["h_accuracy"]
-                + self._tile["last_tile_state"]["v_accuracy"]
-            )
-            / 2
-        )
+        state = self._tile["last_tile_state"]
+        h_accuracy = state.get("h_accuracy")
+        v_accuracy = state.get("v_accuracy")
+
+        if h_accuracy is not None and v_accuracy is not None:
+            return round(
+                (
+                    self._tile["last_tile_state"]["h_accuracy"]
+                    + self._tile["last_tile_state"]["v_accuracy"]
+                )
+                / 2
+            )
+
+        if h_accuracy is not None:
+            return h_accuracy
+
+        if v_accuracy is not None:
+            return v_accuracy
+
+        return None

     @property
     def latitude(self) -> float:
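
The new accuracy logic degrades gracefully when the Tile payload omits one or both accuracy fields. The same fallback chain as a standalone function over a plain dict:

from typing import Optional

def location_accuracy(state: dict) -> Optional[int]:
    """Mirror the tracker's h/v accuracy fallback, in meters."""
    h_accuracy = state.get("h_accuracy")
    v_accuracy = state.get("v_accuracy")
    if h_accuracy is not None and v_accuracy is not None:
        return round((h_accuracy + v_accuracy) / 2)
    if h_accuracy is not None:
        return h_accuracy
    if v_accuracy is not None:
        return v_accuracy
    return None

assert location_accuracy({"h_accuracy": 10, "v_accuracy": 20}) == 15
assert location_accuracy({"h_accuracy": 10}) == 10
assert location_accuracy({}) is None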

@@ -6,7 +6,7 @@
   "requirements": [
     "bellows==0.17.0",
     "pyserial==3.4",
-    "zha-quirks==0.0.40",
+    "zha-quirks==0.0.41",
     "zigpy-cc==0.4.4",
     "zigpy-deconz==0.9.2",
     "zigpy==0.21.0",

@@ -1,7 +1,7 @@
 """Constants used by Home Assistant components."""
 MAJOR_VERSION = 0
 MINOR_VERSION = 112
-PATCH_VERSION = "0b1"
+PATCH_VERSION = "0b3"
 __short_version__ = f"{MAJOR_VERSION}.{MINOR_VERSION}"
 __version__ = f"{__short_version__}.{PATCH_VERSION}"
 REQUIRED_PYTHON_VER = (3, 7, 0)

@@ -13,7 +13,7 @@ defusedxml==0.6.0
 distro==1.5.0
 emoji==0.5.4
 hass-nabucasa==0.34.7
-home-assistant-frontend==20200626.0
+home-assistant-frontend==20200629.0
 importlib-metadata==1.6.0;python_version<'3.8'
 jinja2>=2.11.1
 netdisco==2.7.1

@@ -738,7 +738,7 @@ hole==0.5.1
 holidays==0.10.2

 # homeassistant.components.frontend
-home-assistant-frontend==20200626.0
+home-assistant-frontend==20200629.0

 # homeassistant.components.zwave
 homeassistant-pyozw==0.1.10
@@ -1388,7 +1388,7 @@ pyialarm==0.3
 pyicloud==0.9.7

 # homeassistant.components.insteon
-pyinsteon==1.0.4
+pyinsteon==1.0.5

 # homeassistant.components.intesishome
 pyintesishome==1.7.5
@@ -2094,7 +2094,7 @@ temperusb==1.5.3
 tesla-powerwall==0.2.11

 # homeassistant.components.tesla
-teslajsonpy==0.8.1
+teslajsonpy==0.9.0

 # homeassistant.components.thermoworks_smoke
 thermoworks_smoke==0.1.8
@@ -2255,7 +2255,7 @@ zengge==0.2
 zeroconf==0.27.1

 # homeassistant.components.zha
-zha-quirks==0.0.40
+zha-quirks==0.0.41

 # homeassistant.components.zhong_hong
 zhong_hong_hvac==1.0.9

@@ -343,7 +343,7 @@ hole==0.5.1
 holidays==0.10.2

 # homeassistant.components.frontend
-home-assistant-frontend==20200626.0
+home-assistant-frontend==20200629.0

 # homeassistant.components.zwave
 homeassistant-pyozw==0.1.10
@@ -890,7 +890,7 @@ tellduslive==0.10.11
 tesla-powerwall==0.2.11

 # homeassistant.components.tesla
-teslajsonpy==0.8.1
+teslajsonpy==0.9.0

 # homeassistant.components.toon
 toonapi==0.1.0
@@ -961,7 +961,7 @@ ya_ma==0.3.8
 zeroconf==0.27.1

 # homeassistant.components.zha
-zha-quirks==0.0.40
+zha-quirks==0.0.41

 # homeassistant.components.zha
 zigpy-cc==0.4.4

@@ -60,7 +60,7 @@ EXPECTED_OBSERVATION_IMPERIAL = {
     ),
     ATTR_WEATHER_WIND_BEARING: 180,
     ATTR_WEATHER_WIND_SPEED: round(
-        convert_distance(10, LENGTH_METERS, LENGTH_MILES) * 3600
+        convert_distance(10, LENGTH_KILOMETERS, LENGTH_MILES)
     ),
     ATTR_WEATHER_PRESSURE: round(
         convert_pressure(100000, PRESSURE_PA, PRESSURE_INHG), 2
@@ -74,9 +74,7 @@ EXPECTED_OBSERVATION_IMPERIAL = {
 EXPECTED_OBSERVATION_METRIC = {
     ATTR_WEATHER_TEMPERATURE: 10,
     ATTR_WEATHER_WIND_BEARING: 180,
-    ATTR_WEATHER_WIND_SPEED: round(
-        convert_distance(10, LENGTH_METERS, LENGTH_KILOMETERS) * 3600
-    ),
+    ATTR_WEATHER_WIND_SPEED: 10,
     ATTR_WEATHER_PRESSURE: round(convert_pressure(100000, PRESSURE_PA, PRESSURE_HPA)),
     ATTR_WEATHER_VISIBILITY: round(
         convert_distance(10000, LENGTH_METERS, LENGTH_KILOMETERS)

@@ -317,7 +317,7 @@ def test_auto_purge(hass_recorder):
     test_time = tz.localize(datetime(2020, 1, 1, 4, 12, 0))

     with patch(
-        "homeassistant.components.recorder.purge.purge_old_data"
+        "homeassistant.components.recorder.purge.purge_old_data", return_value=True
     ) as purge_old_data:
         for delta in (-1, 0, 1):
             hass.bus.fire(

@@ -130,9 +130,16 @@ class TestRecorderPurge(unittest.TestCase):
             assert states.count() == 6

         # run purge_old_data()
-        purge_old_data(self.hass.data[DATA_INSTANCE], 4, repack=False)
+        finished = purge_old_data(self.hass.data[DATA_INSTANCE], 4, repack=False)
+        assert not finished
+        assert states.count() == 4

         # we should only have 2 states left after purging
+        finished = purge_old_data(self.hass.data[DATA_INSTANCE], 4, repack=False)
+        assert not finished
         assert states.count() == 2
+
+        finished = purge_old_data(self.hass.data[DATA_INSTANCE], 4, repack=False)
+        assert finished
+        assert states.count() == 2

     def test_purge_old_events(self):
@@ -144,9 +151,17 @@ class TestRecorderPurge(unittest.TestCase):
             assert events.count() == 6

         # run purge_old_data()
-        purge_old_data(self.hass.data[DATA_INSTANCE], 4, repack=False)
+        finished = purge_old_data(self.hass.data[DATA_INSTANCE], 4, repack=False)
+        assert not finished
+        assert events.count() == 4
+
+        finished = purge_old_data(self.hass.data[DATA_INSTANCE], 4, repack=False)
+        assert not finished
+        assert events.count() == 2

         # we should only have 2 events left
+        finished = purge_old_data(self.hass.data[DATA_INSTANCE], 4, repack=False)
+        assert finished
         assert events.count() == 2

     def test_purge_method(self):
@@ -209,6 +224,6 @@ class TestRecorderPurge(unittest.TestCase):
         self.hass.block_till_done()
         self.hass.data[DATA_INSTANCE].block_till_done()
         assert (
-            mock_logger.debug.mock_calls[4][1][0]
+            mock_logger.debug.mock_calls[5][1][0]
             == "Vacuuming SQL DB to free space"
         )