Increase recorder queue max backlog to 40k, improve message (#70065)

This commit is contained in:
J. Nick Koston 2022-04-14 11:45:07 -10:00 committed by GitHub
parent 2b908bd542
commit c85387290a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 7 additions and 4 deletions

View File

@@ -735,10 +735,13 @@ class Recorder(threading.Thread):
"""
size = self.queue.qsize()
_LOGGER.debug("Recorder queue size is: %s", size)
if self.queue.qsize() <= MAX_QUEUE_BACKLOG:
if size <= MAX_QUEUE_BACKLOG:
return
_LOGGER.error(
"The recorder queue reached the maximum size of %s; Events are no longer being recorded",
"The recorder backlog queue reached the maximum size of %s events; "
"usually, the system is CPU bound, I/O bound, or the database "
"is corrupt due to a disk problem; The recorder will stop "
"recording events to avoid running out of memory",
MAX_QUEUE_BACKLOG,
)
self._async_stop_queue_watcher_and_event_listener()

View File

@@ -13,7 +13,7 @@ DOMAIN = "recorder"
CONF_DB_INTEGRITY_CHECK = "db_integrity_check"
MAX_QUEUE_BACKLOG = 30000
MAX_QUEUE_BACKLOG = 40000
# The maximum number of rows (events) we purge in one delete statement

View File

@@ -278,7 +278,7 @@ async def test_recorder_info(hass, hass_ws_client):
assert response["success"]
assert response["result"] == {
"backlog": 0,
"max_backlog": 30000,
"max_backlog": 40000,
"migration_in_progress": False,
"recording": True,
"thread_running": True,