Only run garbage collection per module (#84681)

Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io>
This commit is contained in:
Joakim Plate 2023-01-02 20:31:12 +01:00 committed by GitHub
parent b302d1f3fc
commit 987e77780a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 36 additions and 12 deletions

View File

@ -377,7 +377,8 @@ class Recorder(threading.Thread):
# Unknown what it is.
return True
def _empty_queue(self, event: Event) -> None:
@callback
def _async_empty_queue(self, event: Event) -> None:
"""Empty the queue if its still present at final write."""
# If the queue is full of events to be processed because
@ -411,7 +412,7 @@ class Recorder(threading.Thread):
def async_register(self) -> None:
"""Post connection initialize."""
bus = self.hass.bus
bus.async_listen_once(EVENT_HOMEASSISTANT_FINAL_WRITE, self._empty_queue)
bus.async_listen_once(EVENT_HOMEASSISTANT_FINAL_WRITE, self._async_empty_queue)
bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self._async_shutdown)
async_at_started(self.hass, self._async_hass_started)

View File

@ -131,7 +131,7 @@ class MutexPool(StaticPool): # type: ignore[misc]
if DEBUG_MUTEX_POOL:
_LOGGER.debug("%s wait conn%s", threading.current_thread().name, trace_msg)
# pylint: disable-next=consider-using-with
got_lock = MutexPool.pool_lock.acquire(timeout=1)
got_lock = MutexPool.pool_lock.acquire(timeout=10)
if not got_lock:
raise SQLAlchemyError
conn = super()._do_get()

View File

@ -1,6 +1,7 @@
"""Test the Energy sensors."""
import copy
from datetime import timedelta
import gc
from unittest.mock import patch
from freezegun import freeze_time
@ -31,6 +32,18 @@ from homeassistant.util.unit_system import METRIC_SYSTEM, US_CUSTOMARY_SYSTEM
from tests.components.recorder.common import async_wait_recording_done
@pytest.fixture(autouse=True)
def garbage_collection():
    """Force a full garbage collection after each test in this module.

    GC triggering mid-test has caused unexplained test breakage here,
    so we collect explicitly between test cases instead of letting the
    collector fire at an arbitrary point during a test.
    """
    # Hand control to the test first; collect during teardown.
    yield
    gc.collect()
@pytest.fixture
async def setup_integration(recorder_mock):
"""Set up the integration."""

View File

@ -191,6 +191,19 @@ location.async_detect_location_info = check_real(location.async_detect_location_
util.get_local_ip = lambda: "127.0.0.1"
@pytest.fixture(autouse=True, scope="module")
def garbage_collection():
    """Run a full garbage collection at the start of each test module.

    Mimics the behavior of pytest-aiohttp: collecting at a known point
    keeps warnings emitted during garbage collection from spilling over
    into the next test case. Per-module collection covers the common
    cases; a module may override this fixture to collect per test.
    """
    # Setup-only fixture (no yield): one collection per module is enough.
    gc.collect()
@pytest.fixture(autouse=True)
def verify_cleanup(event_loop: asyncio.AbstractEventLoop):
"""Verify that the test has cleaned up resources correctly."""
@ -206,10 +219,6 @@ def verify_cleanup(event_loop: asyncio.AbstractEventLoop):
inst.stop()
pytest.exit(f"Detected non stopped instances ({count}), aborting test run")
threads = frozenset(threading.enumerate()) - threads_before
for thread in threads:
assert isinstance(thread, threading._DummyThread)
# Warn and clean-up lingering tasks and timers
# before moving on to the next test.
tasks = asyncio.all_tasks(event_loop) - tasks_before
@ -224,11 +233,12 @@ def verify_cleanup(event_loop: asyncio.AbstractEventLoop):
_LOGGER.warning("Lingering timer after test %r", handle)
handle.cancel()
# Make sure garbage collect run in same test as allocation
# this is to mimic the behavior of pytest-aiohttp, and is
# required to avoid warnings from spilling over into next
# test case.
gc.collect()
# Verify no threads where left behind.
threads = frozenset(threading.enumerate()) - threads_before
for thread in threads:
assert isinstance(thread, threading._DummyThread) or thread.name.startswith(
"waitpid-"
)
@pytest.fixture(autouse=True)